-rw-r--r--.gitignore1
-rw-r--r--Documentation/RCU/RTFP.txt77
-rw-r--r--Documentation/RCU/UP.txt34
-rw-r--r--Documentation/RCU/checklist.txt20
-rw-r--r--Documentation/RCU/rcubarrier.txt7
-rw-r--r--Documentation/RCU/torture.txt23
-rw-r--r--Documentation/RCU/whatisRCU.txt14
-rw-r--r--Documentation/block/data-integrity.txt4
-rw-r--r--Documentation/cgroups/cpusets.txt12
-rw-r--r--Documentation/dvb/get_dvb_firmware53
-rw-r--r--Documentation/feature-removal-schedule.txt16
-rw-r--r--Documentation/filesystems/seq_file.txt2
-rw-r--r--Documentation/gcov.txt25
-rw-r--r--Documentation/hwlat_detector.txt64
-rw-r--r--Documentation/hwmon/hpfall.c64
-rw-r--r--Documentation/hwmon/tmp42136
-rw-r--r--Documentation/kernel-parameters.txt14
-rw-r--r--Documentation/kmemleak.txt23
-rw-r--r--Documentation/leds-lp3944.txt50
-rw-r--r--Documentation/power/power_supply_class.txt7
-rw-r--r--Documentation/powerpc/booting-without-of.txt1168
-rw-r--r--Documentation/powerpc/dts-bindings/4xx/emac.txt148
-rw-r--r--Documentation/powerpc/dts-bindings/gpio/gpio.txt50
-rw-r--r--Documentation/powerpc/dts-bindings/gpio/led.txt17
-rw-r--r--Documentation/powerpc/dts-bindings/gpio/mdio.txt19
-rw-r--r--Documentation/powerpc/dts-bindings/marvell.txt521
-rw-r--r--Documentation/powerpc/dts-bindings/phy.txt25
-rw-r--r--Documentation/powerpc/dts-bindings/spi-bus.txt57
-rw-r--r--Documentation/powerpc/dts-bindings/usb-ehci.txt25
-rw-r--r--Documentation/powerpc/dts-bindings/xilinx.txt295
-rw-r--r--Documentation/scheduler/sched-rt-group.txt13
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt4
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt13
-rw-r--r--Documentation/sound/alsa/HD-Audio.txt60
-rw-r--r--Documentation/spi/spidev_test.c10
-rw-r--r--Documentation/sysctl/kernel.txt30
-rw-r--r--Documentation/trace/events.txt9
-rw-r--r--Documentation/video4linux/CARDLIST.em28xx3
-rw-r--r--Documentation/vm/slqbinfo.c1047
-rw-r--r--Documentation/x86/00-INDEX2
-rw-r--r--Documentation/x86/exception-tables.txt (renamed from Documentation/exception.txt)202
-rw-r--r--MAINTAINERS114
-rw-r--r--Makefile11
-rw-r--r--arch/alpha/include/asm/pci.h1
-rw-r--r--arch/alpha/include/asm/percpu.h100
-rw-r--r--arch/alpha/include/asm/smp.h2
-rw-r--r--arch/alpha/include/asm/tlbflush.h1
-rw-r--r--arch/alpha/include/asm/topology.h18
-rw-r--r--arch/alpha/kernel/smp.c14
-rw-r--r--arch/alpha/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm/Kconfig18
-rw-r--r--arch/arm/Kconfig.debug8
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/common/vic.c99
-rw-r--r--arch/arm/configs/kb9202_defconfig1158
-rw-r--r--arch/arm/configs/nhk8815_defconfig1316
-rw-r--r--arch/arm/configs/s3c2410_defconfig2
-rw-r--r--arch/arm/configs/s3c6400_defconfig1
-rw-r--r--arch/arm/configs/tct_hammer_defconfig1
-rw-r--r--arch/arm/configs/u300_defconfig92
-rw-r--r--arch/arm/include/asm/cacheflush.h8
-rw-r--r--arch/arm/include/asm/device.h3
-rw-r--r--arch/arm/include/asm/mmu_context.h7
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/include/asm/pci.h2
-rw-r--r--arch/arm/include/asm/pgtable.h53
-rw-r--r--arch/arm/include/asm/smp.h1
-rw-r--r--arch/arm/include/asm/tlbflush.h4
-rw-r--r--arch/arm/kernel/irq.c24
-rw-r--r--arch/arm/kernel/smp.c10
-rw-r--r--arch/arm/kernel/vmlinux.lds.S16
-rw-r--r--arch/arm/mach-at91/Kconfig49
-rw-r--r--arch/arm/mach-at91/Makefile6
-rw-r--r--arch/arm/mach-at91/Makefile.boot4
-rw-r--r--arch/arm/mach-at91/at91sam9261.c22
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c360
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c1230
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c14
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c54
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c389
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c85
-rw-r--r--arch/arm/mach-at91/clock.c66
-rw-r--r--arch/arm/mach-at91/generic.h2
-rw-r--r--arch/arm/mach-at91/gpio.c15
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9261.h3
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9g45.h138
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9g45_matrix.h153
-rw-r--r--arch/arm/mach-at91/include/mach/board.h8
-rw-r--r--arch/arm/mach-at91/include/mach/cpu.h23
-rw-r--r--arch/arm/mach-at91/include/mach/hardware.h4
-rw-r--r--arch/arm/mach-at91/include/mach/timex.h10
-rw-r--r--arch/arm/mach-at91/pm.c3
-rw-r--r--arch/arm/mach-ep93xx/clock.c101
-rw-r--r--arch/arm/mach-ep93xx/gpio.c64
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h61
-rw-r--r--arch/arm/mach-ep93xx/include/mach/hardware.h14
-rw-r--r--arch/arm/mach-ep93xx/include/mach/io.h17
-rw-r--r--arch/arm/mach-nomadik/Kconfig21
-rw-r--r--arch/arm/mach-nomadik/Makefile19
-rw-r--r--arch/arm/mach-nomadik/Makefile.boot4
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c111
-rw-r--r--arch/arm/mach-nomadik/clock.c45
-rw-r--r--arch/arm/mach-nomadik/clock.h14
-rw-r--r--arch/arm/mach-nomadik/cpu-8815.c139
-rw-r--r--arch/arm/mach-nomadik/gpio.c396
-rw-r--r--arch/arm/mach-nomadik/i2c-8815nhk.c65
-rw-r--r--arch/arm/mach-nomadik/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-nomadik/include/mach/debug-macro.S22
-rw-r--r--arch/arm/mach-nomadik/include/mach/entry-macro.S43
-rw-r--r--arch/arm/mach-nomadik/include/mach/gpio.h71
-rw-r--r--arch/arm/mach-nomadik/include/mach/hardware.h90
-rw-r--r--arch/arm/mach-nomadik/include/mach/io.h22
-rw-r--r--arch/arm/mach-nomadik/include/mach/irqs.h82
-rw-r--r--arch/arm/mach-nomadik/include/mach/memory.h28
-rw-r--r--arch/arm/mach-nomadik/include/mach/mtu.h45
-rw-r--r--arch/arm/mach-nomadik/include/mach/setup.h22
-rw-r--r--arch/arm/mach-nomadik/include/mach/system.h45
-rw-r--r--arch/arm/mach-nomadik/include/mach/timex.h6
-rw-r--r--arch/arm/mach-nomadik/include/mach/uncompress.h63
-rw-r--r--arch/arm/mach-nomadik/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-nomadik/timer.c164
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c3
-rw-r--r--arch/arm/mach-omap1/mailbox.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c1
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c21
-rw-r--r--arch/arm/mach-omap2/id.c22
-rw-r--r--arch/arm/mach-omap2/mailbox.c6
-rw-r--r--arch/arm/mach-omap2/mmc-twl4030.c13
-rw-r--r--arch/arm/mach-realview/include/mach/gpio.h1
-rw-r--r--arch/arm/mach-realview/realview_eb.c22
-rw-r--r--arch/arm/mach-s3c2440/mach-mini2440.c3
-rw-r--r--arch/arm/mach-s3c2442/mach-gta02.c3
-rw-r--r--arch/arm/mach-u300/clock.c121
-rw-r--r--arch/arm/mach-versatile/core.c15
-rw-r--r--arch/arm/mach-versatile/include/mach/gpio.h1
-rw-r--r--arch/arm/mach-versatile/versatile_pb.c15
-rw-r--r--arch/arm/mm/Kconfig2
-rw-r--r--arch/arm/mm/context.c2
-rw-r--r--arch/arm/mm/flush.c10
-rw-r--r--arch/arm/mm/proc-syms.c1
-rw-r--r--arch/arm/plat-omap/debug-leds.c11
-rw-r--r--arch/arm/plat-omap/dma.c13
-rw-r--r--arch/arm/plat-omap/gpio.c15
-rw-r--r--arch/arm/plat-omap/include/mach/cpu.h22
-rw-r--r--arch/arm/plat-omap/include/mach/dma.h15
-rw-r--r--arch/arm/plat-omap/include/mach/io.h2
-rw-r--r--arch/arm/plat-omap/iommu.c2
-rw-r--r--arch/arm/plat-omap/sram.c7
-rw-r--r--arch/arm/plat-s3c/Makefile2
-rw-r--r--arch/arm/plat-s3c/include/plat/devs.h1
-rw-r--r--arch/arm/plat-s3c24xx/Makefile2
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c3
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c3
-rw-r--r--arch/avr32/kernel/vmlinux.lds.S1
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S1
-rw-r--r--arch/blackfin/mm/sram-alloc.c6
-rw-r--r--arch/cris/include/arch-v10/arch/mmu.h9
-rw-r--r--arch/cris/include/arch-v32/arch/mmu.h10
-rw-r--r--arch/cris/include/asm/mmu_context.h3
-rw-r--r--arch/cris/include/asm/pgtable.h2
-rw-r--r--arch/cris/kernel/vmlinux.lds.S1
-rw-r--r--arch/cris/mm/fault.c2
-rw-r--r--arch/frv/Kconfig1
-rw-r--r--arch/frv/include/asm/atomic.h68
-rw-r--r--arch/frv/include/asm/gdb-stub.h1
-rw-r--r--arch/frv/include/asm/perf_counter.h17
-rw-r--r--arch/frv/include/asm/system.h2
-rw-r--r--arch/frv/include/asm/unistd.h4
-rw-r--r--arch/frv/kernel/entry.S2
-rw-r--r--arch/frv/kernel/frv_ksyms.c4
-rw-r--r--arch/frv/kernel/vmlinux.lds.S2
-rw-r--r--arch/frv/lib/Makefile4
-rw-r--r--arch/frv/lib/atomic-ops.S3
-rw-r--r--arch/frv/lib/atomic64-ops.S162
-rw-r--r--arch/frv/lib/perf_counter.c19
-rw-r--r--arch/frv/mb93090-mb00/pci-frv.c10
-rw-r--r--arch/h8300/include/asm/pci.h1
-rw-r--r--arch/h8300/kernel/timer/tpu.c1
-rw-r--r--arch/h8300/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/Kconfig3
-rw-r--r--arch/ia64/include/asm/device.h3
-rw-r--r--arch/ia64/include/asm/kvm_host.h4
-rw-r--r--arch/ia64/include/asm/pci.h14
-rw-r--r--arch/ia64/include/asm/smp.h1
-rw-r--r--arch/ia64/include/asm/topology.h3
-rw-r--r--arch/ia64/kernel/esi.c2
-rw-r--r--arch/ia64/kernel/perfmon.c2
-rw-r--r--arch/ia64/kernel/salinfo.c2
-rw-r--r--arch/ia64/kernel/smp.c5
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/kvm/Kconfig11
-rw-r--r--arch/ia64/kvm/kvm-ia64.c65
-rw-r--r--arch/ia64/kvm/kvm_lib.c6
-rw-r--r--arch/ia64/kvm/process.c6
-rw-r--r--arch/ia64/kvm/vcpu.c6
-rw-r--r--arch/ia64/kvm/vtlb.c4
-rw-r--r--arch/ia64/pci/pci.c2
-rw-r--r--arch/ia64/sn/kernel/io_common.c3
-rw-r--r--arch/ia64/sn/kernel/setup.c2
-rw-r--r--arch/m32r/include/asm/mmu_context.h4
-rw-r--r--arch/m32r/include/asm/smp.h2
-rw-r--r--arch/m32r/kernel/smp.c30
-rw-r--r--arch/m32r/kernel/smpboot.c2
-rw-r--r--arch/m32r/kernel/vmlinux.lds.S1
-rw-r--r--arch/m68k/include/asm/checksum.h160
-rw-r--r--arch/m68k/include/asm/checksum_mm.h148
-rw-r--r--arch/m68k/include/asm/checksum_no.h132
-rw-r--r--arch/m68k/include/asm/dma.h492
-rw-r--r--arch/m68k/include/asm/dma_mm.h16
-rw-r--r--arch/m68k/include/asm/dma_no.h494
-rw-r--r--arch/m68k/include/asm/gpio.h238
-rw-r--r--arch/m68k/include/asm/hardirq.h21
-rw-r--r--arch/m68k/include/asm/hardirq_mm.h16
-rw-r--r--arch/m68k/include/asm/hardirq_no.h27
-rw-r--r--arch/m68k/include/asm/irq.h135
-rw-r--r--arch/m68k/include/asm/irq_mm.h126
-rw-r--r--arch/m68k/include/asm/irq_no.h26
-rw-r--r--arch/m68k/include/asm/m5206sim.h33
-rw-r--r--arch/m68k/include/asm/m520xsim.h77
-rw-r--r--arch/m68k/include/asm/m523xsim.h77
-rw-r--r--arch/m68k/include/asm/m5249sim.h54
-rw-r--r--arch/m68k/include/asm/m5272sim.h29
-rw-r--r--arch/m68k/include/asm/m527xsim.h169
-rw-r--r--arch/m68k/include/asm/m528xsim.h151
-rw-r--r--arch/m68k/include/asm/m5307sim.h32
-rw-r--r--arch/m68k/include/asm/m532xsim.h198
-rw-r--r--arch/m68k/include/asm/m5407sim.h28
-rw-r--r--arch/m68k/include/asm/mcfgpio.h40
-rw-r--r--arch/m68k/include/asm/mcfintc.h89
-rw-r--r--arch/m68k/include/asm/mcfsim.h93
-rw-r--r--arch/m68k/include/asm/mcfsmc.h6
-rw-r--r--arch/m68k/include/asm/nettel.h4
-rw-r--r--arch/m68k/include/asm/pinmux.h30
-rw-r--r--arch/m68k/include/asm/processor.h171
-rw-r--r--arch/m68k/include/asm/processor_mm.h130
-rw-r--r--arch/m68k/include/asm/processor_no.h143
-rw-r--r--arch/m68k/kernel/vmlinux-std.lds1
-rw-r--r--arch/m68k/kernel/vmlinux-sun3.lds1
-rw-r--r--arch/m68knommu/Kconfig6
-rw-r--r--arch/m68knommu/kernel/irq.c21
-rw-r--r--arch/m68knommu/kernel/time.c2
-rw-r--r--arch/m68knommu/kernel/vmlinux.lds.S1
-rw-r--r--arch/m68knommu/lib/checksum.c9
-rw-r--r--arch/m68knommu/platform/5206/Makefile2
-rw-r--r--arch/m68knommu/platform/5206/config.c56
-rw-r--r--arch/m68knommu/platform/5206/gpio.c49
-rw-r--r--arch/m68knommu/platform/5206e/Makefile2
-rw-r--r--arch/m68knommu/platform/5206e/config.c58
-rw-r--r--arch/m68knommu/platform/5206e/gpio.c49
-rw-r--r--arch/m68knommu/platform/520x/Makefile2
-rw-r--r--arch/m68knommu/platform/520x/config.c30
-rw-r--r--arch/m68knommu/platform/520x/gpio.c211
-rw-r--r--arch/m68knommu/platform/523x/Makefile2
-rw-r--r--arch/m68knommu/platform/523x/config.c63
-rw-r--r--arch/m68knommu/platform/523x/gpio.c283
-rw-r--r--arch/m68knommu/platform/5249/Makefile2
-rw-r--r--arch/m68knommu/platform/5249/config.c49
-rw-r--r--arch/m68knommu/platform/5249/gpio.c65
-rw-r--r--arch/m68knommu/platform/5249/intc2.c59
-rw-r--r--arch/m68knommu/platform/5272/Makefile2
-rw-r--r--arch/m68knommu/platform/5272/config.c29
-rw-r--r--arch/m68knommu/platform/5272/gpio.c81
-rw-r--r--arch/m68knommu/platform/527x/Makefile2
-rw-r--r--arch/m68knommu/platform/527x/config.c49
-rw-r--r--arch/m68knommu/platform/527x/gpio.c607
-rw-r--r--arch/m68knommu/platform/528x/Makefile2
-rw-r--r--arch/m68knommu/platform/528x/config.c51
-rw-r--r--arch/m68knommu/platform/528x/gpio.c438
-rw-r--r--arch/m68knommu/platform/5307/Makefile2
-rw-r--r--arch/m68knommu/platform/5307/config.c65
-rw-r--r--arch/m68knommu/platform/5307/gpio.c49
-rw-r--r--arch/m68knommu/platform/532x/Makefile2
-rw-r--r--arch/m68knommu/platform/532x/config.c53
-rw-r--r--arch/m68knommu/platform/532x/gpio.c337
-rw-r--r--arch/m68knommu/platform/5407/Makefile2
-rw-r--r--arch/m68knommu/platform/5407/config.c68
-rw-r--r--arch/m68knommu/platform/5407/gpio.c49
-rw-r--r--arch/m68knommu/platform/68328/ints.c72
-rw-r--r--arch/m68knommu/platform/68360/ints.c44
-rw-r--r--arch/m68knommu/platform/coldfire/Makefile23
-rw-r--r--arch/m68knommu/platform/coldfire/gpio.c127
-rw-r--r--arch/m68knommu/platform/coldfire/intc-2.c93
-rw-r--r--arch/m68knommu/platform/coldfire/intc-simr.c78
-rw-r--r--arch/m68knommu/platform/coldfire/intc.c153
-rw-r--r--arch/m68knommu/platform/coldfire/pinmux.c28
-rw-r--r--arch/m68knommu/platform/coldfire/pit.c8
-rw-r--r--arch/m68knommu/platform/coldfire/timers.c18
-rw-r--r--arch/m68knommu/platform/coldfire/vectors.c20
-rw-r--r--arch/microblaze/Kconfig3
-rw-r--r--arch/microblaze/include/asm/atomic.h101
-rw-r--r--arch/microblaze/include/asm/bitops.h26
-rw-r--r--arch/microblaze/include/asm/bug.h14
-rw-r--r--arch/microblaze/include/asm/bugs.h18
-rw-r--r--arch/microblaze/include/asm/checksum.h70
-rw-r--r--arch/microblaze/include/asm/device.h15
-rw-r--r--arch/microblaze/include/asm/fb.h1
-rw-r--r--arch/microblaze/include/asm/hardirq.h14
-rw-r--r--arch/microblaze/include/asm/ioctls.h92
-rw-r--r--arch/microblaze/include/asm/ipcbuf.h37
-rw-r--r--arch/microblaze/include/asm/irq.h6
-rw-r--r--arch/microblaze/include/asm/mman.h24
-rw-r--r--arch/microblaze/include/asm/mmu.h7
-rw-r--r--arch/microblaze/include/asm/mmu_context.h2
-rw-r--r--arch/microblaze/include/asm/mmu_context_no.h23
-rw-r--r--arch/microblaze/include/asm/module.h10
-rw-r--r--arch/microblaze/include/asm/msgbuf.h32
-rw-r--r--arch/microblaze/include/asm/param.h31
-rw-r--r--arch/microblaze/include/asm/parport.h1
-rw-r--r--arch/microblaze/include/asm/pci.h2
-rw-r--r--arch/microblaze/include/asm/posix_types.h68
-rw-r--r--arch/microblaze/include/asm/scatterlist.h29
-rw-r--r--arch/microblaze/include/asm/sembuf.h35
-rw-r--r--arch/microblaze/include/asm/serial.h15
-rw-r--r--arch/microblaze/include/asm/shmbuf.h43
-rw-r--r--arch/microblaze/include/asm/shmparam.h7
-rw-r--r--arch/microblaze/include/asm/siginfo.h14
-rw-r--r--arch/microblaze/include/asm/signal.h166
-rw-r--r--arch/microblaze/include/asm/socket.h70
-rw-r--r--arch/microblaze/include/asm/sockios.h24
-rw-r--r--arch/microblaze/include/asm/stat.h69
-rw-r--r--arch/microblaze/include/asm/swab.h9
-rw-r--r--arch/microblaze/include/asm/syscalls.h46
-rw-r--r--arch/microblaze/include/asm/system.h3
-rw-r--r--arch/microblaze/include/asm/termbits.h204
-rw-r--r--arch/microblaze/include/asm/termios.h89
-rw-r--r--arch/microblaze/include/asm/timex.h6
-rw-r--r--arch/microblaze/include/asm/types.h39
-rw-r--r--arch/microblaze/include/asm/ucontext.h23
-rw-r--r--arch/microblaze/include/asm/unistd.h6
-rw-r--r--arch/microblaze/include/asm/vga.h2
-rw-r--r--arch/microblaze/kernel/entry-nommu.S20
-rw-r--r--arch/microblaze/kernel/entry.S34
-rw-r--r--arch/microblaze/kernel/signal.c174
-rw-r--r--arch/microblaze/kernel/sys_microblaze.c20
-rw-r--r--arch/microblaze/kernel/syscall_table.S14
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S2
-rw-r--r--arch/microblaze/lib/Makefile2
-rw-r--r--arch/microblaze/lib/checksum.c172
-rw-r--r--arch/mips/Kconfig29
-rw-r--r--arch/mips/Makefile7
-rw-r--r--arch/mips/alchemy/common/time.c2
-rw-r--r--arch/mips/ar7/Makefile10
-rw-r--r--arch/mips/ar7/clock.c440
-rw-r--r--arch/mips/ar7/gpio.c48
-rw-r--r--arch/mips/ar7/irq.c176
-rw-r--r--arch/mips/ar7/memory.c72
-rw-r--r--arch/mips/ar7/platform.c555
-rw-r--r--arch/mips/ar7/prom.c297
-rw-r--r--arch/mips/ar7/setup.c94
-rw-r--r--arch/mips/ar7/time.c30
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c2
-rw-r--r--arch/mips/cobalt/buttons.c2
-rw-r--r--arch/mips/cobalt/lcd.c2
-rw-r--r--arch/mips/cobalt/led.c2
-rw-r--r--arch/mips/cobalt/mtd.c2
-rw-r--r--arch/mips/cobalt/rtc.c2
-rw-r--r--arch/mips/cobalt/serial.c2
-rw-r--r--arch/mips/cobalt/time.c2
-rw-r--r--arch/mips/configs/ar7_defconfig1182
-rw-r--r--arch/mips/gt64120/wrppmc/serial.c2
-rw-r--r--arch/mips/include/asm/amon.h7
-rw-r--r--arch/mips/include/asm/ds1287.h2
-rw-r--r--arch/mips/include/asm/elf.h4
-rw-r--r--arch/mips/include/asm/gcmpregs.h2
-rw-r--r--arch/mips/include/asm/gic.h6
-rw-r--r--arch/mips/include/asm/irq_gt641xx.h2
-rw-r--r--arch/mips/include/asm/mach-ar7/ar7.h178
-rw-r--r--arch/mips/include/asm/mach-ar7/gpio.h110
-rw-r--r--arch/mips/include/asm/mach-ar7/irq.h16
-rw-r--r--arch/mips/include/asm/mach-ar7/prom.h25
-rw-r--r--arch/mips/include/asm/mach-ar7/spaces.h22
-rw-r--r--arch/mips/include/asm/mach-ar7/war.h25
-rw-r--r--arch/mips/include/asm/mach-cobalt/irq.h2
-rw-r--r--arch/mips/include/asm/mach-cobalt/mach-gt64120.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/topology.h2
-rw-r--r--arch/mips/include/asm/mmu_context.h10
-rw-r--r--arch/mips/include/asm/octeon/pci-octeon.h45
-rw-r--r--arch/mips/include/asm/page.h9
-rw-r--r--arch/mips/include/asm/pci.h2
-rw-r--r--arch/mips/include/asm/reg.h2
-rw-r--r--arch/mips/include/asm/smp-ops.h2
-rw-r--r--arch/mips/include/asm/smp.h2
-rw-r--r--arch/mips/include/asm/swab.h8
-rw-r--r--arch/mips/include/asm/unistd.h18
-rw-r--r--arch/mips/include/asm/vr41xx/capcella.h2
-rw-r--r--arch/mips/include/asm/vr41xx/giu.h22
-rw-r--r--arch/mips/include/asm/vr41xx/irq.h2
-rw-r--r--arch/mips/include/asm/vr41xx/mpc30x.h2
-rw-r--r--arch/mips/include/asm/vr41xx/pci.h2
-rw-r--r--arch/mips/include/asm/vr41xx/siu.h2
-rw-r--r--arch/mips/include/asm/vr41xx/tb0219.h2
-rw-r--r--arch/mips/include/asm/vr41xx/tb0226.h2
-rw-r--r--arch/mips/include/asm/vr41xx/vr41xx.h2
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c20
-rw-r--r--arch/mips/kernel/cevt-ds1287.c2
-rw-r--r--arch/mips/kernel/cevt-gt641xx.c2
-rw-r--r--arch/mips/kernel/csrc-ioasic.c2
-rw-r--r--arch/mips/kernel/irq-gic.c19
-rw-r--r--arch/mips/kernel/irq-gt641xx.c2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/smp-cmp.c70
-rw-r--r--arch/mips/kernel/smp-mt.c4
-rw-r--r--arch/mips/kernel/smp-up.c3
-rw-r--r--arch/mips/kernel/smp.c4
-rw-r--r--arch/mips/kernel/sync-r4k.c31
-rw-r--r--arch/mips/kernel/vmlinux.lds.S1
-rw-r--r--arch/mips/kernel/vpe.c2
-rw-r--r--arch/mips/mipssim/sim_smtc.c5
-rw-r--r--arch/mips/mm/c-octeon.c2
-rw-r--r--arch/mips/mti-malta/malta-init.c14
-rw-r--r--arch/mips/mti-malta/malta-int.c89
-rw-r--r--arch/mips/mti-malta/malta-reset.c3
-rw-r--r--arch/mips/mti-malta/malta-smtc.c4
-rw-r--r--arch/mips/pci/Makefile5
-rw-r--r--arch/mips/pci/fixup-capcella.c2
-rw-r--r--arch/mips/pci/fixup-mpc30x.c2
-rw-r--r--arch/mips/pci/fixup-tb0219.c2
-rw-r--r--arch/mips/pci/fixup-tb0226.c2
-rw-r--r--arch/mips/pci/fixup-tb0287.c2
-rw-r--r--arch/mips/pci/msi-octeon.c288
-rw-r--r--arch/mips/pci/ops-vr41xx.c6
-rw-r--r--arch/mips/pci/pci-octeon.c675
-rw-r--r--arch/mips/pci/pci-vr41xx.c6
-rw-r--r--arch/mips/pci/pci-vr41xx.h4
-rw-r--r--arch/mips/pci/pcie-octeon.c1369
-rw-r--r--arch/mips/pmc-sierra/yosemite/smp.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c4
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c5
-rw-r--r--arch/mips/sibyte/sb1250/smp.c5
-rw-r--r--arch/mips/vr41xx/casio-e55/setup.c2
-rw-r--r--arch/mips/vr41xx/common/bcu.c8
-rw-r--r--arch/mips/vr41xx/common/cmu.c8
-rw-r--r--arch/mips/vr41xx/common/giu.c2
-rw-r--r--arch/mips/vr41xx/common/icu.c8
-rw-r--r--arch/mips/vr41xx/common/init.c2
-rw-r--r--arch/mips/vr41xx/common/irq.c2
-rw-r--r--arch/mips/vr41xx/common/pmu.c2
-rw-r--r--arch/mips/vr41xx/common/rtc.c2
-rw-r--r--arch/mips/vr41xx/common/siu.c2
-rw-r--r--arch/mips/vr41xx/common/type.c2
-rw-r--r--arch/mips/vr41xx/ibm-workpad/setup.c2
-rw-r--r--arch/mn10300/include/asm/gdb-stub.h1
-rw-r--r--arch/mn10300/include/asm/mmu_context.h12
-rw-r--r--arch/mn10300/include/asm/pci.h17
-rw-r--r--arch/mn10300/include/asm/unistd.h4
-rw-r--r--arch/mn10300/kernel/entry.S2
-rw-r--r--arch/mn10300/kernel/vmlinux.lds.S3
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/include/asm/atomic.h14
-rw-r--r--arch/parisc/include/asm/dma.h3
-rw-r--r--arch/parisc/include/asm/pci.h1
-rw-r--r--arch/parisc/include/asm/perf_counter.h7
-rw-r--r--arch/parisc/include/asm/processor.h3
-rw-r--r--arch/parisc/include/asm/smp.h1
-rw-r--r--arch/parisc/include/asm/system.h4
-rw-r--r--arch/parisc/include/asm/tlbflush.h14
-rw-r--r--arch/parisc/include/asm/unistd.h6
-rw-r--r--arch/parisc/kernel/cache.c26
-rw-r--r--arch/parisc/kernel/inventory.c41
-rw-r--r--arch/parisc/kernel/irq.c10
-rw-r--r--arch/parisc/kernel/pci-dma.c12
-rw-r--r--arch/parisc/kernel/pci.c3
-rw-r--r--arch/parisc/kernel/processor.c19
-rw-r--r--arch/parisc/kernel/setup.c3
-rw-r--r--arch/parisc/kernel/sys_parisc32.c62
-rw-r--r--arch/parisc/kernel/syscall_table.S4
-rw-r--r--arch/parisc/kernel/time.c90
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S1
-rw-r--r--arch/parisc/lib/checksum.c2
-rw-r--r--arch/parisc/lib/memcpy.c2
-rw-r--r--arch/parisc/math-emu/decode_exc.c2
-rw-r--r--arch/parisc/mm/fault.c3
-rw-r--r--arch/parisc/mm/init.c29
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/boot/.gitignore10
-rw-r--r--arch/powerpc/boot/dts/amigaone.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8569mds.dts1
-rw-r--r--arch/powerpc/include/asm/cpm1.h2
-rw-r--r--arch/powerpc/include/asm/device.h3
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h24
-rw-r--r--arch/powerpc/include/asm/highmem.h57
-rw-r--r--arch/powerpc/include/asm/hw_irq.h20
-rw-r--r--arch/powerpc/include/asm/kvm_host.h4
-rw-r--r--arch/powerpc/include/asm/pci.h1
-rw-r--r--arch/powerpc/include/asm/perf_counter.h2
-rw-r--r--arch/powerpc/include/asm/pte-hash64-64k.h3
-rw-r--r--arch/powerpc/include/asm/rtas.h5
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/topology.h12
-rw-r--r--arch/powerpc/kernel/entry_32.S127
-rw-r--r--arch/powerpc/kernel/head_32.S17
-rw-r--r--arch/powerpc/kernel/of_device.c2
-rw-r--r--arch/powerpc/kernel/power7-pmu.c1
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/rtas.c69
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/smp.c7
-rw-r--r--arch/powerpc/kernel/udbg_16550.c2
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S1
-rw-r--r--arch/powerpc/kvm/44x.c4
-rw-r--r--arch/powerpc/kvm/44x_tlb.c11
-rw-r--r--arch/powerpc/kvm/Kconfig14
-rw-r--r--arch/powerpc/kvm/Makefile4
-rw-r--r--arch/powerpc/kvm/booke.c2
-rw-r--r--arch/powerpc/kvm/e500.c7
-rw-r--r--arch/powerpc/kvm/e500_emulate.c3
-rw-r--r--arch/powerpc/kvm/e500_tlb.c26
-rw-r--r--arch/powerpc/kvm/e500_tlb.h6
-rw-r--r--arch/powerpc/kvm/emulate.c7
-rw-r--r--arch/powerpc/kvm/powerpc.c19
-rw-r--r--arch/powerpc/kvm/trace.h104
-rw-r--r--arch/powerpc/mm/Makefile1
-rw-r--r--arch/powerpc/mm/highmem.c77
-rw-r--r--arch/powerpc/mm/stab.c2
-rw-r--r--arch/powerpc/platforms/44x/warp.c44
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c1
-rw-r--r--arch/powerpc/platforms/85xx/smp.c9
-rw-r--r--arch/powerpc/platforms/85xx/socrates.c6
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c1
-rw-r--r--arch/powerpc/platforms/cell/smp.c30
-rw-r--r--arch/powerpc/platforms/chrp/smp.c33
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c15
-rw-r--r--arch/powerpc/platforms/powermac/setup.c41
-rw-r--r--arch/powerpc/platforms/powermac/smp.c166
-rw-r--r--arch/powerpc/platforms/ps3/smp.c2
-rw-r--r--arch/powerpc/platforms/pseries/smp.c30
-rw-r--r--arch/powerpc/sysdev/mpic.c34
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c9
-rw-r--r--arch/s390/crypto/des_s390.c11
-rw-r--r--arch/s390/include/asm/chsc.h28
-rw-r--r--arch/s390/include/asm/cio.h223
-rw-r--r--arch/s390/include/asm/kvm_host.h19
-rw-r--r--arch/s390/include/asm/percpu.h32
-rw-r--r--arch/s390/include/asm/scsw.h (renamed from drivers/s390/cio/scsw.c)345
-rw-r--r--arch/s390/include/asm/smp.h2
-rw-r--r--arch/s390/include/asm/timex.h16
-rw-r--r--arch/s390/include/asm/topology.h1
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/head.S2
-rw-r--r--arch/s390/kernel/smp.c4
-rw-r--r--arch/s390/kernel/time.c9
-rw-r--r--arch/s390/kernel/vmlinux.lds.S1
-rw-r--r--arch/s390/kvm/Kconfig9
-rw-r--r--arch/s390/kvm/gaccess.h23
-rw-r--r--arch/s390/kvm/intercept.c18
-rw-r--r--arch/s390/kvm/kvm-s390.c101
-rw-r--r--arch/s390/kvm/kvm-s390.h32
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/kvm/sigp.c60
-rw-r--r--arch/s390/lib/delay.c2
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/Kconfig.debug4
-rw-r--r--arch/sh/boards/mach-se/7206/io.c2
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c148
-rw-r--r--arch/sh/configs/migor_defconfig53
-rw-r--r--arch/sh/configs/se7724_defconfig25
-rw-r--r--arch/sh/include/asm/hwblk.h61
-rw-r--r--arch/sh/include/asm/lmb.h6
-rw-r--r--arch/sh/include/asm/pci.h1
-rw-r--r--arch/sh/include/asm/perf_counter.h4
-rw-r--r--arch/sh/include/asm/smp.h1
-rw-r--r--arch/sh/include/asm/suspend.h9
-rw-r--r--arch/sh/include/asm/syscall_32.h1
-rw-r--r--arch/sh/include/asm/topology.h1
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7722.h14
-rw-r--r--arch/sh/include/mach-se/mach/se7724.h5
-rw-r--r--arch/sh/kernel/cpu/Makefile2
-rw-r--r--arch/sh/kernel/cpu/hwblk.c130
-rw-r--r--arch/sh/kernel/cpu/sh4a/Makefile2
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7722.c60
-rw-r--r--arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c106
-rw-r--r--arch/sh/kernel/cpu/shmobile/Makefile1
-rw-r--r--arch/sh/kernel/cpu/shmobile/cpuidle.c102
-rw-r--r--arch/sh/kernel/cpu/shmobile/pm.c26
-rw-r--r--arch/sh/kernel/setup.c71
-rw-r--r--arch/sh/kernel/time.c2
-rw-r--r--arch/sh/kernel/vmlinux.lds.S1
-rw-r--r--arch/sh/mm/fault_32.c61
-rw-r--r--arch/sh/mm/numa.c36
-rw-r--r--arch/sh/mm/tlbflush_64.c15
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/boot/Makefile6
-rw-r--r--arch/sparc/boot/piggyback_32.c4
-rw-r--r--arch/sparc/boot/piggyback_64.c1
-rw-r--r--arch/sparc/include/asm/device.h3
-rw-r--r--arch/sparc/include/asm/pci_32.h1
-rw-r--r--arch/sparc/include/asm/pci_64.h1
-rw-r--r--arch/sparc/include/asm/smp_64.h1
-rw-r--r--arch/sparc/include/asm/topology_64.h16
-rw-r--r--arch/sparc/kernel/irq_64.c45
-rw-r--r--arch/sparc/kernel/smp_64.c42
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S1
-rw-r--r--arch/um/drivers/slip_kern.c1
-rw-r--r--arch/um/drivers/slirp_kern.c1
-rw-r--r--arch/um/include/asm/dma-mapping.h4
-rw-r--r--arch/um/include/asm/mmu_context.h4
-rw-r--r--arch/um/include/asm/pci.h1
-rw-r--r--arch/um/kernel/dyn.lds.S2
-rw-r--r--arch/um/kernel/smp.c2
-rw-r--r--arch/um/kernel/uml.lds.S2
-rw-r--r--arch/x86/Kconfig21
-rw-r--r--arch/x86/boot/video-bios.c3
-rw-r--r--arch/x86/boot/video-vesa.c11
-rw-r--r--arch/x86/boot/video-vga.c10
-rw-r--r--arch/x86/boot/video.c5
-rw-r--r--arch/x86/boot/video.h20
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c2
-rw-r--r--arch/x86/include/asm/apicdef.h2
-rw-r--r--arch/x86/include/asm/atomic_32.h185
-rw-r--r--arch/x86/include/asm/atomic_64.h42
-rw-r--r--arch/x86/include/asm/boot.h6
-rw-r--r--arch/x86/include/asm/device.h3
-rw-r--r--arch/x86/include/asm/fixmap.h10
-rw-r--r--arch/x86/include/asm/i387.h1
-rw-r--r--arch/x86/include/asm/ioctls.h95
-rw-r--r--arch/x86/include/asm/ipcbuf.h29
-rw-r--r--arch/x86/include/asm/kvm.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h47
-rw-r--r--arch/x86/include/asm/lguest_hcall.h2
-rw-r--r--arch/x86/include/asm/mce.h2
-rw-r--r--arch/x86/include/asm/mman.h14
-rw-r--r--arch/x86/include/asm/mmu_context.h6
-rw-r--r--arch/x86/include/asm/module.h13
-rw-r--r--arch/x86/include/asm/msgbuf.h40
-rw-r--r--arch/x86/include/asm/msr-index.h5
-rw-r--r--arch/x86/include/asm/nmi.h1
-rw-r--r--arch/x86/include/asm/param.h23
-rw-r--r--arch/x86/include/asm/paravirt.h711
-rw-r--r--arch/x86/include/asm/paravirt_types.h720
-rw-r--r--arch/x86/include/asm/pci.h3
-rw-r--r--arch/x86/include/asm/percpu.h1
-rw-r--r--arch/x86/include/asm/perf_counter.h3
-rw-r--r--arch/x86/include/asm/pgtable.h16
-rw-r--r--arch/x86/include/asm/proto.h11
-rw-r--r--arch/x86/include/asm/scatterlist.h27
-rw-r--r--arch/x86/include/asm/shmbuf.h52
-rw-r--r--arch/x86/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/socket.h61
-rw-r--r--arch/x86/include/asm/sockios.h14
-rw-r--r--arch/x86/include/asm/stacktrace.h2
-rw-r--r--arch/x86/include/asm/termbits.h199
-rw-r--r--arch/x86/include/asm/termios.h115
-rw-r--r--arch/x86/include/asm/types.h12
-rw-r--r--arch/x86/include/asm/ucontext.h8
-rw-r--r--arch/x86/include/asm/vmx.h8
-rw-r--r--arch/x86/kernel/acpi/cstate.c36
-rw-r--r--arch/x86/kernel/amd_iommu.c4
-rw-r--r--arch/x86/kernel/amd_iommu_init.c13
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c38
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/cpu/common.c3
-rw-r--r--arch/x86/kernel/cpu/cpu_debug.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c16
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/amd.c97
-rw-r--r--arch/x86/kernel/cpu/mtrr/centaur.c168
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c350
-rw-r--r--arch/x86/kernel/cpu/mtrr/cyrix.c94
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c304
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c135
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c455
-rw-r--r--arch/x86/kernel/cpu/mtrr/mtrr.h19
-rw-r--r--arch/x86/kernel/cpu/mtrr/state.c68
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c44
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c5
-rw-r--r--arch/x86/kernel/cpu/proc.c2
-rw-r--r--arch/x86/kernel/ds.c6
-rw-r--r--arch/x86/kernel/dumpstack.c1
-rw-r--r--arch/x86/kernel/dumpstack_32.c6
-rw-r--r--arch/x86/kernel/dumpstack_64.c22
-rw-r--r--arch/x86/kernel/e820.c16
-rw-r--r--arch/x86/kernel/kgdb.c9
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kernel/ldt.c4
-rw-r--r--arch/x86/kernel/pci-dma.c2
-rw-r--r--arch/x86/kernel/process.c6
-rw-r--r--arch/x86/kernel/process_32.c27
-rw-r--r--arch/x86/kernel/process_64.c33
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/setup.c17
-rw-r--r--arch/x86/kernel/setup_percpu.c268
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kernel/tlb_uv.c9
-rw-r--r--arch/x86/kernel/traps.c36
-rw-r--r--arch/x86/kvm/Kconfig21
-rw-r--r--arch/x86/kvm/Makefile35
-rw-r--r--arch/x86/kvm/i8254.c118
-rw-r--r--arch/x86/kvm/i8254.h2
-rw-r--r--arch/x86/kvm/i8259.c48
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h9
-rw-r--r--arch/x86/kvm/kvm_svm.h51
-rw-r--r--arch/x86/kvm/kvm_timer.h2
-rw-r--r--arch/x86/kvm/lapic.c318
-rw-r--r--arch/x86/kvm/lapic.h4
-rw-r--r--arch/x86/kvm/mmu.c355
-rw-r--r--arch/x86/kvm/mmu.h4
-rw-r--r--arch/x86/kvm/paging_tmpl.h45
-rw-r--r--arch/x86/kvm/svm.c259
-rw-r--r--arch/x86/kvm/timer.c16
-rw-r--r--arch/x86/kvm/trace.h270
-rw-r--r--arch/x86/kvm/vmx.c433
-rw-r--r--arch/x86/kvm/x86.c636
-rw-r--r--arch/x86/kvm/x86.h4
-rw-r--r--arch/x86/kvm/x86_emulate.c259
-rw-r--r--arch/x86/lguest/boot.c2
-rw-r--r--arch/x86/lib/Makefile1
-rw-r--r--arch/x86/lib/atomic64_32.c230
-rw-r--r--arch/x86/lib/clear_page_64.S5
-rw-r--r--arch/x86/lib/copy_user_64.S1
-rw-r--r--arch/x86/lib/delay.c3
-rw-r--r--arch/x86/mm/fault.c51
-rw-r--r--arch/x86/mm/highmem_32.c2
-rw-r--r--arch/x86/mm/init.c18
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/mm/pageattr.c66
-rw-r--r--arch/x86/mm/tlb.c15
-rw-r--r--arch/x86/oprofile/nmi_int.c101
-rw-r--r--arch/x86/oprofile/op_model_amd.c267
-rw-r--r--arch/x86/oprofile/op_model_p4.c60
-rw-r--r--arch/x86/oprofile/op_model_ppro.c95
-rw-r--r--arch/x86/oprofile/op_x86_model.h47
-rw-r--r--arch/x86/pci/acpi.c59
-rw-r--r--arch/x86/pci/amd_bus.c8
-rw-r--r--arch/x86/power/cpu.c2
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S1
-rw-r--r--block/Makefile2
-rw-r--r--block/as-iosched.c10
-rw-r--r--block/blk-core.c133
-rw-r--r--block/blk-merge.c49
-rw-r--r--block/blk.h1
-rw-r--r--block/bsg.c2
-rw-r--r--block/cfq-iosched.c224
-rw-r--r--block/cmd-filter.c233
-rw-r--r--block/elevator.c8
-rw-r--r--block/scsi_ioctl.c43
-rw-r--r--crypto/Kconfig2
-rw-r--r--crypto/ablkcipher.c23
-rw-r--r--crypto/ansi_cprng.c34
-rw-r--r--crypto/internal.h7
-rw-r--r--crypto/tcrypt.c15
-rw-r--r--crypto/testmgr.c21
-rw-r--r--drivers/acpi/Kconfig17
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/acpi_memhotplug.c40
-rw-r--r--drivers/acpi/acpica/evevent.c13
-rw-r--r--drivers/acpi/acpica/hwsleep.c24
-rw-r--r--drivers/acpi/ec.c118
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/processor_aggregator.c389
-rw-r--r--drivers/acpi/processor_core.c244
-rw-r--r--drivers/acpi/processor_idle.c10
-rw-r--r--drivers/acpi/processor_perflib.c3
-rw-r--r--drivers/acpi/processor_thermal.c3
-rw-r--r--drivers/acpi/processor_throttling.c88
-rw-r--r--drivers/acpi/scan.c5
-rw-r--r--drivers/acpi/sleep.c14
-rw-r--r--drivers/acpi/video.c12
-rw-r--r--drivers/amba/bus.c12
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/sata_sil.c13
-rw-r--r--drivers/base/platform.c36
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/block/DAC960.c4
-rw-r--r--drivers/block/Kconfig16
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/aoe/aoeblk.c1
-rw-r--r--drivers/block/cciss.c15
-rw-r--r--drivers/block/cciss_cmd.h1
-rw-r--r--drivers/block/floppy.c5
-rw-r--r--drivers/block/osdblk.c701
-rw-r--r--drivers/block/virtio_blk.c5
-rw-r--r--drivers/bluetooth/btusb.c4
-rw-r--r--drivers/char/Kconfig4
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/bsr.c42
-rw-r--r--drivers/char/cyclades.c2276
-rw-r--r--drivers/char/hw_random/amd-rng.c4
-rw-r--r--drivers/char/hw_random/geode-rng.c3
-rw-r--r--drivers/char/mem.c1
-rw-r--r--drivers/char/random.c14
-rw-r--r--drivers/char/tb0219.c4
-rw-r--r--drivers/char/tty_ldisc.c15
-rw-r--r--drivers/char/vr41xx_giu.c680
-rw-r--r--drivers/clocksource/sh_tmu.c2
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c12
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c15
-rw-r--r--drivers/dma/Kconfig4
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/dw_dmac.c15
-rw-r--r--drivers/dma/fsldma.c17
-rw-r--r--drivers/dma/fsldma.h1
-rw-r--r--drivers/dma/ioat.c28
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/dma/txx9dmac.c15
-rw-r--r--drivers/edac/amd64_edac.c25
-rw-r--r--drivers/edac/amd64_edac.h2
-rw-r--r--drivers/edac/edac_core.h4
-rw-r--r--drivers/edac/edac_mc_sysfs.c4
-rw-r--r--drivers/edac/mpc85xx_edac.c6
-rw-r--r--drivers/edac/mpc85xx_edac.h1
-rw-r--r--drivers/firewire/core-card.c14
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firewire/core-iso.c24
-rw-r--r--drivers/firewire/core.h3
-rw-r--r--drivers/firewire/sbp2.c10
-rw-r--r--drivers/firmware/dcdbas.c49
-rw-r--r--drivers/firmware/dmi_scan.c34
-rw-r--r--drivers/gpio/Kconfig6
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/pl061.c20
-rw-r--r--drivers/gpio/vr41xx_giu.c586
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_edid.c12
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo.h4
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c20
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c25
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c21
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c25
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c25
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h12
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c12
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h29
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c34
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c12
-rw-r--r--drivers/gpu/drm/i915/intel_display.c199
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1153
-rw-r--r--drivers/gpu/drm/i915/intel_dp.h144
-rw-r--r--drivers/gpu/drm/i915/intel_dp_i2c.c272
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h17
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c16
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c35
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c16
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c344
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c14
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c72
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c53
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c30
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/hid/Kconfig22
-rw-r--r--drivers/hid/Makefile5
-rw-r--r--drivers/hid/hid-a4tech.c4
-rw-r--r--drivers/hid/hid-apple.c4
-rw-r--r--drivers/hid/hid-belkin.c4
-rw-r--r--drivers/hid/hid-cherry.c4
-rw-r--r--drivers/hid/hid-chicony.c4
-rw-r--r--drivers/hid/hid-core.c58
-rw-r--r--drivers/hid/hid-cypress.c4
-rw-r--r--drivers/hid/hid-debug.c438
-rw-r--r--drivers/hid/hid-ezkey.c4
-rw-r--r--drivers/hid/hid-gyration.c4
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c13
-rw-r--r--drivers/hid/hid-kensington.c4
-rw-r--r--drivers/hid/hid-kye.c4
-rw-r--r--drivers/hid/hid-lg.c6
-rw-r--r--drivers/hid/hid-lgff.c1
-rw-r--r--drivers/hid/hid-microsoft.c4
-rw-r--r--drivers/hid/hid-monterey.c4
-rw-r--r--drivers/hid/hid-ntrig.c37
-rw-r--r--drivers/hid/hid-petalynx.c4
-rw-r--r--drivers/hid/hid-pl.c4
-rw-r--r--drivers/hid/hid-samsung.c43
-rw-r--r--drivers/hid/hid-sjoy.c4
-rw-r--r--drivers/hid/hid-sony.c4
-rw-r--r--drivers/hid/hid-sunplus.c4
-rw-r--r--drivers/hid/hid-tmff.c10
-rw-r--r--drivers/hid/hid-topseed.c4
-rw-r--r--drivers/hid/hid-wacom.c4
-rw-r--r--drivers/hid/hid-zpff.c4
-rw-r--r--drivers/hid/usbhid/hid-core.c9
-rw-r--r--drivers/hid/usbhid/hiddev.c4
-rw-r--r--drivers/hwmon/Kconfig10
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/abituguru3.c6
-rw-r--r--drivers/hwmon/hwmon-vid.c10
-rw-r--r--drivers/hwmon/max6650.c1
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/tmp421.c347
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-pxa.c25
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c25
-rw-r--r--drivers/ide/cs5520.c1
-rw-r--r--drivers/ide/ide-acpi.c37
-rw-r--r--drivers/ide/ide-cd.c24
-rw-r--r--drivers/ide/ide-devsets.c2
-rw-r--r--drivers/ide/ide-dma.c21
-rw-r--r--drivers/ide/ide-eh.c2
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-io.c68
-rw-r--r--drivers/ide/ide-ioctls.c3
-rw-r--r--drivers/ide/ide-iops.c4
-rw-r--r--drivers/ide/ide-pm.c30
-rw-r--r--drivers/ide/ide-probe.c23
-rw-r--r--drivers/ieee1394/sbp2.c1
-rw-r--r--drivers/ieee1394/sbp2.h8
-rw-r--r--drivers/input/keyboard/Kconfig294
-rw-r--r--drivers/input/keyboard/Makefile33
-rw-r--r--drivers/input/keyboard/gpio_keys.c33
-rw-r--r--drivers/input/keyboard/matrix_keypad.c453
-rw-r--r--drivers/input/misc/cobalt_btns.c4
-rw-r--r--drivers/input/misc/dm355evm_keys.c42
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h28
-rw-r--r--drivers/input/tablet/wacom_wac.c6
-rw-r--r--drivers/isdn/gigaset/ev-layer.c44
-rw-r--r--drivers/isdn/gigaset/isocdata.c6
-rw-r--r--drivers/leds/Kconfig14
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-alix2.c7
-rw-r--r--drivers/leds/leds-bd2802.c96
-rw-r--r--drivers/leds/leds-cobalt-raq.c2
-rw-r--r--drivers/leds/leds-gpio.c22
-rw-r--r--drivers/leds/leds-lp3944.c466
-rw-r--r--drivers/leds/leds-pca9532.c58
-rw-r--r--drivers/lguest/lg.h4
-rw-r--r--drivers/lguest/lguest_user.c4
-rw-r--r--drivers/macintosh/macio_asic.c11
-rw-r--r--drivers/md/dm-exception-store.c9
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/linear.c4
-rw-r--r--drivers/md/md.c56
-rw-r--r--drivers/md/multipath.c7
-rw-r--r--drivers/md/raid0.c9
-rw-r--r--drivers/md/raid1.c9
-rw-r--r--drivers/md/raid10.c19
-rw-r--r--drivers/md/raid5.c28
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c13
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c2
-rw-r--r--drivers/media/dvb/frontends/cx24123.c2
-rw-r--r--drivers/media/dvb/frontends/dib0070.c2
-rw-r--r--drivers/media/dvb/frontends/mt312.c7
-rw-r--r--drivers/media/dvb/frontends/stv0900_sw.c2
-rw-r--r--drivers/media/dvb/frontends/zl10036.c2
-rw-r--r--drivers/media/dvb/ttpci/Kconfig1
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/radio/radio-si470x.c386
-rw-r--r--drivers/media/video/Kconfig8
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/adv7343.c1
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c12
-rw-r--r--drivers/media/video/cx18/cx18-cards.c34
-rw-r--r--drivers/media/video/cx18/cx18-driver.c41
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c160
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c4
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c30
-rw-r--r--drivers/media/video/cx23885/cx23885.h6
-rw-r--r--drivers/media/video/em28xx/Kconfig2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c86
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c32
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c28
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c92
-rw-r--r--drivers/media/video/em28xx/em28xx.h3
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.h4
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/video/meye.c3
-rw-r--r--drivers/media/video/mt9v011.c431
-rw-r--r--drivers/media/video/mt9v011.h35
-rw-r--r--drivers/media/video/pwc/pwc.h6
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/video/soc_camera.c12
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c205
-rw-r--r--drivers/media/video/uvc/uvc_driver.c139
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c14
-rw-r--r--drivers/media/video/uvc/uvc_video.c30
-rw-r--r--drivers/media/video/uvc/uvcvideo.h160
-rw-r--r--drivers/media/video/v4l2-ioctl.c23
-rw-r--r--drivers/media/video/vivi.c99
-rw-r--r--drivers/mfd/ab3100-core.c8
-rw-r--r--drivers/mfd/dm355evm_msp.c3
-rw-r--r--drivers/mfd/ezx-pcap.c107
-rw-r--r--drivers/mfd/sm501.c3
-rw-r--r--drivers/mfd/twl4030-irq.c2
-rw-r--r--drivers/misc/Kconfig29
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/hwlat_detector.c1208
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mtd/cmdlinepart.c2
-rw-r--r--drivers/mtd/devices/m25p80.c6
-rw-r--r--drivers/mtd/devices/slram.c2
-rw-r--r--drivers/mtd/ftl.c2
-rw-r--r--drivers/mtd/inftlcore.c11
-rw-r--r--drivers/mtd/maps/integrator-flash.c22
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/omap2.c7
-rw-r--r--drivers/mtd/nftlcore.c16
-rw-r--r--drivers/mtd/ubi/build.c1
-rw-r--r--drivers/mtd/ubi/debug.c2
-rw-r--r--drivers/mtd/ubi/debug.h7
-rw-r--r--drivers/mtd/ubi/io.c99
-rw-r--r--drivers/mtd/ubi/scan.c14
-rw-r--r--drivers/mtd/ubi/scan.h2
-rw-r--r--drivers/mtd/ubi/ubi-media.h12
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/wl.c8
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/benet/be.h2
-rw-r--r--drivers/net/benet/be_ethtool.c4
-rw-r--r--drivers/net/benet/be_hw.h4
-rw-r--r--drivers/net/benet/be_main.c61
-rw-r--r--drivers/net/bnx2x_main.c10
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/e1000/e1000_main.c11
-rw-r--r--drivers/net/e1000e/defines.h3
-rw-r--r--drivers/net/e1000e/hw.h4
-rw-r--r--drivers/net/e1000e/ich8lan.c270
-rw-r--r--drivers/net/e1000e/lib.c6
-rw-r--r--drivers/net/e1000e/netdev.c3
-rw-r--r--drivers/net/e1000e/phy.c12
-rw-r--r--drivers/net/fsl_pq_mdio.c8
-rw-r--r--drivers/net/igb/igb_main.c14
-rw-r--r--drivers/net/irda/bfin_sir.c16
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c69
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c3
-rw-r--r--drivers/net/mdio.c4
-rw-r--r--drivers/net/netxen/netxen_nic.h17
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/netxen/netxen_nic_init.c111
-rw-r--r--drivers/net/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/phy/phy.c25
-rw-r--r--drivers/net/qlge/qlge.h2
-rw-r--r--drivers/net/qlge/qlge_ethtool.c4
-rw-r--r--drivers/net/qlge/qlge_main.c153
-rw-r--r--drivers/net/qlge/qlge_mpi.c6
-rw-r--r--drivers/net/sfc/efx.c3
-rw-r--r--drivers/net/sh_eth.c9
-rw-r--r--drivers/net/sky2.c2
-rw-r--r--drivers/net/slip.c96
-rw-r--r--drivers/net/usb/cdc_eem.c2
-rw-r--r--drivers/net/usb/dm9601.c10
-rw-r--r--drivers/net/usb/net1080.c12
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/smsc95xx.c10
-rw-r--r--drivers/net/usb/usbnet.c30
-rw-r--r--drivers/net/veth.c41
-rw-r--r--drivers/oprofile/buffer_sync.c3
-rw-r--r--drivers/oprofile/cpu_buffer.c16
-rw-r--r--drivers/oprofile/oprofile_stats.c1
-rw-r--r--drivers/parisc/ccio-dma.c7
-rw-r--r--drivers/parisc/dino.c12
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/gsc.c4
-rw-r--r--drivers/parisc/gsc.h2
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/lba_pci.c41
-rw-r--r--drivers/parisc/sba_iommu.c2
-rw-r--r--drivers/parisc/superio.c6
-rw-r--r--drivers/parport/parport_pc.c5
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c2
-rw-r--r--drivers/pci/intel-iommu.c788
-rw-r--r--drivers/pci/iova.c26
-rw-r--r--drivers/pci/msi.c64
-rw-r--r--drivers/pci/msi.h10
-rw-r--r--drivers/pci/pci.c15
-rw-r--r--drivers/pci/pcie/aer/ecrc.c2
-rw-r--r--drivers/pci/probe.c3
-rw-r--r--drivers/pci/quirks.c5
-rw-r--r--drivers/pci/setup-res.c1
-rw-r--r--drivers/pci/slot.c4
-rw-r--r--drivers/pcmcia/vrc4171_card.c4
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c4
-rw-r--r--drivers/pcmcia/vrc4173_cardu.h2
-rw-r--r--drivers/pcmcia/yenta_socket.c16
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/eeepc-laptop.c346
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ds2782_battery.c330
-rw-r--r--drivers/power/olpc_battery.c76
-rw-r--r--drivers/power/power_supply_sysfs.c12
-rw-r--r--drivers/regulator/Kconfig7
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c62
-rw-r--r--drivers/regulator/pcap-regulator.c318
-rw-r--r--drivers/regulator/virtual.c44
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/rtc/Kconfig19
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-bfin.c30
-rw-r--r--drivers/rtc/rtc-msm6242.c268
-rw-r--r--drivers/rtc/rtc-rp5c01.c222
-rw-r--r--drivers/rtc/rtc-vr41xx.c4
-rw-r--r--drivers/s390/block/dasd_eckd.c41
-rw-r--r--drivers/s390/block/dasd_erp.c2
-rw-r--r--drivers/s390/block/dasd_fba.c21
-rw-r--r--drivers/s390/block/dasd_int.h3
-rw-r--r--drivers/s390/char/monreader.c4
-rw-r--r--drivers/s390/char/sclp_rw.h5
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/chsc.h24
-rw-r--r--drivers/scsi/constants.c95
-rw-r--r--drivers/scsi/cxgb3i/Kbuild2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c11
-rw-r--r--drivers/scsi/fnic/fnic_main.c8
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c7
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c7
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_transport_fc.c5
-rw-r--r--drivers/scsi/sg.c4
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/serial/8250.c22
-rw-r--r--drivers/serial/8250.h1
-rw-r--r--drivers/serial/8250_pci.c6
-rw-r--r--drivers/serial/sh-sci.c7
-rw-r--r--drivers/serial/vr41xx_siu.c2
-rw-r--r--drivers/spi/omap_uwire.c2
-rw-r--r--drivers/spi/spi_bitbang.c24
-rw-r--r--drivers/spi/spidev.c17
-rw-r--r--drivers/ssb/driver_mipscore.c85
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/gadget/m66592-udc.c34
-rw-r--r--drivers/usb/gadget/m66592-udc.h1
-rw-r--r--drivers/usb/host/Kconfig4
-rw-r--r--drivers/usb/host/ohci-q.c2
-rw-r--r--drivers/usb/musb/musb_core.c18
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/video/Kconfig4
-rw-r--r--drivers/video/atafb.c7
-rw-r--r--drivers/video/atmel_lcdfb.c2
-rw-r--r--drivers/video/aty/atyfb.h3
-rw-r--r--drivers/video/aty/atyfb_base.c141
-rw-r--r--drivers/video/aty/mach64_accel.c7
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/cobalt_lcdfb.c2
-rw-r--r--drivers/video/fbmem.c13
-rw-r--r--drivers/video/fsl-diu-fb.c14
-rw-r--r--drivers/video/i810/i810_main.c2
-rw-r--r--drivers/video/matrox/matroxfb_base.c3
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c5
-rw-r--r--drivers/video/mx3fb.c17
-rw-r--r--drivers/video/omap/omapfb_main.c4
-rw-r--r--drivers/video/platinumfb.c2
-rw-r--r--drivers/video/pxafb.c2
-rw-r--r--drivers/video/sh7760fb.c19
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c53
-rw-r--r--drivers/video/sis/sis_main.c2
-rw-r--r--drivers/video/sm501fb.c23
-rw-r--r--drivers/video/w100fb.c2
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c2
-rw-r--r--drivers/watchdog/sa1100_wdt.c5
-rw-r--r--drivers/watchdog/w83627hf_wdt.c5
-rw-r--r--drivers/watchdog/w83697ug_wdt.c4
-rw-r--r--drivers/watchdog/wdrtas.c8
-rw-r--r--drivers/xen/events.c17
-rw-r--r--fs/afs/flock.c1
-rw-r--r--fs/aio.c24
-rw-r--r--fs/binfmt_elf.c9
-rw-r--r--fs/bio-integrity.c170
-rw-r--r--fs/bio.c11
-rw-r--r--fs/btrfs/async-thread.c2
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/btrfs/extent-tree.c566
-rw-r--r--fs/btrfs/file.c5
-rw-r--r--fs/btrfs/inode.c25
-rw-r--r--fs/btrfs/ioctl.c6
-rw-r--r--fs/btrfs/relocation.c5
-rw-r--r--fs/btrfs/transaction.c4
-rw-r--r--fs/buffer.c2
-rw-r--r--fs/char_dev.c1
-rw-r--r--fs/cifs/CHANGES2
-rw-r--r--fs/cifs/asn1.c55
-rw-r--r--fs/cifs/cifsfs.c157
-rw-r--r--fs/cifs/cifsfs.h13
-rw-r--r--fs/cifs/cifsglob.h27
-rw-r--r--fs/cifs/cifspdu.h14
-rw-r--r--fs/cifs/cifsproto.h11
-rw-r--r--fs/cifs/cifssmb.c4
-rw-r--r--fs/cifs/connect.c39
-rw-r--r--fs/cifs/dir.c28
-rw-r--r--fs/cifs/dns_resolve.c25
-rw-r--r--fs/cifs/file.c34
-rw-r--r--fs/cifs/inode.c395
-rw-r--r--fs/cifs/link.c3
-rw-r--r--fs/cifs/netmisc.c56
-rw-r--r--fs/cifs/readdir.c253
-rw-r--r--fs/cifs/sess.c2
-rw-r--r--fs/cifs/xattr.c12
-rw-r--r--fs/configfs/inode.c1
-rw-r--r--fs/eventfd.c122
-rw-r--r--fs/exofs/common.h4
-rw-r--r--fs/exofs/dir.c4
-rw-r--r--fs/exofs/exofs.h7
-rw-r--r--fs/exofs/file.c21
-rw-r--r--fs/exofs/inode.c7
-rw-r--r--fs/exofs/namei.c4
-rw-r--r--fs/exofs/osd.c4
-rw-r--r--fs/exofs/super.c6
-rw-r--r--fs/exofs/symlink.c4
-rw-r--r--fs/ext2/namei.c12
-rw-r--r--fs/fat/Kconfig20
-rw-r--r--fs/fat/dir.c15
-rw-r--r--fs/fat/namei_vfat.c68
-rw-r--r--fs/fs-writeback.c775
-rw-r--r--fs/fuse/dev.c83
-rw-r--r--fs/fuse/dir.c57
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/fuse/fuse_i.h27
-rw-r--r--fs/fuse/inode.c69
-rw-r--r--fs/gfs2/aops.c39
-rw-r--r--fs/gfs2/glock.c27
-rw-r--r--fs/gfs2/super.c39
-rw-r--r--fs/gfs2/super.h4
-rw-r--r--fs/hostfs/hostfs_kern.c1
-rw-r--r--fs/hugetlbfs/inode.c1
-rw-r--r--fs/jffs2/scan.c4
-rw-r--r--fs/namei.c7
-rw-r--r--fs/nfs/client.c1
-rw-r--r--fs/nfsd/vfs.c3
-rw-r--r--fs/notify/dnotify/dnotify.c2
-rw-r--r--fs/notify/inode_mark.c18
-rw-r--r--fs/notify/inotify/inotify_user.c5
-rw-r--r--fs/ocfs2/dlm/dlmfs.c1
-rw-r--r--fs/proc/base.c23
-rw-r--r--fs/proc/uptime.c7
-rw-r--r--fs/ramfs/inode.c1
-rw-r--r--fs/reiserfs/Makefile2
-rw-r--r--fs/reiserfs/bitmap.c4
-rw-r--r--fs/reiserfs/dir.c8
-rw-r--r--fs/reiserfs/do_balan.c17
-rw-r--r--fs/reiserfs/fix_node.c19
-rw-r--r--fs/reiserfs/inode.c73
-rw-r--r--fs/reiserfs/ioctl.c6
-rw-r--r--fs/reiserfs/journal.c109
-rw-r--r--fs/reiserfs/lock.c89
-rw-r--r--fs/reiserfs/namei.c13
-rw-r--r--fs/reiserfs/prints.c4
-rw-r--r--fs/reiserfs/resize.c2
-rw-r--r--fs/reiserfs/stree.c53
-rw-r--r--fs/reiserfs/super.c55
-rw-r--r--fs/reiserfs/xattr.c4
-rw-r--r--fs/super.c3
-rw-r--r--fs/sysfs/inode.c1
-rw-r--r--fs/ubifs/io.c57
-rw-r--r--fs/ubifs/replay.c2
-rw-r--r--fs/ubifs/super.c15
-rw-r--r--fs/ubifs/ubifs.h11
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c15
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.h1
-rw-r--r--fs/xfs/xfs_bmap.c2
-rw-r--r--fs/xfs/xfs_bmap.h11
-rw-r--r--fs/xfs/xfs_bmap_btree.c20
-rw-r--r--fs/xfs/xfs_bmap_btree.h1
-rw-r--r--fs/xfs/xfs_btree.c42
-rw-r--r--fs/xfs/xfs_btree.h15
-rw-r--r--fs/xfs/xfs_inode.c8
-rw-r--r--fs/xfs/xfs_inode.h5
-rw-r--r--fs/xfs/xfs_itable.c2
-rw-r--r--fs/xfs/xfs_itable.h5
-rw-r--r--fs/xfs/xfs_log_priv.h2
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_mount.h3
-rw-r--r--fs/xfs/xfs_mru_cache.c29
-rw-r--r--fs/xfs/xfs_mru_cache.h1
-rw-r--r--fs/xfs/xfs_rw.h6
-rw-r--r--fs/xfs/xfs_vnodeops.c2
-rw-r--r--include/acpi/acpi_bus.h2
-rw-r--r--include/asm-generic/device.h3
-rw-r--r--include/asm-generic/pci.h13
-rw-r--r--include/asm-generic/percpu.h4
-rw-r--r--include/asm-generic/topology.h17
-rw-r--r--include/asm-generic/vmlinux.lds.h23
-rw-r--r--include/drm/drm_edid.h38
-rw-r--r--include/linux/aio.h4
-rw-r--r--include/linux/backing-dev.h71
-rw-r--r--include/linux/bio.h65
-rw-r--r--include/linux/blkdev.h42
-rw-r--r--include/linux/capability.h2
-rw-r--r--include/linux/cpumask.h709
-rw-r--r--include/linux/cyclades.h10
-rw-r--r--include/linux/eventfd.h35
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/fips.h10
-rw-r--r--include/linux/firewire.h1
-rw-r--r--include/linux/fs.h11
-rw-r--r--include/linux/fsnotify_backend.h12
-rw-r--r--include/linux/fuse.h36
-rw-r--r--include/linux/hid-debug.h48
-rw-r--r--include/linux/hid.h22
-rw-r--r--include/linux/hrtimer.h5
-rw-r--r--include/linux/i2c-id.h11
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/ide.h5
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/linux/ima.h6
-rw-r--r--include/linux/init_task.h3
-rw-r--r--include/linux/input/matrix_keypad.h65
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/irq.h7
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/kfifo.h4
-rw-r--r--include/linux/kgdb.h48
-rw-r--r--include/linux/kvm.h95
-rw-r--r--include/linux/kvm_host.h105
-rw-r--r--include/linux/leds-lp3944.h53
-rw-r--r--include/linux/leds.h14
-rw-r--r--include/linux/lguest.h2
-rw-r--r--include/linux/linkage.h9
-rw-r--r--include/linux/mfd/ezx-pcap.h3
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/namei.h2
-rw-r--r--include/linux/netfilter/xt_conntrack.h13
-rw-r--r--include/linux/netfilter/xt_osf.h2
-rw-r--r--include/linux/oprofile.h2
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/percpu-defs.h67
-rw-r--r--include/linux/percpu.h80
-rw-r--r--include/linux/perf_counter.h46
-rw-r--r--include/linux/platform_device.h5
-rw-r--r--include/linux/poison.h16
-rw-r--r--include/linux/power_supply.h18
-rw-r--r--include/linux/rcu_types.h18
-rw-r--r--include/linux/rcuclassic.h178
-rw-r--r--include/linux/rcupdate.h38
-rw-r--r--include/linux/rcupreempt.h10
-rw-r--r--include/linux/rcutree.h12
-rw-r--r--include/linux/regulator/machine.h7
-rw-r--r--include/linux/reiserfs_fs.h68
-rw-r--r--include/linux/reiserfs_fs_sb.h20
-rw-r--r--include/linux/sched.h25
-rw-r--r--include/linux/security.h14
-rw-r--r--include/linux/serial_8250.h1
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/slab.h10
-rw-r--r--include/linux/slqb_def.h301
-rw-r--r--include/linux/smp.h11
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spi/spidev.h2
-rw-r--r--include/linux/timer.h4
-rw-r--r--include/linux/topology.h6
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/linux/usb/video.h164
-rw-r--r--include/linux/videodev2.h39
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/linux/writeback.h15
-rw-r--r--include/media/v4l2-chip-ident.h3
-rw-r--r--include/media/v4l2-subdev.h3
-rw-r--r--include/net/bluetooth/l2cap.h8
-rw-r--r--include/net/netfilter/nf_conntrack.h4
-rw-r--r--include/net/phonet/pn_dev.h1
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/sound/soc.h11
-rw-r--r--include/sound/tlv.h14
-rw-r--r--include/sound/uda1380.h22
-rw-r--r--include/trace/events/kvm.h95
-rw-r--r--init/Kconfig50
-rw-r--r--init/main.c29
-rw-r--r--ipc/mqueue.c2
-rw-r--r--kernel/Makefile7
-rw-r--r--kernel/acct.c6
-rw-r--r--kernel/audit.c1
-rw-r--r--kernel/audit.h26
-rw-r--r--kernel/audit_tree.c223
-rw-r--r--kernel/audit_watch.c293
-rw-r--r--kernel/auditfilter.c39
-rw-r--r--kernel/auditsc.c9
-rw-r--r--kernel/cgroup.c1
-rw-r--r--kernel/futex.c2
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/irq/pm.c8
-rw-r--r--kernel/kfifo.c2
-rw-r--r--kernel/kgdb.c6
-rw-r--r--kernel/kprobes.c36
-rw-r--r--kernel/kthread.c10
-rw-r--r--kernel/module.c9
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/perf_counter.c326
-rw-r--r--kernel/pid.c7
-rw-r--r--kernel/power/hibernate.c21
-rw-r--r--kernel/power/power.h2
-rw-r--r--kernel/power/snapshot.c398
-rw-r--r--kernel/printk.c146
-rw-r--r--kernel/ptrace.c2
-rw-r--r--kernel/rcuclassic.c807
-rw-r--r--kernel/rcupdate.c25
-rw-r--r--kernel/rcutorture.c202
-rw-r--r--kernel/rcutree.c3
-rw-r--r--kernel/resource.c2
-rw-r--r--kernel/sched.c192
-rw-r--r--kernel/smp.c7
-rw-r--r--kernel/sysctl.c13
-rw-r--r--kernel/time/timer_stats.c16
-rw-r--r--kernel/timer.c2
-rw-r--r--kernel/trace/Kconfig6
-rw-r--r--kernel/trace/ftrace.c60
-rw-r--r--kernel/trace/kmemtrace.c120
-rw-r--r--kernel/trace/ring_buffer.c11
-rw-r--r--kernel/trace/trace.c34
-rw-r--r--kernel/trace/trace.h10
-rw-r--r--kernel/trace/trace_event_types.h3
-rw-r--r--kernel/trace/trace_events.c71
-rw-r--r--kernel/trace/trace_functions.c3
-rw-r--r--kernel/trace/trace_output.c3
-rw-r--r--kernel/trace/trace_printk.c26
-rw-r--r--kernel/trace/trace_stack.c4
-rw-r--r--kernel/trace/trace_stat.c6
-rw-r--r--lib/Kconfig.debug47
-rw-r--r--lib/checksum.c14
-rw-r--r--lib/dma-debug.c4
-rw-r--r--mm/Makefile5
-rw-r--r--mm/allocpercpu.c28
-rw-r--r--mm/backing-dev.c533
-rw-r--r--mm/dmapool.c2
-rw-r--r--mm/hugetlb.c1
-rw-r--r--mm/kmemleak-test.c6
-rw-r--r--mm/kmemleak.c197
-rw-r--r--mm/memory.c26
-rw-r--r--mm/nommu.c33
-rw-r--r--mm/page-writeback.c167
-rw-r--r--mm/page_alloc.c13
-rw-r--r--mm/pdflush.c269
-rw-r--r--mm/percpu.c1318
-rw-r--r--mm/quicklist.c5
-rw-r--r--mm/slab.c8
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slqb.c3765
-rw-r--r--mm/slub.c6
-rw-r--r--mm/swap_state.c1
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/9p/trans_fd.c14
-rw-r--r--net/bluetooth/hidp/core.c1
-rw-r--r--net/bluetooth/l2cap.c72
-rw-r--r--net/bluetooth/sco.c49
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/core/dev.c8
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/ieee802154/netlink.c6
-rw-r--r--net/ipv4/arp.c7
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/ip_input.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c17
-rw-r--r--net/ipv4/syncookies.c5
-rw-r--r--net/ipv4/tcp.c15
-rw-r--r--net/ipv4/tcp_minisocks.c3
-rw-r--r--net/ipv4/tcp_output.c3
-rw-r--r--net/ipv4/xfrm4_policy.c3
-rw-r--r--net/ipv6/addrconf.c35
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/syncookies.c5
-rw-r--r--net/ipv6/xfrm6_policy.c6
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_conntrack_extend.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c6
-rw-r--r--net/netfilter/xt_conntrack.c66
-rw-r--r--net/phonet/pn_dev.c52
-rw-r--r--net/phonet/pn_netlink.c4
-rw-r--r--net/rds/ib_stats.c2
-rw-r--r--net/rds/iw_stats.c2
-rw-r--r--net/rds/page.c2
-rw-r--r--net/sctp/output.c2
-rw-r--r--net/sunrpc/sunrpc_syms.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c4
-rw-r--r--net/xfrm/xfrm_algo.c4
-rw-r--r--net/xfrm/xfrm_state.c57
-rw-r--r--scripts/.gitignore1
-rw-r--r--scripts/basic/fixdep.c3
-rw-r--r--scripts/dtc/.gitignore5
-rwxr-xr-xscripts/kernel-doc4
-rw-r--r--scripts/mod/sumversion.c2
-rw-r--r--scripts/module-common.lds8
-rw-r--r--scripts/package/builddeb2
-rw-r--r--scripts/pnmtologo.c4
-rw-r--r--security/capability.c2
-rw-r--r--security/commoncap.c4
-rw-r--r--security/integrity/ima/ima_main.c29
-rw-r--r--security/integrity/ima/ima_queue.c3
-rw-r--r--security/keys/proc.c4
-rw-r--r--security/security.c4
-rw-r--r--security/selinux/avc.c8
-rw-r--r--security/selinux/hooks.c24
-rw-r--r--security/selinux/include/avc.h6
-rw-r--r--security/selinux/ss/services.c142
-rw-r--r--security/smack/smack_lsm.c8
-rw-r--r--security/tomoyo/common.c30
-rw-r--r--security/tomoyo/common.h2
-rw-r--r--security/tomoyo/domain.c42
-rw-r--r--security/tomoyo/tomoyo.c10
-rw-r--r--security/tomoyo/tomoyo.h3
-rw-r--r--sound/core/vmaster.c8
-rw-r--r--sound/isa/cmi8330.c75
-rw-r--r--sound/oss/kahlua.c2
-rw-r--r--sound/oss/mpu401.c16
-rw-r--r--sound/pci/atiixp.c8
-rw-r--r--sound/pci/atiixp_modem.c4
-rw-r--r--sound/pci/au88x0/au8810.c3
-rw-r--r--sound/pci/au88x0/au8820.c3
-rw-r--r--sound/pci/au88x0/au8830.c3
-rw-r--r--sound/pci/ca0106/ca0106_main.c2
-rw-r--r--sound/pci/cmipci.c10
-rw-r--r--sound/pci/cs4281.c2
-rw-r--r--sound/pci/cs46xx/cs46xx.c6
-rw-r--r--sound/pci/emu10k1/emu10k1.c6
-rw-r--r--sound/pci/emu10k1/emu10k1x.c2
-rw-r--r--sound/pci/ens1370.c8
-rw-r--r--sound/pci/es1938.c2
-rw-r--r--sound/pci/hda/Kconfig14
-rw-r--r--sound/pci/hda/hda_beep.c4
-rw-r--r--sound/pci/hda/hda_codec.c33
-rw-r--r--sound/pci/hda/hda_codec.h10
-rw-r--r--sound/pci/hda/hda_hwdep.c236
-rw-r--r--sound/pci/hda/hda_intel.c55
-rw-r--r--sound/pci/hda/hda_local.h1
-rw-r--r--sound/pci/hda/patch_analog.c156
-rw-r--r--sound/pci/hda/patch_ca0110.c2
-rw-r--r--sound/pci/hda/patch_realtek.c1907
-rw-r--r--sound/pci/hda/patch_sigmatel.c4
-rw-r--r--sound/pci/ice1712/ice1712.c2
-rw-r--r--sound/pci/ice1712/ice1712.h9
-rw-r--r--sound/pci/ice1712/ice1724.c114
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c46
-rw-r--r--sound/pci/intel8x0.c46
-rw-r--r--sound/pci/intel8x0m.c34
-rw-r--r--sound/pci/lx6464es/lx6464es.c7
-rw-r--r--sound/pci/mixart/mixart.c2
-rw-r--r--sound/pci/nm256/nm256.c6
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c28
-rw-r--r--sound/pci/oxygen/virtuoso.c2
-rw-r--r--sound/pci/rme32.c9
-rw-r--r--sound/pci/rme96.c12
-rw-r--r--sound/pci/sonicvibes.c2
-rw-r--r--sound/pci/via82xx.c4
-rw-r--r--sound/pci/via82xx_modem.c2
-rw-r--r--sound/pci/ymfpci/ymfpci.c12
-rw-r--r--sound/soc/blackfin/bf5xx-ad73311.c16
-rw-r--r--sound/soc/blackfin/bf5xx-ssm2602.c16
-rw-r--r--sound/soc/codecs/Kconfig8
-rw-r--r--sound/soc/codecs/Makefile4
-rw-r--r--sound/soc/codecs/twl4030.c50
-rw-r--r--sound/soc/codecs/uda1380.c313
-rw-r--r--sound/soc/codecs/uda1380.h8
-rw-r--r--sound/soc/codecs/wm8350.c40
-rw-r--r--sound/soc/codecs/wm8400.c17
-rw-r--r--sound/soc/codecs/wm8523.c755
-rw-r--r--sound/soc/codecs/wm8523.h160
-rw-r--r--sound/soc/codecs/wm8580.c55
-rw-r--r--sound/soc/codecs/wm8731.c39
-rw-r--r--sound/soc/codecs/wm8753.c35
-rw-r--r--sound/soc/codecs/wm8900.c30
-rw-r--r--sound/soc/codecs/wm8903.c27
-rw-r--r--sound/soc/codecs/wm8940.c17
-rw-r--r--sound/soc/codecs/wm8960.c17
-rw-r--r--sound/soc/codecs/wm8961.c1326
-rw-r--r--sound/soc/codecs/wm8961.h866
-rw-r--r--sound/soc/codecs/wm8988.c34
-rw-r--r--sound/soc/codecs/wm9081.c17
-rw-r--r--sound/soc/fsl/Kconfig6
-rw-r--r--sound/soc/omap/Kconfig7
-rw-r--r--sound/soc/omap/Makefile2
-rw-r--r--sound/soc/omap/omap-pcm.c11
-rw-r--r--sound/soc/omap/sdp3430.c2
-rw-r--r--sound/soc/omap/zoom2.c301
-rw-r--r--sound/soc/pxa/magician.c54
-rw-r--r--sound/soc/pxa/pxa-ssp.c50
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c7
-rw-r--r--sound/soc/soc-core.c105
-rw-r--r--sound/soc/soc-dapm.c348
-rw-r--r--sound/soc/txx9/txx9aclc.c10
-rw-r--r--sound/sound_core.c5
-rw-r--r--sound/usb/caiaq/device.c10
-rw-r--r--sound/usb/usbmixer.c13
-rw-r--r--sound/usb/usx2y/us122l.c2
-rw-r--r--sound/usb/usx2y/usbusx2y.c2
-rw-r--r--tools/perf/CREDITS30
-rw-r--r--tools/perf/Documentation/perf-report.txt14
-rw-r--r--tools/perf/Documentation/perf-stat.txt6
-rw-r--r--tools/perf/Makefile26
-rw-r--r--tools/perf/builtin-annotate.c77
-rw-r--r--tools/perf/builtin-help.c6
-rw-r--r--tools/perf/builtin-list.c2
-rw-r--r--tools/perf/builtin-record.c131
-rw-r--r--tools/perf/builtin-report.c595
-rw-r--r--tools/perf/builtin-stat.c216
-rw-r--r--tools/perf/builtin-top.c81
-rw-r--r--tools/perf/perf.c5
-rw-r--r--tools/perf/perf.h27
-rw-r--r--tools/perf/util/alias.c2
-rw-r--r--tools/perf/util/cache.h1
-rw-r--r--tools/perf/util/callchain.c335
-rw-r--r--tools/perf/util/callchain.h54
-rw-r--r--tools/perf/util/color.c37
-rw-r--r--tools/perf/util/color.h5
-rw-r--r--tools/perf/util/config.c18
-rw-r--r--tools/perf/util/exec_cmd.c5
-rw-r--r--tools/perf/util/header.c242
-rw-r--r--tools/perf/util/header.h37
-rw-r--r--tools/perf/util/help.c41
-rw-r--r--tools/perf/util/help.h6
-rw-r--r--tools/perf/util/include/asm/system.h1
-rw-r--r--tools/perf/util/include/linux/kernel.h21
-rw-r--r--tools/perf/util/include/linux/list.h18
-rw-r--r--tools/perf/util/include/linux/module.h6
-rw-r--r--tools/perf/util/include/linux/poison.h1
-rw-r--r--tools/perf/util/include/linux/prefetch.h6
-rw-r--r--tools/perf/util/include/linux/rbtree.h1
-rw-r--r--tools/perf/util/list.h603
-rw-r--r--tools/perf/util/module.c509
-rw-r--r--tools/perf/util/module.h53
-rw-r--r--tools/perf/util/pager.c5
-rw-r--r--tools/perf/util/parse-events.c378
-rw-r--r--tools/perf/util/parse-options.c5
-rw-r--r--tools/perf/util/parse-options.h27
-rw-r--r--tools/perf/util/quote.c46
-rw-r--r--tools/perf/util/quote.h2
-rw-r--r--tools/perf/util/rbtree.c383
-rw-r--r--tools/perf/util/rbtree.h171
-rw-r--r--tools/perf/util/run-command.c95
-rw-r--r--tools/perf/util/run-command.h5
-rw-r--r--tools/perf/util/strbuf.c15
-rw-r--r--tools/perf/util/strbuf.h10
-rw-r--r--tools/perf/util/string.h2
-rw-r--r--tools/perf/util/strlist.c184
-rw-r--r--tools/perf/util/strlist.h32
-rw-r--r--tools/perf/util/symbol.c183
-rw-r--r--tools/perf/util/symbol.h14
-rw-r--r--tools/perf/util/types.h (renamed from tools/perf/types.h)0
-rw-r--r--tools/perf/util/util.h15
-rw-r--r--tools/perf/util/wrapper.c5
-rw-r--r--virt/kvm/Kconfig14
-rw-r--r--virt/kvm/coalesced_mmio.c68
-rw-r--r--virt/kvm/coalesced_mmio.h1
-rw-r--r--virt/kvm/eventfd.c329
-rw-r--r--virt/kvm/ioapic.c76
-rw-r--r--virt/kvm/iodev.h55
-rw-r--r--virt/kvm/irq_comm.c45
-rw-r--r--virt/kvm/kvm_main.c255
-rw-r--r--virt/kvm/kvm_trace.c285
1658 files changed, 65037 insertions, 27505 deletions
diff --git a/.gitignore b/.gitignore
index cecb3b040cc1..b93fb7eff942 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,6 +27,7 @@
*.gz
*.lzma
*.patch
+*.gcno
#
# Top-level generic files
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 9f711d2df91b..d2b85237c76e 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -743,3 +743,80 @@ Revised:
RCU, realtime RCU, sleepable RCU, performance.
"
}
+
+@article{PaulEMcKenney2008RCUOSR
+,author="Paul E. McKenney and Jonathan Walpole"
+,title="Introducing technology into the {Linux} kernel: a case study"
+,Year="2008"
+,journal="SIGOPS Oper. Syst. Rev."
+,volume="42"
+,number="5"
+,pages="4--17"
+,issn="0163-5980"
+,doi={http://doi.acm.org/10.1145/1400097.1400099}
+,publisher="ACM"
+,address="New York, NY, USA"
+,annotation={
+ Linux changed RCU to a far greater degree than RCU has changed Linux.
+}
+}
+
+@unpublished{PaulEMcKenney2008HierarchicalRCU
+,Author="Paul E. McKenney"
+,Title="Hierarchical {RCU}"
+,month="November"
+,day="3"
+,year="2008"
+,note="Available:
+\url{http://lwn.net/Articles/305782/}
+[Viewed November 6, 2008]"
+,annotation="
+ RCU with combining-tree-based grace-period detection,
+ permitting it to handle thousands of CPUs.
+"
+}
+
+@conference{PaulEMcKenney2009MaliciousURCU
+,Author="Paul E. McKenney"
+,Title="Using a Malicious User-Level {RCU} to Torture {RCU}-Based Algorithms"
+,Booktitle="linux.conf.au 2009"
+,month="January"
+,year="2009"
+,address="Hobart, Australia"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/urcutorture.2009.01.22a.pdf}
+[Viewed February 2, 2009]"
+,annotation="
+ Realtime RCU and torture-testing RCU uses.
+"
+}
+
+@unpublished{MathieuDesnoyers2009URCU
+,Author="Mathieu Desnoyers"
+,Title="[{RFC} git tree] Userspace {RCU} (urcu) for {Linux}"
+,month="February"
+,day="5"
+,year="2009"
+,note="Available:
+\url{http://lkml.org/lkml/2009/2/5/572}
+\url{git://lttng.org/userspace-rcu.git}
+[Viewed February 20, 2009]"
+,annotation="
+ Mathieu Desnoyers's user-space RCU implementation.
+ git://lttng.org/userspace-rcu.git
+"
+}
+
+@unpublished{PaulEMcKenney2009BloatWatchRCU
+,Author="Paul E. McKenney"
+,Title="{RCU}: The {Bloatwatch} Edition"
+,month="March"
+,day="17"
+,year="2009"
+,note="Available:
+\url{http://lwn.net/Articles/323929/}
+[Viewed March 20, 2009]"
+,annotation="
+ Uniprocessor assumptions allow simplified RCU implementation.
+"
+}
diff --git a/Documentation/RCU/UP.txt b/Documentation/RCU/UP.txt
index aab4a9ec3931..90ec5341ee98 100644
--- a/Documentation/RCU/UP.txt
+++ b/Documentation/RCU/UP.txt
@@ -2,14 +2,13 @@ RCU on Uniprocessor Systems
A common misconception is that, on UP systems, the call_rcu() primitive
-may immediately invoke its function, and that the synchronize_rcu()
-primitive may return immediately. The basis of this misconception
+may immediately invoke its function. The basis of this misconception
is that since there is only one CPU, it should not be necessary to
wait for anything else to get done, since there are no other CPUs for
anything else to be happening on. Although this approach will -sort- -of-
work a surprising amount of the time, it is a very bad idea in general.
-This document presents three examples that demonstrate exactly how bad an
-idea this is.
+This document presents three examples that demonstrate exactly how bad
+an idea this is.
Example 1: softirq Suicide
@@ -82,11 +81,18 @@ Quick Quiz #2: What locking restriction must RCU callbacks respect?
Summary
-Permitting call_rcu() to immediately invoke its arguments or permitting
-synchronize_rcu() to immediately return breaks RCU, even on a UP system.
-So do not do it! Even on a UP system, the RCU infrastructure -must-
-respect grace periods, and -must- invoke callbacks from a known environment
-in which no locks are held.
+Permitting call_rcu() to immediately invoke its arguments breaks RCU,
+even on a UP system. So do not do it! Even on a UP system, the RCU
+infrastructure -must- respect grace periods, and -must- invoke callbacks
+from a known environment in which no locks are held.
+
+It -is- safe for synchronize_sched() and synchronize_rcu_bh() to return
+immediately on a UP system. It is also safe for synchronize_rcu()
+to return immediately on UP systems, except when running preemptable
+RCU.
+
+Quick Quiz #3: Why can't synchronize_rcu() return immediately on
+ UP systems running preemptable RCU?
Answer to Quick Quiz #1:
@@ -117,3 +123,13 @@ Answer to Quick Quiz #2:
callbacks acquire locks directly. However, a great many RCU
callbacks do acquire locks -indirectly-, for example, via
the kfree() primitive.
+
+Answer to Quick Quiz #3:
+ Why can't synchronize_rcu() return immediately on UP systems
+ running preemptable RCU?
+
+ Because some other task might have been preempted in the middle
+ of an RCU read-side critical section. If synchronize_rcu()
+ simply immediately returned, it would prematurely signal the
+ end of the grace period, which would come as a nasty shock to
+ that other thread when it started running again.
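For reference, a minimal sketch of the deferred-free pattern that the above
discussion assumes (the struct and function names are made up for the example,
and the usual includes such as linux/rcupdate.h, linux/list.h and linux/slab.h
are taken as given):

    struct foo {
        struct list_head list;
        int data;
        struct rcu_head rcu;
    };

    static void foo_reclaim(struct rcu_head *head)
    {
        /*
         * Runs only after a grace period: no pre-existing reader
         * can still hold a reference to this element.
         */
        kfree(container_of(head, struct foo, rcu));
    }

    static void foo_remove(struct foo *p)
    {
        list_del_rcu(&p->list);         /* new readers cannot find it */
        call_rcu(&p->rcu, foo_reclaim); /* defer kfree() past the grace period */
    }

If call_rcu() invoked foo_reclaim() immediately, a still-running or preempted
reader, or even the updater itself if it were inside a read-side critical
section, could be referencing *p when it is freed, which is the sort of
failure the examples above describe.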
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index accfe2f5247d..51525a30e8b4 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -11,7 +11,10 @@ over a rather long period of time, but improvements are always welcome!
structure is updated more than about 10% of the time, then
you should strongly consider some other approach, unless
detailed performance measurements show that RCU is nonetheless
- the right tool for the job.
+ the right tool for the job. Yes, you might think of RCU
+ as simply cutting overhead off of the readers and imposing it
+ on the writers. That is exactly why normal uses of RCU will
+ do much more reading than updating.
Another exception is where performance is not an issue, and RCU
provides a simpler implementation. An example of this situation
@@ -240,10 +243,11 @@ over a rather long period of time, but improvements are always welcome!
instead need to use synchronize_irq() or synchronize_sched().
12. Any lock acquired by an RCU callback must be acquired elsewhere
- with irq disabled, e.g., via spin_lock_irqsave(). Failing to
- disable irq on a given acquisition of that lock will result in
- deadlock as soon as the RCU callback happens to interrupt that
- acquisition's critical section.
+ with softirq disabled, e.g., via spin_lock_irqsave(),
+ spin_lock_bh(), etc. Failing to disable softirq on a given
+ acquisition of that lock will result in deadlock as soon as the
+ RCU callback happens to interrupt that acquisition's critical
+ section; see the sketch below.
13. RCU callbacks can be and are executed in parallel. In many cases,
the callback code simply wrappers around kfree(), so that this
@@ -310,3 +314,9 @@ over a rather long period of time, but improvements are always welcome!
Because these primitives only wait for pre-existing readers,
it is the caller's responsibility to guarantee safety to
any subsequent readers.
+
+16. The various RCU read-side primitives do -not- contain memory
+ barriers. The CPU (and in some cases, the compiler) is free
+ to reorder code into and out of RCU read-side critical sections.
+ It is the responsibility of the RCU update-side primitives to
+ deal with this.
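As a rough illustration of item 12 above (hypothetical names; the callback is
assumed to run in softirq context, as RCU callbacks normally do):

    static DEFINE_SPINLOCK(foo_lock);

    static void foo_rcu_callback(struct rcu_head *head)
    {
        spin_lock(&foo_lock);    /* already running in softirq context */
        /* ... clean up protected state ... */
        spin_unlock(&foo_lock);
    }

    void foo_update(void)
    {
        spin_lock_bh(&foo_lock); /* keep the callback's softirq out */
        /* ... */
        spin_unlock_bh(&foo_lock);
    }

A plain spin_lock() in foo_update() could be interrupted by the softirq that
runs foo_rcu_callback(), which would then spin forever on foo_lock on that CPU.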
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
index 909602d409bb..e439a0edee22 100644
--- a/Documentation/RCU/rcubarrier.txt
+++ b/Documentation/RCU/rcubarrier.txt
@@ -170,6 +170,13 @@ module invokes call_rcu() from timers, you will need to first cancel all
the timers, and only then invoke rcu_barrier() to wait for any remaining
RCU callbacks to complete.
+Of course, if your module uses call_rcu_bh(), you will need to invoke
+rcu_barrier_bh() before unloading. Similarly, if your module uses
+call_rcu_sched(), you will need to invoke rcu_barrier_sched() before
+unloading. If your module uses call_rcu(), call_rcu_bh(), -and-
+call_rcu_sched(), then you will need to invoke each of rcu_barrier(),
+rcu_barrier_bh(), and rcu_barrier_sched().
+
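A minimal sketch of a module exit path following this rule (foo_timer and
foo_cache are hypothetical, and the module is assumed to use all three
call_rcu() variants):

    static void __exit foo_exit(void)
    {
        del_timer_sync(&foo_timer);    /* stop posting new callbacks */
        rcu_barrier();                 /* wait for call_rcu() callbacks */
        rcu_barrier_bh();              /* wait for call_rcu_bh() callbacks */
        rcu_barrier_sched();           /* wait for call_rcu_sched() callbacks */
        kmem_cache_destroy(foo_cache); /* only now is teardown safe */
    }
    module_exit(foo_exit);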
Implementing rcu_barrier()
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index a342b6e1cc10..9dba3bb90e60 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -76,8 +76,10 @@ torture_type The type of RCU to test: "rcu" for the rcu_read_lock() API,
"rcu_sync" for rcu_read_lock() with synchronous reclamation,
"rcu_bh" for the rcu_read_lock_bh() API, "rcu_bh_sync" for
rcu_read_lock_bh() with synchronous reclamation, "srcu" for
- the "srcu_read_lock()" API, and "sched" for the use of
- preempt_disable() together with synchronize_sched().
+ the "srcu_read_lock()" API, "sched" for the use of
+ preempt_disable() together with synchronize_sched(),
+ and "sched_expedited" for the use of preempt_disable()
+ with synchronize_sched_expedited().
verbose Enable debug printk()s. Default is disabled.
@@ -162,6 +164,23 @@ of the "old" and "current" counters for the corresponding CPU. The
"idx" value maps the "old" and "current" values to the underlying array,
and is useful for debugging.
+Similarly, sched_expedited RCU provides the following:
+
+ sched_expedited-torture: rtc: d0000000016c1880 ver: 1090796 tfle: 0 rta: 1090796 rtaf: 0 rtf: 1090787 rtmbe: 0 nt: 27713319
+ sched_expedited-torture: Reader Pipe: 12660320201 95875 0 0 0 0 0 0 0 0 0
+ sched_expedited-torture: Reader Batch: 12660424885 0 0 0 0 0 0 0 0 0 0
+ sched_expedited-torture: Free-Block Circulation: 1090795 1090795 1090794 1090793 1090792 1090791 1090790 1090789 1090788 1090787 0
+ state: -1 / 0:0 3:0 4:0
+
+As before, the first four lines are similar to those for RCU.
+The last line shows the task-migration state. The first number is
+-1 if synchronize_sched_expedited() is idle, -2 if in the process of
+posting wakeups to the migration kthreads, and N when waiting on CPU N.
+Each of the colon-separated fields following the "/" is a CPU:state pair.
+Valid states are "0" for idle, "1" for waiting for quiescent state,
+"2" for passed through quiescent state, and "3" when a race with a
+CPU-hotplug event forces use of the synchronize_sched() primitive.
+
USAGE
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 96170824a717..97ded2432c59 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -785,6 +785,7 @@ RCU pointer/list traversal:
rcu_dereference
list_for_each_entry_rcu
hlist_for_each_entry_rcu
+ hlist_nulls_for_each_entry_rcu
list_for_each_continue_rcu (to be deprecated in favor of new
list_for_each_entry_continue_rcu)
@@ -807,19 +808,23 @@ RCU: Critical sections Grace period Barrier
rcu_read_lock synchronize_net rcu_barrier
rcu_read_unlock synchronize_rcu
+ synchronize_rcu_expedited
call_rcu
bh: Critical sections Grace period Barrier
rcu_read_lock_bh call_rcu_bh rcu_barrier_bh
- rcu_read_unlock_bh
+ rcu_read_unlock_bh synchronize_rcu_bh
+ synchronize_rcu_bh_expedited
sched: Critical sections Grace period Barrier
- [preempt_disable] synchronize_sched rcu_barrier_sched
- [and friends] call_rcu_sched
+ rcu_read_lock_sched synchronize_sched rcu_barrier_sched
+ rcu_read_unlock_sched call_rcu_sched
+ [preempt_disable] synchronize_sched_expedited
+ [and friends]
SRCU: Critical sections Grace period Barrier
@@ -827,6 +832,9 @@ SRCU: Critical sections Grace period Barrier
srcu_read_lock synchronize_srcu N/A
srcu_read_unlock
+SRCU: Initialization/cleanup
+ init_srcu_struct
+ cleanup_srcu_struct
See the comment headers in the source code (or the docbook generated
from them) for more information.
diff --git a/Documentation/block/data-integrity.txt b/Documentation/block/data-integrity.txt
index e8ca040ba2cf..2d735b0ae383 100644
--- a/Documentation/block/data-integrity.txt
+++ b/Documentation/block/data-integrity.txt
@@ -50,7 +50,7 @@ encouraged them to allow separation of the data and integrity metadata
scatter-gather lists.
The controller will interleave the buffers on write and split them on
-read. This means that the Linux can DMA the data buffers to and from
+read. This means that Linux can DMA the data buffers to and from
host memory without changes to the page cache.
Also, the 16-bit CRC checksum mandated by both the SCSI and SATA specs
@@ -66,7 +66,7 @@ software RAID5).
The IP checksum is weaker than the CRC in terms of detecting bit
errors. However, the strength is really in the separation of the data
-buffers and the integrity metadata. These two distinct buffers much
+buffers and the integrity metadata. These two distinct buffers must
match up for an I/O to complete.
The separation of the data and integrity metadata buffers as well as
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index f9ca389dddf4..1d7e9784439a 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -777,6 +777,18 @@ in cpuset directories:
# /bin/echo 1-4 > cpus -> set cpus list to cpus 1,2,3,4
# /bin/echo 1,2,3,4 > cpus -> set cpus list to cpus 1,2,3,4
+To add a CPU to a cpuset, write the new list of CPUs including the
+CPU to be added. To add 6 to the above cpuset:
+
+# /bin/echo 1-4,6 > cpus -> set cpus list to cpus 1,2,3,4,6
+
+Similarly, to remove a CPU from a cpuset, write the new list of CPUs
+without the CPU to be removed.
+
+To remove all the CPUs:
+
+# /bin/echo "" > cpus -> clear cpus list
+
2.3 Setting flags
-----------------
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index a52adfc9a57f..3d1b0ab70c8e 100644
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -25,7 +25,7 @@ use IO::Handle;
"tda10046lifeview", "av7110", "dec2000t", "dec2540t",
"dec3000s", "vp7041", "dibusb", "nxt2002", "nxt2004",
"or51211", "or51132_qam", "or51132_vsb", "bluebird",
- "opera1", "cx231xx", "cx18", "cx23885", "pvrusb2" );
+ "opera1", "cx231xx", "cx18", "cx23885", "pvrusb2", "mpc718" );
# Check args
syntax() if (scalar(@ARGV) != 1);
@@ -381,6 +381,57 @@ sub cx18 {
$allfiles;
}
+sub mpc718 {
+ my $archive = 'Yuan MPC718 TV Tuner Card 2.13.10.1016.zip';
+ my $url = "ftp://ftp.work.acer-euro.com/desktop/aspire_idea510/vista/Drivers/$archive";
+ my $fwfile = "dvb-cx18-mpc718-mt352.fw";
+ my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
+
+ checkstandard();
+ wgetfile($archive, $url);
+ unzip($archive, $tmpdir);
+
+ my $sourcefile = "$tmpdir/Yuan MPC718 TV Tuner Card 2.13.10.1016/mpc718_32bit/yuanrap.sys";
+ my $found = 0;
+
+ open IN, '<', $sourcefile or die "Couldn't open $sourcefile to extract $fwfile data\n";
+ binmode IN;
+ open OUT, '>', $fwfile;
+ binmode OUT;
+ {
+ # Block scope because we change the line terminator variable $/
+ my $prevlen = 0;
+ my $currlen;
+
+ # Buried in the data segment are 3 runs of almost identical
+ # register-value pairs that end in 0x5d 0x01 which is a "TUNER GO"
+ # command for the MT352.
+ # Pull out the middle run (because it's easy) of register-value
+ # pairs to make the "firmware" file.
+
+ local $/ = "\x5d\x01"; # MT352 "TUNER GO"
+
+ while (<IN>) {
+ $currlen = length($_);
+ if ($prevlen == $currlen && $currlen <= 64) {
+ chop; chop; # Get rid of "TUNER GO"
+ s/^\0\0//; # get rid of leading 00 00 if it's there
+ print OUT $_; # print, not printf: the binary data may contain '%'
+ $found = 1;
+ last;
+ }
+ $prevlen = $currlen;
+ }
+ }
+ close OUT;
+ close IN;
+ if (!$found) {
+ unlink $fwfile;
+ die "Couldn't find valid register-value sequence in $sourcefile for $fwfile\n";
+ }
+ $fwfile;
+}
+
sub cx23885 {
my $url = "http://linuxtv.org/downloads/firmware/";
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index f8cd450be9aa..0a67c537dda6 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -394,15 +394,6 @@ Who: Thomas Gleixner <tglx@linutronix.de>
-----------------------------
-What: obsolete generic irq defines and typedefs
-When: 2.6.30
-Why: The defines and typedefs (hw_interrupt_type, no_irq_type, irq_desc_t)
- have been kept around for migration reasons. After more than two years
- it's time to remove them finally
-Who: Thomas Gleixner <tglx@linutronix.de>
-
----------------------------
-
What: fakephp and associated sysfs files in /sys/bus/pci/slots/
When: 2011
Why: In 2.6.27, the semantics of /sys/bus/pci/slots was redefined to
@@ -449,6 +440,13 @@ When: 2.6.33
Why: Should be implemented in userspace, policy daemon.
Who: Johannes Berg <johannes@sipsolutions.net>
+---------------------------
+
+What: CONFIG_INOTIFY
+When: 2.6.33
+Why: No known users, fsnotify more generic and more easily maintained.
+Who: Eric Paris <eparis@redhat.com>
+
----------------------------
What: CONFIG_X86_OLD_MCE
diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
index b843743aa0b5..0d15ebccf5b0 100644
--- a/Documentation/filesystems/seq_file.txt
+++ b/Documentation/filesystems/seq_file.txt
@@ -46,7 +46,7 @@ better to do. The file is seekable, in that one can do something like the
following:
dd if=/proc/sequence of=out1 count=1
- dd if=/proc/sequence skip=1 out=out2 count=1
+ dd if=/proc/sequence skip=1 of=out2 count=1
Then concatenate the output files out1 and out2 and get the right
result. Yes, it is a thoroughly useless module, but the point is to show
diff --git a/Documentation/gcov.txt b/Documentation/gcov.txt
index e716aadb3a33..40ec63352760 100644
--- a/Documentation/gcov.txt
+++ b/Documentation/gcov.txt
@@ -188,13 +188,18 @@ Solution: Exclude affected source files from profiling by specifying
GCOV_PROFILE := n or GCOV_PROFILE_basename.o := n in the
corresponding Makefile.
+Problem: Files copied from debugfs appear empty or incomplete.
+Cause: Due to the way seq_file works, some tools such as cp or tar
+ may not correctly copy files from debugfs.
+Solution: Use 'cat' to read .gcda files and 'cp -d' to copy links.
+ Alternatively use the mechanism shown in Appendix B.
+
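For a single object file, the workaround above amounts to something like the
following (the path under /sys/kernel/debug/gcov stands in for whichever file
is needed):

    # cat /sys/kernel/debug/gcov/path/to/your.gcda > your.gcda
    # cp -d /sys/kernel/debug/gcov/path/to/your.gcno your.gcno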
Appendix A: gather_on_build.sh
==============================
Sample script to gather coverage meta files on the build machine
(see 6a):
-
#!/bin/bash
KSRC=$1
@@ -226,7 +231,7 @@ Appendix B: gather_on_test.sh
Sample script to gather coverage data files on the test machine
(see 6b):
-#!/bin/bash
+#!/bin/bash -e
DEST=$1
GCDA=/sys/kernel/debug/gcov
@@ -236,11 +241,13 @@ if [ -z "$DEST" ] ; then
exit 1
fi
-find $GCDA -name '*.gcno' -o -name '*.gcda' | tar cfz $DEST -T -
+TEMPDIR=$(mktemp -d)
+echo Collecting data..
+find $GCDA -type d -exec mkdir -p $TEMPDIR/\{\} \;
+find $GCDA -name '*.gcda' -exec sh -c 'cat < $0 > '$TEMPDIR'/$0' {} \;
+find $GCDA -name '*.gcno' -exec sh -c 'cp -d $0 '$TEMPDIR'/$0' {} \;
+tar czf $DEST -C $TEMPDIR sys
+rm -rf $TEMPDIR
-if [ $? -eq 0 ] ; then
- echo "$DEST successfully created, copy to build system and unpack with:"
- echo " tar xfz $DEST"
-else
- echo "Could not create file $DEST"
-fi
+echo "$DEST successfully created, copy to build system and unpack with:"
+echo " tar xfz $DEST"
diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt
new file mode 100644
index 000000000000..cb61516483d3
--- /dev/null
+++ b/Documentation/hwlat_detector.txt
@@ -0,0 +1,64 @@
+Introduction:
+-------------
+
+The module hwlat_detector is a special purpose kernel module that is used to
+detect large system latencies induced by the behavior of certain underlying
+hardware or firmware, independent of Linux itself. The code was developed
+originally to detect SMIs (System Management Interrupts) on x86 systems,
+however there is nothing x86 specific about this patchset. It was
+originally written for use by the "RT" patch since the Real Time
+kernel is highly latency sensitive.
+
+SMIs are usually not serviced by the Linux kernel, which typically does not
+even know that they are occurring. SMIs are instead set up by BIOS code
+and are serviced by BIOS code, usually for "critical" events such as
+management of thermal sensors and fans. Sometimes though, SMIs are used for
+other tasks and those tasks can spend an inordinate amount of time in the
+handler (sometimes measured in milliseconds). Obviously this is a problem if
+you are trying to keep event service latencies down in the microsecond range.
+
+The hardware latency detector works by hogging all of the CPUs for configurable
+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
+for some period, then looking for gaps in the TSC data. Any gap indicates a
+time when the polling was interrupted and, since the machine is stopped and
+interrupts are turned off, the only thing that could cause that is an SMI.
+
+Note that the SMI detector should *NEVER* be used in a production environment.
+It is intended to be run manually to determine if the hardware platform has a
+problem with long system firmware service routines.
+
+Usage:
+------
+
+Loading the module hwlat_detector with the parameter "enabled=1" (or toggling
+on the "enable" entry in the "hwlat_detector" debugfs directory) is the only
+step required to start the hwlat_detector. It is possible to redefine the
+threshold in microseconds (us) above which latency spikes will be taken
+into account (parameter "threshold=").
+
+Example:
+
+ # modprobe hwlat_detector enabled=1 threshold=100
+
+After the module is loaded, it creates a directory named "hwlat_detector"
+under the debugfs mountpoint, referred to as "/debug/hwlat_detector" in this
+text. It is necessary to have debugfs mounted, which might be on /sys/debug
+on your system.
+
+The /debug/hwlat_detector interface contains the following files:
+
+count - number of latency spikes observed since last reset
+enable - a global enable/disable toggle (0/1), resets count
+max - maximum hardware latency actually observed (usecs)
+sample - a pipe from which to read current raw sample data
+ in the format <timestamp> <latency observed usecs>
+ (can be opened O_NONBLOCK for a single sample)
+threshold - minimum latency value to be considered (usecs)
+width - time period to sample with CPUs held (usecs)
+ must be less than the total window size (enforced)
+window - total period of sampling, width being inside (usecs)
+
+By default we will set width to 500,000 and window to 1,000,000, meaning that
+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
+observe any latencies that exceed the threshold (initially 100 usecs),
+then we write to a global sample ring buffer of 8K samples, which is
+consumed by reading from the "sample" (pipe) debugfs file interface.
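A short usage sketch, using the "/debug" mountpoint convention from the text
above and the files it lists:

    # echo 1 > /debug/hwlat_detector/enable     (start sampling, resets count)
    # cat /debug/hwlat_detector/count           (latency spikes seen so far)
    # cat /debug/hwlat_detector/max             (worst latency observed, usecs)
    # cat /debug/hwlat_detector/sample          (raw samples; pipe-like, may block)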
diff --git a/Documentation/hwmon/hpfall.c b/Documentation/hwmon/hpfall.c
index bbea1ccfd46a..d2f6711b468b 100644
--- a/Documentation/hwmon/hpfall.c
+++ b/Documentation/hwmon/hpfall.c
@@ -57,45 +57,43 @@ void ignore_me(void)
{
protect(0);
set_led(0);
-
}
-int main(int argc, char* argv[])
+int main(int argc, char *argv[])
{
- int fd, ret;
+ int fd, ret;
- fd = open("/dev/freefall", O_RDONLY);
- if (fd < 0) {
- perror("open");
- return EXIT_FAILURE;
- }
+ fd = open("/dev/freefall", O_RDONLY);
+ if (fd < 0) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
signal(SIGALRM, ignore_me);
- for (;;) {
- unsigned char count;
-
- ret = read(fd, &count, sizeof(count));
- alarm(0);
- if ((ret == -1) && (errno == EINTR)) {
- /* Alarm expired, time to unpark the heads */
- continue;
- }
-
- if (ret != sizeof(count)) {
- perror("read");
- break;
- }
-
- protect(21);
- set_led(1);
- if (1 || on_ac() || lid_open()) {
- alarm(2);
- } else {
- alarm(20);
- }
- }
+ for (;;) {
+ unsigned char count;
+
+ ret = read(fd, &count, sizeof(count));
+ alarm(0);
+ if ((ret == -1) && (errno == EINTR)) {
+ /* Alarm expired, time to unpark the heads */
+ continue;
+ }
+
+ if (ret != sizeof(count)) {
+ perror("read");
+ break;
+ }
+
+ protect(21);
+ set_led(1);
+ if (1 || on_ac() || lid_open())
+ alarm(2);
+ else
+ alarm(20);
+ }
- close(fd);
- return EXIT_SUCCESS;
+ close(fd);
+ return EXIT_SUCCESS;
}
diff --git a/Documentation/hwmon/tmp421 b/Documentation/hwmon/tmp421
new file mode 100644
index 000000000000..0cf07f824741
--- /dev/null
+++ b/Documentation/hwmon/tmp421
@@ -0,0 +1,36 @@
+Kernel driver tmp421
+====================
+
+Supported chips:
+ * Texas Instruments TMP421
+ Prefix: 'tmp421'
+ Addresses scanned: I2C 0x2a, 0x4c, 0x4d, 0x4e and 0x4f
+ Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp421.html
+ * Texas Instruments TMP422
+ Prefix: 'tmp422'
+ Addresses scanned: I2C 0x2a, 0x4c, 0x4d, 0x4e and 0x4f
+ Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp421.html
+ * Texas Instruments TMP423
+ Prefix: 'tmp423'
+ Addresses scanned: I2C 0x2a, 0x4c, 0x4d, 0x4e and 0x4f
+ Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp421.html
+
+Authors:
+ Andre Prendel <andre.prendel@gmx.de>
+
+Description
+-----------
+
+This driver implements support for Texas Instruments TMP421, TMP422
+and TMP423 temperature sensor chips. These chips implement one local
+and up to one (TMP421), up to two (TMP422) or up to three (TMP423)
+remote sensors. Temperature is measured in degrees Celsius. The chips
+are wired over I2C/SMBus and specified over a temperature range of -40
+to +125 degrees Celsius. Resolution for both the local and remote
+channels is 0.0625 degree C.
+
+The chips support only temperature measurement. The driver exports
+the temperature values via the following sysfs files:
+
+temp[1-4]_input
+temp[2-4]_fault
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d08759aa0903..6f98ed70253d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1915,6 +1915,12 @@ and is between 256 and 4096 characters. It is defined in the file
Format: { 0 | 1 }
See arch/parisc/kernel/pdc_chassis.c
+ percpu_alloc= [X86] Select which percpu first chunk allocator to use.
+ Allowed values are "lpage", "embed" and "4k".
+ See comments in arch/x86/kernel/setup_percpu.c for
+ details on each allocator. This parameter is primarily
+ for debugging and performance comparison.
+
pf. [PARIDE]
See Documentation/blockdev/paride.txt.
@@ -2467,7 +2473,13 @@ and is between 256 and 4096 characters. It is defined in the file
tp720= [HW,PS2]
- trace_buf_size=nn[KMG] [ftrace] will set tracing buffer size.
+ trace_buf_size=nn[KMG]
+ [FTRACE] will set tracing buffer size.
+
+ trace_event=[event-list]
+ [FTRACE] Set and start specified trace events in order
+ to facilitate early boot debugging.
+ See also Documentation/trace/events.txt
trix= [HW,OSS] MediaTrix AudioTrix Pro
Format:
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 0112da3b9ab8..89068030b01b 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -16,13 +16,17 @@ Usage
-----
CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel
-thread scans the memory every 10 minutes (by default) and prints any new
-unreferenced objects found. To trigger an intermediate scan and display
-all the possible memory leaks:
+thread scans the memory every 10 minutes (by default) and prints the
+number of new unreferenced objects found. To display the details of all
+the possible memory leaks:
# mount -t debugfs nodev /sys/kernel/debug/
# cat /sys/kernel/debug/kmemleak
+To trigger an intermediate memory scan:
+
+ # echo scan > /sys/kernel/debug/kmemleak
+
Note that the orphan objects are listed in the order they were allocated
and one object at the beginning of the list may cause other subsequent
objects to be reported as orphan.
@@ -31,16 +35,21 @@ Memory scanning parameters can be modified at run-time by writing to the
/sys/kernel/debug/kmemleak file. The following parameters are supported:
off - disable kmemleak (irreversible)
- stack=on - enable the task stacks scanning
+ stack=on - enable the task stacks scanning (default)
stack=off - disable the tasks stacks scanning
- scan=on - start the automatic memory scanning thread
+ scan=on - start the automatic memory scanning thread (default)
scan=off - stop the automatic memory scanning thread
- scan=<secs> - set the automatic memory scanning period in seconds (0
- to disable it)
+ scan=<secs> - set the automatic memory scanning period in seconds
+ (default 600, 0 to stop the automatic scanning)
+ scan - trigger a memory scan
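The parameters above are written to the same debugfs file, for example:

    # echo scan=60 > /sys/kernel/debug/kmemleak     (scan every 60 seconds)
    # echo stack=off > /sys/kernel/debug/kmemleak   (skip task stacks)
    # echo off > /sys/kernel/debug/kmemleak         (disable kmemleak, irreversible)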
Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
the kernel command line.
+Memory may be allocated or freed before kmemleak is initialised and
+these actions are stored in an early log buffer. The size of this buffer
+is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option.
+
Basic Algorithm
---------------
diff --git a/Documentation/leds-lp3944.txt b/Documentation/leds-lp3944.txt
new file mode 100644
index 000000000000..c6eda18b15ef
--- /dev/null
+++ b/Documentation/leds-lp3944.txt
@@ -0,0 +1,50 @@
+Kernel driver lp3944
+====================
+
+ * National Semiconductor LP3944 Fun-light Chip
+ Prefix: 'lp3944'
+ Addresses scanned: None (see the Notes section below)
+ Datasheet: Publicly available at the National Semiconductor website
+ http://www.national.com/pf/LP/LP3944.html
+
+Authors:
+ Antonio Ospite <ospite@studenti.unina.it>
+
+
+Description
+-----------
+The LP3944 is a helper chip that can drive up to 8 LEDs, with two programmable
+DIM modes; it could even be used as a GPIO expander, but this driver assumes it
+is used as an LED controller.
+
+The DIM modes are used to set _blink_ patterns for LEDs; a pattern is
+specified by supplying two parameters:
+ - period: from 0s to 1.6s
+ - duty cycle: percentage of the period the LED is on, from 0 to 100
+
+Setting an LED in DIM0 or DIM1 mode makes it blink according to the pattern.
+See the datasheet for details.
+
+The LP3944 can be found on the Motorola A910 smartphone, where it drives the
+RGB LEDs, the camera flash light and the LCD power.
+
+
+Notes
+-----
+The chip is used mainly in embedded contexts, so this driver expects it is
+registered using the i2c_board_info mechanism.
+
+To register the chip at address 0x60 on adapter 0, set the platform data
+according to include/linux/leds-lp3944.h, then set the i2c board info:
+
+ static struct i2c_board_info __initdata a910_i2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("lp3944", 0x60),
+ .platform_data = &a910_lp3944_leds,
+ },
+ };
+
+and register it in the platform init function:
+
+ i2c_register_board_info(0, a910_i2c_board_info,
+ ARRAY_SIZE(a910_i2c_board_info));
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index c6cd4956047c..9f16c5178b66 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -76,6 +76,11 @@ STATUS - this attribute represents operating status (charging, full,
discharging (i.e. powering a load), etc.). This corresponds to
BATTERY_STATUS_* values, as defined in battery.h.
+CHARGE_TYPE - batteries can typically charge at different rates.
+This defines trickle and fast charges. For batteries that
+are already charged or discharging, 'n/a' can be displayed (or
+'unknown', if the status is not known).
+
HEALTH - represents health of the battery, values corresponds to
POWER_SUPPLY_HEALTH_*, defined in battery.h.
@@ -108,6 +113,8 @@ relative, time-based measurements.
ENERGY_FULL, ENERGY_EMPTY - same as above but for energy.
CAPACITY - capacity in percents.
+CAPACITY_LEVEL - capacity level. This corresponds to
+POWER_SUPPLY_CAPACITY_LEVEL_*.
TEMP - temperature of the power supply.
TEMP_AMBIENT - ambient temperature.
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 8d999d862d0e..79f533f38c61 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1238,1122 +1238,7 @@ descriptions for the SOC devices for which new nodes have been
defined; this list will expand as more and more SOC-containing
platforms are moved over to use the flattened-device-tree model.
- a) PHY nodes
-
- Required properties:
-
- - device_type : Should be "ethernet-phy"
- - interrupts : <a b> where a is the interrupt number and b is a
- field that represents an encoding of the sense and level
- information for the interrupt. This should be encoded based on
- the information in section 2) depending on the type of interrupt
- controller you have.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- - reg : The ID number for the phy, usually a small integer
- - linux,phandle : phandle for this node; likely referenced by an
- ethernet controller node.
-
-
- Example:
-
- ethernet-phy@0 {
- linux,phandle = <2452000>
- interrupt-parent = <40000>;
- interrupts = <35 1>;
- reg = <0>;
- device_type = "ethernet-phy";
- };
-
-
- b) Interrupt controllers
-
- Some SOC devices contain interrupt controllers that are different
- from the standard Open PIC specification. The SOC device nodes for
- these types of controllers should be specified just like a standard
- OpenPIC controller. Sense and level information should be encoded
- as specified in section 2) of this chapter for each device that
- specifies an interrupt.
-
- Example :
-
- pic@40000 {
- linux,phandle = <40000>;
- interrupt-controller;
- #address-cells = <0>;
- reg = <40000 40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- c) 4xx/Axon EMAC ethernet nodes
-
- The EMAC ethernet controller in IBM and AMCC 4xx chips, and also
- the Axon bridge. To operate this needs to interact with a ths
- special McMAL DMA controller, and sometimes an RGMII or ZMII
- interface. In addition to the nodes and properties described
- below, the node for the OPB bus on which the EMAC sits must have a
- correct clock-frequency property.
-
- i) The EMAC node itself
-
- Required properties:
- - device_type : "network"
-
- - compatible : compatible list, contains 2 entries, first is
- "ibm,emac-CHIP" where CHIP is the host ASIC (440gx,
- 405gp, Axon) and second is either "ibm,emac" or
- "ibm,emac4". For Axon, thus, we have: "ibm,emac-axon",
- "ibm,emac4"
- - interrupts : <interrupt mapping for EMAC IRQ and WOL IRQ>
- - interrupt-parent : optional, if needed for interrupt mapping
- - reg : <registers mapping>
- - local-mac-address : 6 bytes, MAC address
- - mal-device : phandle of the associated McMAL node
- - mal-tx-channel : 1 cell, index of the tx channel on McMAL associated
- with this EMAC
- - mal-rx-channel : 1 cell, index of the rx channel on McMAL associated
- with this EMAC
- - cell-index : 1 cell, hardware index of the EMAC cell on a given
- ASIC (typically 0x0 and 0x1 for EMAC0 and EMAC1 on
- each Axon chip)
- - max-frame-size : 1 cell, maximum frame size supported in bytes
- - rx-fifo-size : 1 cell, Rx fifo size in bytes for 10 and 100 Mb/sec
- operations.
- For Axon, 2048
- - tx-fifo-size : 1 cell, Tx fifo size in bytes for 10 and 100 Mb/sec
- operations.
- For Axon, 2048.
- - fifo-entry-size : 1 cell, size of a fifo entry (used to calculate
- thresholds).
- For Axon, 0x00000010
- - mal-burst-size : 1 cell, MAL burst size (used to calculate thresholds)
- in bytes.
- For Axon, 0x00000100 (I think ...)
- - phy-mode : string, mode of operations of the PHY interface.
- Supported values are: "mii", "rmii", "smii", "rgmii",
- "tbi", "gmii", rtbi", "sgmii".
- For Axon on CAB, it is "rgmii"
- - mdio-device : 1 cell, required iff using shared MDIO registers
- (440EP). phandle of the EMAC to use to drive the
- MDIO lines for the PHY used by this EMAC.
- - zmii-device : 1 cell, required iff connected to a ZMII. phandle of
- the ZMII device node
- - zmii-channel : 1 cell, required iff connected to a ZMII. Which ZMII
- channel or 0xffffffff if ZMII is only used for MDIO.
- - rgmii-device : 1 cell, required iff connected to an RGMII. phandle
- of the RGMII device node.
- For Axon: phandle of plb5/plb4/opb/rgmii
- - rgmii-channel : 1 cell, required iff connected to an RGMII. Which
- RGMII channel is used by this EMAC.
- Fox Axon: present, whatever value is appropriate for each
- EMAC, that is the content of the current (bogus) "phy-port"
- property.
-
- Optional properties:
- - phy-address : 1 cell, optional, MDIO address of the PHY. If absent,
- a search is performed.
- - phy-map : 1 cell, optional, bitmap of addresses to probe the PHY
- for, used if phy-address is absent. bit 0x00000001 is
- MDIO address 0.
- For Axon it can be absent, though my current driver
- doesn't handle phy-address yet so for now, keep
- 0x00ffffff in it.
- - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
- operations (if absent the value is the same as
- rx-fifo-size). For Axon, either absent or 2048.
- - tx-fifo-size-gige : 1 cell, Tx fifo size in bytes for 1000 Mb/sec
- operations (if absent the value is the same as
- tx-fifo-size). For Axon, either absent or 2048.
- - tah-device : 1 cell, optional. If connected to a TAH engine for
- offload, phandle of the TAH device node.
- - tah-channel : 1 cell, optional. If appropriate, channel used on the
- TAH engine.
-
- Example:
-
- EMAC0: ethernet@40000800 {
- device_type = "network";
- compatible = "ibm,emac-440gp", "ibm,emac";
- interrupt-parent = <&UIC1>;
- interrupts = <1c 4 1d 4>;
- reg = <40000800 70>;
- local-mac-address = [00 04 AC E3 1B 1E];
- mal-device = <&MAL0>;
- mal-tx-channel = <0 1>;
- mal-rx-channel = <0>;
- cell-index = <0>;
- max-frame-size = <5dc>;
- rx-fifo-size = <1000>;
- tx-fifo-size = <800>;
- phy-mode = "rmii";
- phy-map = <00000001>;
- zmii-device = <&ZMII0>;
- zmii-channel = <0>;
- };
-
- ii) McMAL node
-
- Required properties:
- - device_type : "dma-controller"
- - compatible : compatible list, containing 2 entries, first is
- "ibm,mcmal-CHIP" where CHIP is the host ASIC (like
- emac) and the second is either "ibm,mcmal" or
- "ibm,mcmal2".
- For Axon, "ibm,mcmal-axon","ibm,mcmal2"
- - interrupts : <interrupt mapping for the MAL interrupts sources:
- 5 sources: tx_eob, rx_eob, serr, txde, rxde>.
- For Axon: This is _different_ from the current
- firmware. We use the "delayed" interrupts for txeob
- and rxeob. Thus we end up with mapping those 5 MPIC
- interrupts, all level positive sensitive: 10, 11, 32,
- 33, 34 (in decimal)
- - dcr-reg : < DCR registers range >
- - dcr-parent : if needed for dcr-reg
- - num-tx-chans : 1 cell, number of Tx channels
- - num-rx-chans : 1 cell, number of Rx channels
-
- iii) ZMII node
-
- Required properties:
- - compatible : compatible list, containing 2 entries, first is
- "ibm,zmii-CHIP" where CHIP is the host ASIC (like
- EMAC) and the second is "ibm,zmii".
- For Axon, there is no ZMII node.
- - reg : <registers mapping>
-
- iv) RGMII node
-
- Required properties:
- - compatible : compatible list, containing 2 entries, first is
- "ibm,rgmii-CHIP" where CHIP is the host ASIC (like
- EMAC) and the second is "ibm,rgmii".
- For Axon, "ibm,rgmii-axon","ibm,rgmii"
- - reg : <registers mapping>
- - revision : as provided by the RGMII new version register if
- available.
- For Axon: 0x0000012a
-
- d) Xilinx IP cores
-
- The Xilinx EDK toolchain ships with a set of IP cores (devices) for use
- in Xilinx Spartan and Virtex FPGAs. The devices cover the whole range
- of standard device types (network, serial, etc.) and miscellaneous
- devices (gpio, LCD, spi, etc). Also, since these devices are
- implemented within the fpga fabric every instance of the device can be
- synthesised with different options that change the behaviour.
-
- Each IP-core has a set of parameters which the FPGA designer can use to
- control how the core is synthesized. Historically, the EDK tool would
- extract the device parameters relevant to device drivers and copy them
- into an 'xparameters.h' in the form of #define symbols. This tells the
- device drivers how the IP cores are configured, but it requres the kernel
- to be recompiled every time the FPGA bitstream is resynthesized.
-
- The new approach is to export the parameters into the device tree and
- generate a new device tree each time the FPGA bitstream changes. The
- parameters which used to be exported as #defines will now become
- properties of the device node. In general, device nodes for IP-cores
- will take the following form:
-
- (name): (generic-name)@(base-address) {
- compatible = "xlnx,(ip-core-name)-(HW_VER)"
- [, (list of compatible devices), ...];
- reg = <(baseaddr) (size)>;
- interrupt-parent = <&interrupt-controller-phandle>;
- interrupts = < ... >;
- xlnx,(parameter1) = "(string-value)";
- xlnx,(parameter2) = <(int-value)>;
- };
-
- (generic-name): an open firmware-style name that describes the
- generic class of device. Preferably, this is one word, such
- as 'serial' or 'ethernet'.
- (ip-core-name): the name of the ip block (given after the BEGIN
- directive in system.mhs). Should be in lowercase
- and all underscores '_' converted to dashes '-'.
- (name): is derived from the "PARAMETER INSTANCE" value.
- (parameter#): C_* parameters from system.mhs. The C_ prefix is
- dropped from the parameter name, the name is converted
- to lowercase and all underscore '_' characters are
- converted to dashes '-'.
- (baseaddr): the baseaddr parameter value (often named C_BASEADDR).
- (HW_VER): from the HW_VER parameter.
- (size): the address range size (often C_HIGHADDR - C_BASEADDR + 1).
-
- Typically, the compatible list will include the exact IP core version
- followed by an older IP core version which implements the same
- interface or any other device with the same interface.
-
- 'reg', 'interrupt-parent' and 'interrupts' are all optional properties.
-
- For example, the following block from system.mhs:
-
- BEGIN opb_uartlite
- PARAMETER INSTANCE = opb_uartlite_0
- PARAMETER HW_VER = 1.00.b
- PARAMETER C_BAUDRATE = 115200
- PARAMETER C_DATA_BITS = 8
- PARAMETER C_ODD_PARITY = 0
- PARAMETER C_USE_PARITY = 0
- PARAMETER C_CLK_FREQ = 50000000
- PARAMETER C_BASEADDR = 0xEC100000
- PARAMETER C_HIGHADDR = 0xEC10FFFF
- BUS_INTERFACE SOPB = opb_7
- PORT OPB_Clk = CLK_50MHz
- PORT Interrupt = opb_uartlite_0_Interrupt
- PORT RX = opb_uartlite_0_RX
- PORT TX = opb_uartlite_0_TX
- PORT OPB_Rst = sys_bus_reset_0
- END
-
- becomes the following device tree node:
-
- opb_uartlite_0: serial@ec100000 {
- device_type = "serial";
- compatible = "xlnx,opb-uartlite-1.00.b";
- reg = <ec100000 10000>;
- interrupt-parent = <&opb_intc_0>;
- interrupts = <1 0>; // got this from the opb_intc parameters
- current-speed = <d#115200>; // standard serial device prop
- clock-frequency = <d#50000000>; // standard serial device prop
- xlnx,data-bits = <8>;
- xlnx,odd-parity = <0>;
- xlnx,use-parity = <0>;
- };
-
- Some IP cores actually implement 2 or more logical devices. In
- this case, the device should still describe the whole IP core with
- a single node and add a child node for each logical device. The
- ranges property can be used to translate from parent IP-core to the
- registers of each device. In addition, the parent node should be
- compatible with the bus type 'xlnx,compound', and should contain
- #address-cells and #size-cells, as with any other bus. (Note: this
- makes the assumption that both logical devices have the same bus
- binding. If this is not true, then separate nodes should be used
- for each logical device). The 'cell-index' property can be used to
- enumerate logical devices within an IP core. For example, the
- following is the system.mhs entry for the dual ps2 controller found
- on the ml403 reference design.
-
- BEGIN opb_ps2_dual_ref
- PARAMETER INSTANCE = opb_ps2_dual_ref_0
- PARAMETER HW_VER = 1.00.a
- PARAMETER C_BASEADDR = 0xA9000000
- PARAMETER C_HIGHADDR = 0xA9001FFF
- BUS_INTERFACE SOPB = opb_v20_0
- PORT Sys_Intr1 = ps2_1_intr
- PORT Sys_Intr2 = ps2_2_intr
- PORT Clkin1 = ps2_clk_rx_1
- PORT Clkin2 = ps2_clk_rx_2
- PORT Clkpd1 = ps2_clk_tx_1
- PORT Clkpd2 = ps2_clk_tx_2
- PORT Rx1 = ps2_d_rx_1
- PORT Rx2 = ps2_d_rx_2
- PORT Txpd1 = ps2_d_tx_1
- PORT Txpd2 = ps2_d_tx_2
- END
-
- It would result in the following device tree nodes:
-
- opb_ps2_dual_ref_0: opb-ps2-dual-ref@a9000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,compound";
- ranges = <0 a9000000 2000>;
- // If this device had extra parameters, then they would
- // go here.
- ps2@0 {
- compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
- reg = <0 40>;
- interrupt-parent = <&opb_intc_0>;
- interrupts = <3 0>;
- cell-index = <0>;
- };
- ps2@1000 {
- compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
- reg = <1000 40>;
- interrupt-parent = <&opb_intc_0>;
- interrupts = <3 0>;
- cell-index = <0>;
- };
- };
-
- Also, the system.mhs file defines bus attachments from the processor
- to the devices. The device tree structure should reflect the bus
- attachments. Again an example; this system.mhs fragment:
-
- BEGIN ppc405_virtex4
- PARAMETER INSTANCE = ppc405_0
- PARAMETER HW_VER = 1.01.a
- BUS_INTERFACE DPLB = plb_v34_0
- BUS_INTERFACE IPLB = plb_v34_0
- END
-
- BEGIN opb_intc
- PARAMETER INSTANCE = opb_intc_0
- PARAMETER HW_VER = 1.00.c
- PARAMETER C_BASEADDR = 0xD1000FC0
- PARAMETER C_HIGHADDR = 0xD1000FDF
- BUS_INTERFACE SOPB = opb_v20_0
- END
-
- BEGIN opb_uart16550
- PARAMETER INSTANCE = opb_uart16550_0
- PARAMETER HW_VER = 1.00.d
- PARAMETER C_BASEADDR = 0xa0000000
- PARAMETER C_HIGHADDR = 0xa0001FFF
- BUS_INTERFACE SOPB = opb_v20_0
- END
-
- BEGIN plb_v34
- PARAMETER INSTANCE = plb_v34_0
- PARAMETER HW_VER = 1.02.a
- END
-
- BEGIN plb_bram_if_cntlr
- PARAMETER INSTANCE = plb_bram_if_cntlr_0
- PARAMETER HW_VER = 1.00.b
- PARAMETER C_BASEADDR = 0xFFFF0000
- PARAMETER C_HIGHADDR = 0xFFFFFFFF
- BUS_INTERFACE SPLB = plb_v34_0
- END
-
- BEGIN plb2opb_bridge
- PARAMETER INSTANCE = plb2opb_bridge_0
- PARAMETER HW_VER = 1.01.a
- PARAMETER C_RNG0_BASEADDR = 0x20000000
- PARAMETER C_RNG0_HIGHADDR = 0x3FFFFFFF
- PARAMETER C_RNG1_BASEADDR = 0x60000000
- PARAMETER C_RNG1_HIGHADDR = 0x7FFFFFFF
- PARAMETER C_RNG2_BASEADDR = 0x80000000
- PARAMETER C_RNG2_HIGHADDR = 0xBFFFFFFF
- PARAMETER C_RNG3_BASEADDR = 0xC0000000
- PARAMETER C_RNG3_HIGHADDR = 0xDFFFFFFF
- BUS_INTERFACE SPLB = plb_v34_0
- BUS_INTERFACE MOPB = opb_v20_0
- END
-
- Gives this device tree (some properties removed for clarity):
-
- plb@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,plb-v34-1.02.a";
- device_type = "ibm,plb";
- ranges; // 1:1 translation
-
- plb_bram_if_cntrl_0: bram@ffff0000 {
- reg = <ffff0000 10000>;
- }
-
- opb@20000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <20000000 20000000 20000000
- 60000000 60000000 20000000
- 80000000 80000000 40000000
- c0000000 c0000000 20000000>;
-
- opb_uart16550_0: serial@a0000000 {
- reg = <a00000000 2000>;
- };
-
- opb_intc_0: interrupt-controller@d1000fc0 {
- reg = <d1000fc0 20>;
- };
- };
- };
-
- That covers the general approach to binding xilinx IP cores into the
- device tree. The following are bindings for specific devices:
-
- i) Xilinx ML300 Framebuffer
-
- Simple framebuffer device from the ML300 reference design (also on the
- ML403 reference design as well as others).
-
- Optional properties:
- - resolution = <xres yres> : pixel resolution of framebuffer. Some
- implementations use a different resolution.
- Default is <d#640 d#480>
- - virt-resolution = <xvirt yvirt> : Size of framebuffer in memory.
- Default is <d#1024 d#480>.
- - rotate-display (empty) : rotate display 180 degrees.
-
- ii) Xilinx SystemACE
-
- The Xilinx SystemACE device is used to program FPGAs from an FPGA
- bitstream stored on a CF card. It can also be used as a generic CF
- interface device.
-
- Optional properties:
- - 8-bit (empty) : Set this property for SystemACE in 8 bit mode
-
- iii) Xilinx EMAC and Xilinx TEMAC
-
- Xilinx Ethernet devices. In addition to general xilinx properties
- listed above, nodes for these devices should include a phy-handle
- property, and may include other common network device properties
- like local-mac-address.
-
- iv) Xilinx Uartlite
-
- Xilinx uartlite devices are simple fixed speed serial ports.
-
- Required properties:
- - current-speed : Baud rate of uartlite
-
- v) Xilinx hwicap
-
- Xilinx hwicap devices provide access to the configuration logic
- of the FPGA through the Internal Configuration Access Port
- (ICAP). The ICAP enables partial reconfiguration of the FPGA,
- readback of the configuration information, and some control over
- 'warm boots' of the FPGA fabric.
-
- Required properties:
- - xlnx,family : The family of the FPGA, necessary since the
- capabilities of the underlying ICAP hardware
- differ between different families. May be
- 'virtex2p', 'virtex4', or 'virtex5'.
-
- vi) Xilinx Uart 16550
-
- Xilinx UART 16550 devices are very similar to the NS16550 but with
- different register spacing and an offset from the base address.
-
- Required properties:
- - clock-frequency : Frequency of the clock input
- - reg-offset : A value of 3 is required
- - reg-shift : A value of 2 is required
-
- e) USB EHCI controllers
-
- Required properties:
- - compatible : should be "usb-ehci".
- - reg : should contain at least address and length of the standard EHCI
- register set for the device. Optional platform-dependent registers
- (debug-port or other) can be also specified here, but only after
- definition of standard EHCI registers.
- - interrupts : one EHCI interrupt should be described here.
- If device registers are implemented in big endian mode, the device
- node should have "big-endian-regs" property.
- If controller implementation operates with big endian descriptors,
- "big-endian-desc" property should be specified.
- If both big endian registers and descriptors are used by the controller
- implementation, "big-endian" property can be specified instead of having
- both "big-endian-regs" and "big-endian-desc".
-
- Example (Sequoia 440EPx):
- ehci@e0000300 {
- compatible = "ibm,usb-ehci-440epx", "usb-ehci";
- interrupt-parent = <&UIC0>;
- interrupts = <1a 4>;
- reg = <0 e0000300 90 0 e0000390 70>;
- big-endian;
- };
-
- f) MDIO on GPIOs
-
- Currently defined compatibles:
- - virtual,gpio-mdio
-
- MDC and MDIO lines connected to GPIO controllers are listed in the
- gpios property as described in section IX.1 in the following order:
-
- MDC, MDIO.
-
- Example:
-
- mdio {
- compatible = "virtual,mdio-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
- gpios = <&qe_pio_a 11
- &qe_pio_c 6>;
- };
-
- g) SPI (Serial Peripheral Interface) busses
-
- SPI busses can be described with a node for the SPI master device
- and a set of child nodes for each SPI slave on the bus. For this
- discussion, it is assumed that the system's SPI controller is in
- SPI master mode. This binding does not describe SPI controllers
- in slave mode.
-
- The SPI master node requires the following properties:
- - #address-cells - number of cells required to define a chip select
- address on the SPI bus.
- - #size-cells - should be zero.
- - compatible - name of SPI bus controller following generic names
- recommended practice.
- No other properties are required in the SPI bus node. It is assumed
- that a driver for an SPI bus device will understand that it is an SPI bus.
- However, the binding does not attempt to define the specific method for
- assigning chip select numbers. Since SPI chip select configuration is
- flexible and non-standardized, it is left out of this binding with the
- assumption that board specific platform code will be used to manage
- chip selects. Individual drivers can define additional properties to
- support describing the chip select layout.
-
- SPI slave nodes must be children of the SPI master node and can
- contain the following properties.
- - reg - (required) chip select address of device.
- - compatible - (required) name of SPI device following generic names
- recommended practice
- - spi-max-frequency - (required) Maximum SPI clocking speed of device in Hz
- - spi-cpol - (optional) Empty property indicating device requires
- inverse clock polarity (CPOL) mode
- - spi-cpha - (optional) Empty property indicating device requires
- shifted clock phase (CPHA) mode
- - spi-cs-high - (optional) Empty property indicating device requires
- chip select active high
-
- SPI example for an MPC5200 SPI bus:
- spi@f00 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
- reg = <0xf00 0x20>;
- interrupts = <2 13 0 2 14 0>;
- interrupt-parent = <&mpc5200_pic>;
-
- ethernet-switch@0 {
- compatible = "micrel,ks8995m";
- spi-max-frequency = <1000000>;
- reg = <0>;
- };
-
- codec@1 {
- compatible = "ti,tlv320aic26";
- spi-max-frequency = <100000>;
- reg = <1>;
- };
- };
-
-VII - Marvell Discovery mv64[345]6x System Controller chips
-===========================================================
-
-The Marvell mv64[345]60 series of system controller chips contain
-many of the peripherals needed to implement a complete computer
-system. In this section, we define device tree nodes to describe
-the system controller chip itself and each of the peripherals
-which it contains. Compatible string values for each node are
-prefixed with the string "marvell,", for Marvell Technology Group Ltd.
-
-1) The /system-controller node
-
- This node is used to represent the system-controller and must be
- present when the system uses a system controller chip. The top-level
- system-controller node contains information that is global to all
- devices within the system controller chip. The node name begins
- with "system-controller" followed by the unit address, which is
- the base address of the memory-mapped register set for the system
- controller chip.
-
- Required properties:
-
- - ranges : Describes the translation of system controller addresses
- for memory mapped registers.
- - clock-frequency: Contains the main clock frequency for the system
- controller chip.
- - reg : This property defines the address and size of the
- memory-mapped registers contained within the system controller
- chip. The address specified in the "reg" property should match
- the unit address of the system-controller node.
- - #address-cells : Address representation for system controller
- devices. This field represents the number of cells needed to
- represent the address of the memory-mapped registers of devices
- within the system controller chip.
- - #size-cells : Size representation for the memory-mapped
- registers within the system controller chip.
- - #interrupt-cells : Defines the width of cells used to represent
- interrupts.
-
- Optional properties:
-
- - model : The specific model of the system controller chip. Such
- as, "mv64360", "mv64460", or "mv64560".
- - compatible : A string identifying the compatibility identifiers
- of the system controller chip.
-
- The system-controller node contains child nodes for each system
- controller device that the platform uses. Nodes should not be created
- for devices which exist on the system controller chip but are not used.
-
- Example Marvell Discovery mv64360 system-controller node:
-
- system-controller@f1000000 { /* Marvell Discovery mv64360 */
- #address-cells = <1>;
- #size-cells = <1>;
- model = "mv64360"; /* Default */
- compatible = "marvell,mv64360";
- clock-frequency = <133333333>;
- reg = <0xf1000000 0x10000>;
- virtual-reg = <0xf1000000>;
- ranges = <0x88000000 0x88000000 0x1000000 /* PCI 0 I/O Space */
- 0x80000000 0x80000000 0x8000000 /* PCI 0 MEM Space */
- 0xa0000000 0xa0000000 0x4000000 /* User FLASH */
- 0x00000000 0xf1000000 0x0010000 /* Bridge's regs */
- 0xf2000000 0xf2000000 0x0040000>;/* Integrated SRAM */
-
- [ child node definitions... ]
- }
-
-2) Child nodes of /system-controller
-
- a) Marvell Discovery MDIO bus
-
- The MDIO is a bus to which the PHY devices are connected. For each
- device that exists on this bus, a child node should be created. See
- the definition of the PHY node below for an example of how to define
- a PHY.
-
- Required properties:
- - #address-cells : Should be <1>
- - #size-cells : Should be <0>
- - device_type : Should be "mdio"
- - compatible : Should be "marvell,mv64360-mdio"
-
- Example:
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- device_type = "mdio";
- compatible = "marvell,mv64360-mdio";
-
- ethernet-phy@0 {
- ......
- };
- };
-
-
- b) Marvell Discovery ethernet controller
-
- The Discovery ethernet controller is described with two levels
- of nodes. The first level describes an ethernet silicon block
- and the second level describes up to 3 ethernet nodes within
- that block. The reason for the multiple levels is that the
- registers for the node are interleaved within a single set
- of registers. The "ethernet-block" level describes the
- shared register set, and the "ethernet" nodes describe ethernet
- port-specific properties.
-
- Ethernet block node
-
- Required properties:
- - #address-cells : <1>
- - #size-cells : <0>
- - compatible : "marvell,mv64360-eth-block"
- - reg : Offset and length of the register set for this block
-
- Example Discovery Ethernet block node:
- ethernet-block@2000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "marvell,mv64360-eth-block";
- reg = <0x2000 0x2000>;
- ethernet@0 {
- .......
- };
- };
-
- Ethernet port node
-
- Required properties:
- - device_type : Should be "network".
- - compatible : Should be "marvell,mv64360-eth".
- - reg : Should be <0>, <1>, or <2>, according to which registers
- within the silicon block the device uses.
- - interrupts : <a> where a is the interrupt number for the port.
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
- - phy : the phandle for the PHY connected to this ethernet
- controller.
- - local-mac-address : 6 bytes, MAC address
-
- Example Discovery Ethernet port node:
- ethernet@0 {
- device_type = "network";
- compatible = "marvell,mv64360-eth";
- reg = <0>;
- interrupts = <32>;
- interrupt-parent = <&PIC>;
- phy = <&PHY0>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- };
-
-
-
- c) Marvell Discovery PHY nodes
-
- Required properties:
- - device_type : Should be "ethernet-phy"
- - interrupts : <a> where a is the interrupt number for this phy.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- - reg : The ID number for the phy, usually a small integer
-
- Example Discovery PHY node:
- ethernet-phy@1 {
- device_type = "ethernet-phy";
- compatible = "broadcom,bcm5421";
- interrupts = <76>; /* GPP 12 */
- interrupt-parent = <&PIC>;
- reg = <1>;
- };
-
-
- d) Marvell Discovery SDMA nodes
-
- Represent DMA hardware associated with the MPSC (multiprotocol
- serial controllers).
-
- Required properties:
- - compatible : "marvell,mv64360-sdma"
- - reg : Offset and length of the register set for this device
- - interrupts : <a> where a is the interrupt number for the DMA
- device.
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery SDMA node:
- sdma@4000 {
- compatible = "marvell,mv64360-sdma";
- reg = <0x4000 0xc18>;
- virtual-reg = <0xf1004000>;
- interrupts = <36>;
- interrupt-parent = <&PIC>;
- };
-
-
- e) Marvell Discovery BRG nodes
-
- Represent baud rate generator hardware associated with the MPSC
- (multiprotocol serial controllers).
-
- Required properties:
- - compatible : "marvell,mv64360-brg"
- - reg : Offset and length of the register set for this device
- - clock-src : A value from 0 to 15 which selects the clock
- source for the baud rate generator. This value corresponds
- to the CLKS value in the BRGx configuration register. See
- the mv64x60 User's Manual.
- - clock-frequency : The frequency (in Hz) of the baud rate
- generator's input clock.
- - current-speed : The current speed setting (presumably by
- firmware) of the baud rate generator.
-
- Example Discovery BRG node:
- brg@b200 {
- compatible = "marvell,mv64360-brg";
- reg = <0xb200 0x8>;
- clock-src = <8>;
- clock-frequency = <133333333>;
- current-speed = <9600>;
- };
-
-
- f) Marvell Discovery CUNIT nodes
-
- Represent the Serial Communications Unit device hardware.
-
- Required properties:
- - reg : Offset and length of the register set for this device
-
- Example Discovery CUNIT node:
- cunit@f200 {
- reg = <0xf200 0x200>;
- };
-
-
- g) Marvell Discovery MPSCROUTING nodes
-
- Represent the Discovery's MPSC routing hardware
-
- Required properties:
- - reg : Offset and length of the register set for this device
-
- Example Discovery MPSCROUTING node:
- mpscrouting@b400 {
- reg = <0xb400 0xc>;
- };
-
-
- h) Marvell Discovery MPSCINTR nodes
-
- Represent the Discovery's MPSC DMA interrupt hardware registers
- (SDMA cause and mask registers).
-
- Required properties:
- - reg : Offset and length of the register set for this device
-
- Example Discovery MPSCINTR node:
- mpscintr@b800 {
- reg = <0xb800 0x100>;
- };
-
-
- i) Marvell Discovery MPSC nodes
-
- Represent the Discovery's MPSC (Multiprotocol Serial Controller)
- serial port.
-
- Required properties:
- - device_type : "serial"
- - compatible : "marvell,mv64360-mpsc"
- - reg : Offset and length of the register set for this device
- - sdma : the phandle for the SDMA node used by this port
- - brg : the phandle for the BRG node used by this port
- - cunit : the phandle for the CUNIT node used by this port
- - mpscrouting : the phandle for the MPSCROUTING node used by this port
- - mpscintr : the phandle for the MPSCINTR node used by this port
- - cell-index : the hardware index of this cell in the MPSC core
- - max_idle : value needed for MPSC CHR3 (Maximum Frame Length)
- register
- - interrupts : <a> where a is the interrupt number for the MPSC.
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery MPSC node:
- mpsc@8000 {
- device_type = "serial";
- compatible = "marvell,mv64360-mpsc";
- reg = <0x8000 0x38>;
- virtual-reg = <0xf1008000>;
- sdma = <&SDMA0>;
- brg = <&BRG0>;
- cunit = <&CUNIT>;
- mpscrouting = <&MPSCROUTING>;
- mpscintr = <&MPSCINTR>;
- cell-index = <0>;
- max_idle = <40>;
- interrupts = <40>;
- interrupt-parent = <&PIC>;
- };
-
-
- j) Marvell Discovery Watch Dog Timer nodes
-
- Represent the Discovery's watchdog timer hardware
-
- Required properties:
- - compatible : "marvell,mv64360-wdt"
- - reg : Offset and length of the register set for this device
-
- Example Discovery Watch Dog Timer node:
- wdt@b410 {
- compatible = "marvell,mv64360-wdt";
- reg = <0xb410 0x8>;
- };
-
-
- k) Marvell Discovery I2C nodes
-
- Represent the Discovery's I2C hardware
-
- Required properties:
- - device_type : "i2c"
- - compatible : "marvell,mv64360-i2c"
- - reg : Offset and length of the register set for this device
- - interrupts : <a> where a is the interrupt number for the I2C.
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery I2C node:
- i2c@c000 {
- device_type = "i2c";
- compatible = "marvell,mv64360-i2c";
- reg = <0xc000 0x20>;
- virtual-reg = <0xf100c000>;
- interrupts = <37>;
- interrupt-parent = <&PIC>;
- };
-
-
- l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes
-
- Represent the Discovery's PIC hardware
-
- Required properties:
- - #interrupt-cells : <1>
- - #address-cells : <0>
- - compatible : "marvell,mv64360-pic"
- - reg : Offset and length of the register set for this device
- - interrupt-controller
-
- Example Discovery PIC node:
- pic {
- #interrupt-cells = <1>;
- #address-cells = <0>;
- compatible = "marvell,mv64360-pic";
- reg = <0x0 0x88>;
- interrupt-controller;
- };
-
-
- m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes
-
- Represent the Discovery's MPP hardware
-
- Required properties:
- - compatible : "marvell,mv64360-mpp"
- - reg : Offset and length of the register set for this device
-
- Example Discovery MPP node:
- mpp@f000 {
- compatible = "marvell,mv64360-mpp";
- reg = <0xf000 0x10>;
- };
-
-
- n) Marvell Discovery GPP (General Purpose Pins) nodes
-
- Represent the Discovery's GPP hardware
-
- Required properties:
- - compatible : "marvell,mv64360-gpp"
- - reg : Offset and length of the register set for this device
-
- Example Discovery GPP node:
- gpp@f100 {
- compatible = "marvell,mv64360-gpp";
- reg = <0xf100 0x20>;
- };
-
-
- o) Marvell Discovery PCI host bridge node
-
- Represents the Discovery's PCI host bridge device. The properties
- for this node conform to Rev 2.1 of the PCI Bus Binding to IEEE
- 1275-1994. A typical value for the compatible property is
- "marvell,mv64360-pci".
-
- Example Discovery PCI host bridge node
- pci@80000000 {
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- device_type = "pci";
- compatible = "marvell,mv64360-pci";
- reg = <0xcf8 0x8>;
- ranges = <0x01000000 0x0 0x0
- 0x88000000 0x0 0x01000000
- 0x02000000 0x0 0x80000000
- 0x80000000 0x0 0x08000000>;
- bus-range = <0 255>;
- clock-frequency = <66000000>;
- interrupt-parent = <&PIC>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0a */
- 0x5000 0 0 1 &PIC 80
- 0x5000 0 0 2 &PIC 81
- 0x5000 0 0 3 &PIC 91
- 0x5000 0 0 4 &PIC 93
-
- /* IDSEL 0x0b */
- 0x5800 0 0 1 &PIC 91
- 0x5800 0 0 2 &PIC 93
- 0x5800 0 0 3 &PIC 80
- 0x5800 0 0 4 &PIC 81
-
- /* IDSEL 0x0c */
- 0x6000 0 0 1 &PIC 91
- 0x6000 0 0 2 &PIC 93
- 0x6000 0 0 3 &PIC 80
- 0x6000 0 0 4 &PIC 81
-
- /* IDSEL 0x0d */
- 0x6800 0 0 1 &PIC 93
- 0x6800 0 0 2 &PIC 80
- 0x6800 0 0 3 &PIC 81
- 0x6800 0 0 4 &PIC 91
- >;
- };
-
-
- p) Marvell Discovery CPU Error nodes
-
- Represent the Discovery's CPU error handler device.
-
- Required properties:
- - compatible : "marvell,mv64360-cpu-error"
- - reg : Offset and length of the register set for this device
- - interrupts : the interrupt number for this device
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery CPU Error node:
- cpu-error@0070 {
- compatible = "marvell,mv64360-cpu-error";
- reg = <0x70 0x10 0x128 0x28>;
- interrupts = <3>;
- interrupt-parent = <&PIC>;
- };
-
-
- q) Marvell Discovery SRAM Controller nodes
-
- Represent the Discovery's SRAM controller device.
-
- Required properties:
- - compatible : "marvell,mv64360-sram-ctrl"
- - reg : Offset and length of the register set for this device
- - interrupts : the interrupt number for this device
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery SRAM Controller node:
- sram-ctrl@0380 {
- compatible = "marvell,mv64360-sram-ctrl";
- reg = <0x380 0x80>;
- interrupts = <13>;
- interrupt-parent = <&PIC>;
- };
-
-
- r) Marvell Discovery PCI Error Handler nodes
-
- Represent the Discovery's PCI error handler device.
-
- Required properties:
- - compatible : "marvell,mv64360-pci-error"
- - reg : Offset and length of the register set for this device
- - interrupts : the interrupt number for this device
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery PCI Error Handler node:
- pci-error@1d40 {
- compatible = "marvell,mv64360-pci-error";
- reg = <0x1d40 0x40 0xc28 0x4>;
- interrupts = <12>;
- interrupt-parent = <&PIC>;
- };
-
-
- s) Marvell Discovery Memory Controller nodes
-
- Represent the Discovery's memory controller device.
-
- Required properties:
- - compatible : "marvell,mv64360-mem-ctrl"
- - reg : Offset and length of the register set for this device
- - interrupts : the interrupt number for this device
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery Memory Controller node:
- mem-ctrl@1400 {
- compatible = "marvell,mv64360-mem-ctrl";
- reg = <0x1400 0x60>;
- interrupts = <17>;
- interrupt-parent = <&PIC>;
- };
-
-
-VIII - Specifying interrupt information for devices
+VII - Specifying interrupt information for devices
===================================================
The device tree represents the busses and devices of a hardware
@@ -2439,56 +1324,7 @@ encodings listed below:
2 = high to low edge sensitive type enabled
3 = low to high edge sensitive type enabled
-IX - Specifying GPIO information for devices
-============================================
-
-1) gpios property
------------------
-
-Nodes that make use of GPIOs should define them using the `gpios' property,
-the format of which is: <&gpio-controller1-phandle gpio1-specifier
- &gpio-controller2-phandle gpio2-specifier
- 0 /* holes are permitted, means no GPIO 3 */
- &gpio-controller4-phandle gpio4-specifier
- ...>;
-
-Note that gpio-specifier length is controller dependent.
-
-gpio-specifier may encode: bank, pin position inside the bank,
-whether pin is open-drain and whether pin is logically inverted.
-
-Example of the node using GPIOs:
-
- node {
- gpios = <&qe_pio_e 18 0>;
- };
-
-In this example gpio-specifier is "18 0" and encodes GPIO pin number,
-and empty GPIO flags as accepted by the "qe_pio_e" gpio-controller.
-
-2) gpio-controller nodes
-------------------------
-
-Every GPIO controller node must have #gpio-cells property defined,
-this information will be used to translate gpio-specifiers.
-
-Example of two SOC GPIO banks defined as gpio-controller nodes:
-
- qe_pio_a: gpio-controller@1400 {
- #gpio-cells = <2>;
- compatible = "fsl,qe-pario-bank-a", "fsl,qe-pario-bank";
- reg = <0x1400 0x18>;
- gpio-controller;
- };
-
- qe_pio_e: gpio-controller@1460 {
- #gpio-cells = <2>;
- compatible = "fsl,qe-pario-bank-e", "fsl,qe-pario-bank";
- reg = <0x1460 0x18>;
- gpio-controller;
- };
-
-X - Specifying Device Power Management Information (sleep property)
+VIII - Specifying Device Power Management Information (sleep property)
===================================================================
Devices on SOCs often have mechanisms for placing devices into low-power
diff --git a/Documentation/powerpc/dts-bindings/4xx/emac.txt b/Documentation/powerpc/dts-bindings/4xx/emac.txt
new file mode 100644
index 000000000000..2161334a7ca5
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/emac.txt
@@ -0,0 +1,148 @@
+ 4xx/Axon EMAC ethernet nodes
+
+ The EMAC ethernet controller in IBM and AMCC 4xx chips, and also
+ the Axon bridge. To operate, this needs to interact with a special
+ McMAL DMA controller, and sometimes an RGMII or ZMII
+ interface. In addition to the nodes and properties described
+ below, the node for the OPB bus on which the EMAC sits must have a
+ correct clock-frequency property.
+
+ i) The EMAC node itself
+
+ Required properties:
+ - device_type : "network"
+
+ - compatible : compatible list, contains 2 entries, first is
+ "ibm,emac-CHIP" where CHIP is the host ASIC (440gx,
+ 405gp, Axon) and second is either "ibm,emac" or
+ "ibm,emac4". For Axon, thus, we have: "ibm,emac-axon",
+ "ibm,emac4"
+ - interrupts : <interrupt mapping for EMAC IRQ and WOL IRQ>
+ - interrupt-parent : optional, if needed for interrupt mapping
+ - reg : <registers mapping>
+ - local-mac-address : 6 bytes, MAC address
+ - mal-device : phandle of the associated McMAL node
+ - mal-tx-channel : 1 cell, index of the tx channel on McMAL associated
+ with this EMAC
+ - mal-rx-channel : 1 cell, index of the rx channel on McMAL associated
+ with this EMAC
+ - cell-index : 1 cell, hardware index of the EMAC cell on a given
+ ASIC (typically 0x0 and 0x1 for EMAC0 and EMAC1 on
+ each Axon chip)
+ - max-frame-size : 1 cell, maximum frame size supported in bytes
+ - rx-fifo-size : 1 cell, Rx fifo size in bytes for 10 and 100 Mb/sec
+ operations.
+ For Axon, 2048
+ - tx-fifo-size : 1 cell, Tx fifo size in bytes for 10 and 100 Mb/sec
+ operations.
+ For Axon, 2048.
+ - fifo-entry-size : 1 cell, size of a fifo entry (used to calculate
+ thresholds).
+ For Axon, 0x00000010
+ - mal-burst-size : 1 cell, MAL burst size (used to calculate thresholds)
+ in bytes.
+ For Axon, 0x00000100 (I think ...)
+ - phy-mode : string, mode of operations of the PHY interface.
+ Supported values are: "mii", "rmii", "smii", "rgmii",
+ "tbi", "gmii", "rtbi", "sgmii".
+ For Axon on CAB, it is "rgmii"
+ - mdio-device : 1 cell, required iff using shared MDIO registers
+ (440EP). phandle of the EMAC to use to drive the
+ MDIO lines for the PHY used by this EMAC.
+ - zmii-device : 1 cell, required iff connected to a ZMII. phandle of
+ the ZMII device node
+ - zmii-channel : 1 cell, required iff connected to a ZMII. Which ZMII
+ channel or 0xffffffff if ZMII is only used for MDIO.
+ - rgmii-device : 1 cell, required iff connected to an RGMII. phandle
+ of the RGMII device node.
+ For Axon: phandle of plb5/plb4/opb/rgmii
+ - rgmii-channel : 1 cell, required iff connected to an RGMII. Which
+ RGMII channel is used by this EMAC.
+ For Axon: present, whatever value is appropriate for each
+ EMAC, that is the content of the current (bogus) "phy-port"
+ property.
+
+ Optional properties:
+ - phy-address : 1 cell, optional, MDIO address of the PHY. If absent,
+ a search is performed.
+ - phy-map : 1 cell, optional, bitmap of addresses to probe the PHY
+ for, used if phy-address is absent. bit 0x00000001 is
+ MDIO address 0.
+ For Axon it can be absent, though my current driver
+ doesn't handle phy-address yet so for now, keep
+ 0x00ffffff in it.
+ - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
+ operations (if absent the value is the same as
+ rx-fifo-size). For Axon, either absent or 2048.
+ - tx-fifo-size-gige : 1 cell, Tx fifo size in bytes for 1000 Mb/sec
+ operations (if absent the value is the same as
+ tx-fifo-size). For Axon, either absent or 2048.
+ - tah-device : 1 cell, optional. If connected to a TAH engine for
+ offload, phandle of the TAH device node.
+ - tah-channel : 1 cell, optional. If appropriate, channel used on the
+ TAH engine.
+
+ Example:
+
+ EMAC0: ethernet@40000800 {
+ device_type = "network";
+ compatible = "ibm,emac-440gp", "ibm,emac";
+ interrupt-parent = <&UIC1>;
+ interrupts = <1c 4 1d 4>;
+ reg = <40000800 70>;
+ local-mac-address = [00 04 AC E3 1B 1E];
+ mal-device = <&MAL0>;
+ mal-tx-channel = <0 1>;
+ mal-rx-channel = <0>;
+ cell-index = <0>;
+ max-frame-size = <5dc>;
+ rx-fifo-size = <1000>;
+ tx-fifo-size = <800>;
+ phy-mode = "rmii";
+ phy-map = <00000001>;
+ zmii-device = <&ZMII0>;
+ zmii-channel = <0>;
+ };
+
+ ii) McMAL node
+
+ Required properties:
+ - device_type : "dma-controller"
+ - compatible : compatible list, containing 2 entries, first is
+ "ibm,mcmal-CHIP" where CHIP is the host ASIC (like
+ emac) and the second is either "ibm,mcmal" or
+ "ibm,mcmal2".
+ For Axon, "ibm,mcmal-axon","ibm,mcmal2"
+ - interrupts : <interrupt mapping for the MAL interrupts sources:
+ 5 sources: tx_eob, rx_eob, serr, txde, rxde>.
+ For Axon: This is _different_ from the current
+ firmware. We use the "delayed" interrupts for txeob
+ and rxeob. Thus we end up with mapping those 5 MPIC
+ interrupts, all level positive sensitive: 10, 11, 32,
+ 33, 34 (in decimal)
+ - dcr-reg : < DCR registers range >
+ - dcr-parent : if needed for dcr-reg
+ - num-tx-chans : 1 cell, number of Tx channels
+ - num-rx-chans : 1 cell, number of Rx channels
+
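+ Example McMAL node (a minimal sketch; the DCR range, channel counts
+ and UIC interrupt numbers shown are illustrative assumptions, not
+ values mandated by this binding):
+
+ MAL0: mcmal {
+         device_type = "dma-controller";
+         compatible = "ibm,mcmal-440gp", "ibm,mcmal";
+         dcr-reg = <180 62>;
+         num-tx-chans = <4>;
+         num-rx-chans = <2>;
+         interrupt-parent = <&UIC0>;
+         interrupts = <a 4 b 4 c 4 d 4 e 4>;
+ };
+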
+ iii) ZMII node
+
+ Required properties:
+ - compatible : compatible list, containing 2 entries, first is
+ "ibm,zmii-CHIP" where CHIP is the host ASIC (like
+ EMAC) and the second is "ibm,zmii".
+ For Axon, there is no ZMII node.
+ - reg : <registers mapping>
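+
+ Example ZMII node (illustrative only; the unit address and size are
+ assumptions, and as noted above an Axon design has no ZMII node at
+ all). The EMAC example above points to this node through its
+ zmii-device property:
+
+ ZMII0: emac-zmii@40000780 {
+         compatible = "ibm,zmii-440gp", "ibm,zmii";
+         reg = <40000780 c>;
+ };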
+
+ iv) RGMII node
+
+ Required properties:
+ - compatible : compatible list, containing 2 entries, first is
+ "ibm,rgmii-CHIP" where CHIP is the host ASIC (like
+ EMAC) and the second is "ibm,rgmii".
+ For Axon, "ibm,rgmii-axon","ibm,rgmii"
+ - reg : <registers mapping>
+ - revision : as provided by the RGMII new version register if
+ available.
+ For Axon: 0x0000012a
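+
+ Example RGMII node (again a sketch; the unit address, size and host
+ ASIC name are illustrative assumptions):
+
+ RGMII0: emac-rgmii@40000790 {
+         compatible = "ibm,rgmii-440gx", "ibm,rgmii";
+         reg = <40000790 10>;
+ };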
+
diff --git a/Documentation/powerpc/dts-bindings/gpio/gpio.txt b/Documentation/powerpc/dts-bindings/gpio/gpio.txt
new file mode 100644
index 000000000000..edaa84d288a1
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/gpio/gpio.txt
@@ -0,0 +1,50 @@
+Specifying GPIO information for devices
+============================================
+
+1) gpios property
+-----------------
+
+Nodes that make use of GPIOs should define them using the `gpios' property,
+the format of which is: <&gpio-controller1-phandle gpio1-specifier
+ &gpio-controller2-phandle gpio2-specifier
+ 0 /* holes are permitted, means no GPIO 3 */
+ &gpio-controller4-phandle gpio4-specifier
+ ...>;
+
+Note that gpio-specifier length is controller dependent.
+
+gpio-specifier may encode: bank, pin position inside the bank,
+whether pin is open-drain and whether pin is logically inverted.
+
+Example of the node using GPIOs:
+
+ node {
+ gpios = <&qe_pio_e 18 0>;
+ };
+
+In this example gpio-specifier is "18 0" and encodes GPIO pin number,
+and empty GPIO flags as accepted by the "qe_pio_e" gpio-controller.
+
+2) gpio-controller nodes
+------------------------
+
+Every GPIO controller node must have #gpio-cells property defined,
+this information will be used to translate gpio-specifiers.
+
+Example of two SOC GPIO banks defined as gpio-controller nodes:
+
+ qe_pio_a: gpio-controller@1400 {
+ #gpio-cells = <2>;
+ compatible = "fsl,qe-pario-bank-a", "fsl,qe-pario-bank";
+ reg = <0x1400 0x18>;
+ gpio-controller;
+ };
+
+ qe_pio_e: gpio-controller@1460 {
+ #gpio-cells = <2>;
+ compatible = "fsl,qe-pario-bank-e", "fsl,qe-pario-bank";
+ reg = <0x1460 0x18>;
+ gpio-controller;
+ };
+
+
diff --git a/Documentation/powerpc/dts-bindings/gpio/led.txt b/Documentation/powerpc/dts-bindings/gpio/led.txt
index 4fe14deedc0a..064db928c3c1 100644
--- a/Documentation/powerpc/dts-bindings/gpio/led.txt
+++ b/Documentation/powerpc/dts-bindings/gpio/led.txt
@@ -16,10 +16,17 @@ LED sub-node properties:
string defining the trigger assigned to the LED. Current triggers are:
"backlight" - LED will act as a back-light, controlled by the framebuffer
system
- "default-on" - LED will turn on
+ "default-on" - LED will turn on, but see "default-state" below
"heartbeat" - LED "double" flashes at a load average based rate
"ide-disk" - LED indicates disk activity
"timer" - LED flashes at a fixed, configurable rate
+- default-state: (optional) The initial state of the LED. Valid
+ values are "on", "off", and "keep". If the LED is already on or off
+ and the default-state property is set to the same value, then no
+ glitch should be produced where the LED momentarily turns off (or
+ on). The "keep" setting will keep the LED at whatever its current
+ state is, without producing a glitch. The default is off if this
+ property is not present.
Examples:
@@ -30,14 +37,22 @@ leds {
gpios = <&mcu_pio 0 1>; /* Active low */
linux,default-trigger = "ide-disk";
};
+
+ fault {
+ gpios = <&mcu_pio 1 0>;
+ /* Keep LED on if BIOS detected hardware fault */
+ default-state = "keep";
+ };
};
run-control {
compatible = "gpio-leds";
red {
gpios = <&mpc8572 6 0>;
+ default-state = "off";
};
green {
gpios = <&mpc8572 7 0>;
+ default-state = "on";
};
}
diff --git a/Documentation/powerpc/dts-bindings/gpio/mdio.txt b/Documentation/powerpc/dts-bindings/gpio/mdio.txt
new file mode 100644
index 000000000000..bc9549529014
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/gpio/mdio.txt
@@ -0,0 +1,19 @@
+MDIO on GPIOs
+
+Currently defined compatibles:
+- virtual,gpio-mdio
+
+MDC and MDIO lines connected to GPIO controllers are listed in the
+gpios property, as described in gpio.txt, in the following order:
+
+MDC, MDIO.
+
+Example:
+
+mdio {
+ compatible = "virtual,mdio-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpios = <&qe_pio_a 11
+ &qe_pio_c 6>;
+};
diff --git a/Documentation/powerpc/dts-bindings/marvell.txt b/Documentation/powerpc/dts-bindings/marvell.txt
new file mode 100644
index 000000000000..3708a2fd4747
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/marvell.txt
@@ -0,0 +1,521 @@
+Marvell Discovery mv64[345]6x System Controller chips
+===========================================================
+
+The Marvell mv64[345]60 series of system controller chips contain
+many of the peripherals needed to implement a complete computer
+system. In this section, we define device tree nodes to describe
+the system controller chip itself and each of the peripherals
+which it contains. Compatible string values for each node are
+prefixed with the string "marvell,", for Marvell Technology Group Ltd.
+
+1) The /system-controller node
+
+ This node is used to represent the system-controller and must be
+ present when the system uses a system controller chip. The top-level
+ system-controller node contains information that is global to all
+ devices within the system controller chip. The node name begins
+ with "system-controller" followed by the unit address, which is
+ the base address of the memory-mapped register set for the system
+ controller chip.
+
+ Required properties:
+
+ - ranges : Describes the translation of system controller addresses
+ for memory mapped registers.
+ - clock-frequency: Contains the main clock frequency for the system
+ controller chip.
+ - reg : This property defines the address and size of the
+ memory-mapped registers contained within the system controller
+ chip. The address specified in the "reg" property should match
+ the unit address of the system-controller node.
+ - #address-cells : Address representation for system controller
+ devices. This field represents the number of cells needed to
+ represent the address of the memory-mapped registers of devices
+ within the system controller chip.
+ - #size-cells : Size representation for the memory-mapped
+ registers within the system controller chip.
+ - #interrupt-cells : Defines the width of cells used to represent
+ interrupts.
+
+ Optional properties:
+
+ - model : The specific model of the system controller chip. Such
+ as, "mv64360", "mv64460", or "mv64560".
+ - compatible : A string identifying the compatibility identifiers
+ of the system controller chip.
+
+ The system-controller node contains child nodes for each system
+ controller device that the platform uses. Nodes should not be created
+ for devices which exist on the system controller chip but are not used.
+
+ Example Marvell Discovery mv64360 system-controller node:
+
+ system-controller@f1000000 { /* Marvell Discovery mv64360 */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ model = "mv64360"; /* Default */
+ compatible = "marvell,mv64360";
+ clock-frequency = <133333333>;
+ reg = <0xf1000000 0x10000>;
+ virtual-reg = <0xf1000000>;
+ ranges = <0x88000000 0x88000000 0x1000000 /* PCI 0 I/O Space */
+ 0x80000000 0x80000000 0x8000000 /* PCI 0 MEM Space */
+ 0xa0000000 0xa0000000 0x4000000 /* User FLASH */
+ 0x00000000 0xf1000000 0x0010000 /* Bridge's regs */
+ 0xf2000000 0xf2000000 0x0040000>;/* Integrated SRAM */
+
+ [ child node definitions... ]
+ }
+
+2) Child nodes of /system-controller
+
+ a) Marvell Discovery MDIO bus
+
+ The MDIO is a bus to which the PHY devices are connected. For each
+ device that exists on this bus, a child node should be created. See
+ the definition of the PHY node below for an example of how to define
+ a PHY.
+
+ Required properties:
+ - #address-cells : Should be <1>
+ - #size-cells : Should be <0>
+ - device_type : Should be "mdio"
+ - compatible : Should be "marvell,mv64360-mdio"
+
+ Example:
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ device_type = "mdio";
+ compatible = "marvell,mv64360-mdio";
+
+ ethernet-phy@0 {
+ ......
+ };
+ };
+
+
+ b) Marvell Discovery ethernet controller
+
+ The Discovery ethernet controller is described with two levels
+ of nodes. The first level describes an ethernet silicon block
+ and the second level describes up to 3 ethernet nodes within
+ that block. The reason for the multiple levels is that the
+ registers for the node are interleaved within a single set
+ of registers. The "ethernet-block" level describes the
+ shared register set, and the "ethernet" nodes describe ethernet
+ port-specific properties.
+
+ Ethernet block node
+
+ Required properties:
+ - #address-cells : <1>
+ - #size-cells : <0>
+ - compatible : "marvell,mv64360-eth-block"
+ - reg : Offset and length of the register set for this block
+
+ Example Discovery Ethernet block node:
+ ethernet-block@2000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "marvell,mv64360-eth-block";
+ reg = <0x2000 0x2000>;
+ ethernet@0 {
+ .......
+ };
+ };
+
+ Ethernet port node
+
+ Required properties:
+ - device_type : Should be "network".
+ - compatible : Should be "marvell,mv64360-eth".
+ - reg : Should be <0>, <1>, or <2>, according to which registers
+ within the silicon block the device uses.
+ - interrupts : <a> where a is the interrupt number for the port.
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+ - phy : the phandle for the PHY connected to this ethernet
+ controller.
+ - local-mac-address : 6 bytes, MAC address
+
+ Example Discovery Ethernet port node:
+ ethernet@0 {
+ device_type = "network";
+ compatible = "marvell,mv64360-eth";
+ reg = <0>;
+ interrupts = <32>;
+ interrupt-parent = <&PIC>;
+ phy = <&PHY0>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+
+
+
+ c) Marvell Discovery PHY nodes
+
+ Required properties:
+ - device_type : Should be "ethernet-phy"
+ - interrupts : <a> where a is the interrupt number for this phy.
+ - interrupt-parent : the phandle for the interrupt controller that
+ services interrupts for this device.
+ - reg : The ID number for the phy, usually a small integer
+
+ Example Discovery PHY node:
+ ethernet-phy@1 {
+ device_type = "ethernet-phy";
+ compatible = "broadcom,bcm5421";
+ interrupts = <76>; /* GPP 12 */
+ interrupt-parent = <&PIC>;
+ reg = <1>;
+ };
+
+
+ d) Marvell Discovery SDMA nodes
+
+ Represent DMA hardware associated with the MPSC (multiprotocol
+ serial controllers).
+
+ Required properties:
+ - compatible : "marvell,mv64360-sdma"
+ - reg : Offset and length of the register set for this device
+ - interrupts : <a> where a is the interrupt number for the DMA
+ device.
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery SDMA node:
+ sdma@4000 {
+ compatible = "marvell,mv64360-sdma";
+ reg = <0x4000 0xc18>;
+ virtual-reg = <0xf1004000>;
+ interrupts = <36>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ e) Marvell Discovery BRG nodes
+
+ Represent baud rate generator hardware associated with the MPSC
+ (multiprotocol serial controllers).
+
+ Required properties:
+ - compatible : "marvell,mv64360-brg"
+ - reg : Offset and length of the register set for this device
+ - clock-src : A value from 0 to 15 which selects the clock
+ source for the baud rate generator. This value corresponds
+ to the CLKS value in the BRGx configuration register. See
+ the mv64x60 User's Manual.
+ - clock-frequency : The frequency (in Hz) of the baud rate
+ generator's input clock.
+ - current-speed : The current speed setting (presumably by
+ firmware) of the baud rate generator.
+
+ Example Discovery BRG node:
+ brg@b200 {
+ compatible = "marvell,mv64360-brg";
+ reg = <0xb200 0x8>;
+ clock-src = <8>;
+ clock-frequency = <133333333>;
+ current-speed = <9600>;
+ };
+
+
+ f) Marvell Discovery CUNIT nodes
+
+ Represent the Serial Communications Unit device hardware.
+
+ Required properties:
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery CUNIT node:
+ cunit@f200 {
+ reg = <0xf200 0x200>;
+ };
+
+
+ g) Marvell Discovery MPSCROUTING nodes
+
+ Represent the Discovery's MPSC routing hardware
+
+ Required properties:
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery MPSCROUTING node:
+ mpscrouting@b400 {
+ reg = <0xb400 0xc>;
+ };
+
+
+ h) Marvell Discovery MPSCINTR nodes
+
+ Represent the Discovery's MPSC DMA interrupt hardware registers
+ (SDMA cause and mask registers).
+
+ Required properties:
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery MPSCINTR node:
+ mpscintr@b800 {
+ reg = <0xb800 0x100>;
+ };
+
+
+ i) Marvell Discovery MPSC nodes
+
+ Represent the Discovery's MPSC (Multiprotocol Serial Controller)
+ serial port.
+
+ Required properties:
+ - device_type : "serial"
+ - compatible : "marvell,mv64360-mpsc"
+ - reg : Offset and length of the register set for this device
+ - sdma : the phandle for the SDMA node used by this port
+ - brg : the phandle for the BRG node used by this port
+ - cunit : the phandle for the CUNIT node used by this port
+ - mpscrouting : the phandle for the MPSCROUTING node used by this port
+ - mpscintr : the phandle for the MPSCINTR node used by this port
+ - cell-index : the hardware index of this cell in the MPSC core
+ - max_idle : value needed for MPSC CHR3 (Maximum Frame Length)
+ register
+ - interrupts : <a> where a is the interrupt number for the MPSC.
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery MPSC node:
+ mpsc@8000 {
+ device_type = "serial";
+ compatible = "marvell,mv64360-mpsc";
+ reg = <0x8000 0x38>;
+ virtual-reg = <0xf1008000>;
+ sdma = <&SDMA0>;
+ brg = <&BRG0>;
+ cunit = <&CUNIT>;
+ mpscrouting = <&MPSCROUTING>;
+ mpscintr = <&MPSCINTR>;
+ cell-index = <0>;
+ max_idle = <40>;
+ interrupts = <40>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ j) Marvell Discovery Watch Dog Timer nodes
+
+ Represent the Discovery's watchdog timer hardware
+
+ Required properties:
+ - compatible : "marvell,mv64360-wdt"
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery Watch Dog Timer node:
+ wdt@b410 {
+ compatible = "marvell,mv64360-wdt";
+ reg = <0xb410 0x8>;
+ };
+
+
+ k) Marvell Discovery I2C nodes
+
+ Represent the Discovery's I2C hardware
+
+ Required properties:
+ - device_type : "i2c"
+ - compatible : "marvell,mv64360-i2c"
+ - reg : Offset and length of the register set for this device
+ - interrupts : <a> where a is the interrupt number for the I2C.
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery I2C node:
+ i2c@c000 {
+ device_type = "i2c";
+ compatible = "marvell,mv64360-i2c";
+ reg = <0xc000 0x20>;
+ virtual-reg = <0xf100c000>;
+ interrupts = <37>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes
+
+ Represent the Discovery's PIC hardware
+
+ Required properties:
+ - #interrupt-cells : <1>
+ - #address-cells : <0>
+ - compatible : "marvell,mv64360-pic"
+ - reg : Offset and length of the register set for this device
+ - interrupt-controller
+
+ Example Discovery PIC node:
+ pic {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ compatible = "marvell,mv64360-pic";
+ reg = <0x0 0x88>;
+ interrupt-controller;
+ };
+
+
+ m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes
+
+ Represent the Discovery's MPP hardware
+
+ Required properties:
+ - compatible : "marvell,mv64360-mpp"
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery MPP node:
+ mpp@f000 {
+ compatible = "marvell,mv64360-mpp";
+ reg = <0xf000 0x10>;
+ };
+
+
+ n) Marvell Discovery GPP (General Purpose Pins) nodes
+
+ Represent the Discovery's GPP hardware
+
+ Required properties:
+ - compatible : "marvell,mv64360-gpp"
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery GPP node:
+ gpp@f100 {
+ compatible = "marvell,mv64360-gpp";
+ reg = <0xf100 0x20>;
+ };
+
+
+ o) Marvell Discovery PCI host bridge node
+
+ Represents the Discovery's PCI host bridge device. The properties
+ for this node conform to Rev 2.1 of the PCI Bus Binding to IEEE
+ 1275-1994. A typical value for the compatible property is
+ "marvell,mv64360-pci".
+
+ Example Discovery PCI host bridge node
+ pci@80000000 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ compatible = "marvell,mv64360-pci";
+ reg = <0xcf8 0x8>;
+ ranges = <0x01000000 0x0 0x0
+ 0x88000000 0x0 0x01000000
+ 0x02000000 0x0 0x80000000
+ 0x80000000 0x0 0x08000000>;
+ bus-range = <0 255>;
+ clock-frequency = <66000000>;
+ interrupt-parent = <&PIC>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0a */
+ 0x5000 0 0 1 &PIC 80
+ 0x5000 0 0 2 &PIC 81
+ 0x5000 0 0 3 &PIC 91
+ 0x5000 0 0 4 &PIC 93
+
+ /* IDSEL 0x0b */
+ 0x5800 0 0 1 &PIC 91
+ 0x5800 0 0 2 &PIC 93
+ 0x5800 0 0 3 &PIC 80
+ 0x5800 0 0 4 &PIC 81
+
+ /* IDSEL 0x0c */
+ 0x6000 0 0 1 &PIC 91
+ 0x6000 0 0 2 &PIC 93
+ 0x6000 0 0 3 &PIC 80
+ 0x6000 0 0 4 &PIC 81
+
+ /* IDSEL 0x0d */
+ 0x6800 0 0 1 &PIC 93
+ 0x6800 0 0 2 &PIC 80
+ 0x6800 0 0 3 &PIC 81
+ 0x6800 0 0 4 &PIC 91
+ >;
+ };
+
+
+ p) Marvell Discovery CPU Error nodes
+
+ Represent the Discovery's CPU error handler device.
+
+ Required properties:
+ - compatible : "marvell,mv64360-cpu-error"
+ - reg : Offset and length of the register set for this device
+ - interrupts : the interrupt number for this device
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery CPU Error node:
+ cpu-error@0070 {
+ compatible = "marvell,mv64360-cpu-error";
+ reg = <0x70 0x10 0x128 0x28>;
+ interrupts = <3>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ q) Marvell Discovery SRAM Controller nodes
+
+ Represent the Discovery's SRAM controller device.
+
+ Required properties:
+ - compatible : "marvell,mv64360-sram-ctrl"
+ - reg : Offset and length of the register set for this device
+ - interrupts : the interrupt number for this device
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery SRAM Controller node:
+ sram-ctrl@0380 {
+ compatible = "marvell,mv64360-sram-ctrl";
+ reg = <0x380 0x80>;
+ interrupts = <13>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ r) Marvell Discovery PCI Error Handler nodes
+
+ Represent the Discovery's PCI error handler device.
+
+ Required properties:
+ - compatible : "marvell,mv64360-pci-error"
+ - reg : Offset and length of the register set for this device
+ - interrupts : the interrupt number for this device
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery PCI Error Handler node:
+ pci-error@1d40 {
+ compatible = "marvell,mv64360-pci-error";
+ reg = <0x1d40 0x40 0xc28 0x4>;
+ interrupts = <12>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ s) Marvell Discovery Memory Controller nodes
+
+ Represent the Discovery's memory controller device.
+
+ Required properties:
+ - compatible : "marvell,mv64360-mem-ctrl"
+ - reg : Offset and length of the register set for this device
+ - interrupts : the interrupt number for this device
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery Memory Controller node:
+ mem-ctrl@1400 {
+ compatible = "marvell,mv64360-mem-ctrl";
+ reg = <0x1400 0x60>;
+ interrupts = <17>;
+ interrupt-parent = <&PIC>;
+ };
+
+
diff --git a/Documentation/powerpc/dts-bindings/phy.txt b/Documentation/powerpc/dts-bindings/phy.txt
new file mode 100644
index 000000000000..bb8c742eb8c5
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/phy.txt
@@ -0,0 +1,25 @@
+PHY nodes
+
+Required properties:
+
+ - device_type : Should be "ethernet-phy"
+ - interrupts : <a b> where a is the interrupt number and b is a
+ field that represents an encoding of the sense and level
+ information for the interrupt. This should be encoded based on
+ the information in section 2) depending on the type of interrupt
+ controller you have.
+ - interrupt-parent : the phandle for the interrupt controller that
+ services interrupts for this device.
+ - reg : The ID number for the phy, usually a small integer
+ - linux,phandle : phandle for this node; likely referenced by an
+ ethernet controller node.
+
+Example:
+
+ethernet-phy@0 {
+ linux,phandle = <2452000>;
+ interrupt-parent = <40000>;
+ interrupts = <35 1>;
+ reg = <0>;
+ device_type = "ethernet-phy";
+};
diff --git a/Documentation/powerpc/dts-bindings/spi-bus.txt b/Documentation/powerpc/dts-bindings/spi-bus.txt
new file mode 100644
index 000000000000..e782add2e457
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/spi-bus.txt
@@ -0,0 +1,57 @@
+SPI (Serial Peripheral Interface) busses
+
+SPI busses can be described with a node for the SPI master device
+and a set of child nodes for each SPI slave on the bus. For this
+discussion, it is assumed that the system's SPI controller is in
+SPI master mode. This binding does not describe SPI controllers
+in slave mode.
+
+The SPI master node requires the following properties:
+- #address-cells - number of cells required to define a chip select
+ address on the SPI bus.
+- #size-cells - should be zero.
+- compatible - name of SPI bus controller following generic names
+ recommended practice.
+No other properties are required in the SPI bus node. It is assumed
+that a driver for an SPI bus device will understand that it is an SPI bus.
+However, the binding does not attempt to define the specific method for
+assigning chip select numbers. Since SPI chip select configuration is
+flexible and non-standardized, it is left out of this binding with the
+assumption that board specific platform code will be used to manage
+chip selects. Individual drivers can define additional properties to
+support describing the chip select layout.
+
+SPI slave nodes must be children of the SPI master node and can
+contain the following properties.
+- reg - (required) chip select address of device.
+- compatible - (required) name of SPI device following generic names
+ recommended practice
+- spi-max-frequency - (required) Maximum SPI clocking speed of device in Hz
+- spi-cpol - (optional) Empty property indicating device requires
+ inverse clock polarity (CPOL) mode
+- spi-cpha - (optional) Empty property indicating device requires
+ shifted clock phase (CPHA) mode
+- spi-cs-high - (optional) Empty property indicating device requires
+ chip select active high
+
+SPI example for an MPC5200 SPI bus:
+ spi@f00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
+ reg = <0xf00 0x20>;
+ interrupts = <2 13 0 2 14 0>;
+ interrupt-parent = <&mpc5200_pic>;
+
+ ethernet-switch@0 {
+ compatible = "micrel,ks8995m";
+ spi-max-frequency = <1000000>;
+ reg = <0>;
+ };
+
+ codec@1 {
+ compatible = "ti,tlv320aic26";
+ spi-max-frequency = <100000>;
+ reg = <1>;
+ };
+ };
diff --git a/Documentation/powerpc/dts-bindings/usb-ehci.txt b/Documentation/powerpc/dts-bindings/usb-ehci.txt
new file mode 100644
index 000000000000..fa18612f757b
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/usb-ehci.txt
@@ -0,0 +1,25 @@
+USB EHCI controllers
+
+Required properties:
+ - compatible : should be "usb-ehci".
+ - reg : should contain at least address and length of the standard EHCI
+ register set for the device. Optional platform-dependent registers
+ (debug-port or other) can be also specified here, but only after
+ definition of standard EHCI registers.
+ - interrupts : one EHCI interrupt should be described here.
+If device registers are implemented in big endian mode, the device
+node should have "big-endian-regs" property.
+If controller implementation operates with big endian descriptors,
+"big-endian-desc" property should be specified.
+If both big endian registers and descriptors are used by the controller
+implementation, "big-endian" property can be specified instead of having
+both "big-endian-regs" and "big-endian-desc".
+
+Example (Sequoia 440EPx):
+ ehci@e0000300 {
+ compatible = "ibm,usb-ehci-440epx", "usb-ehci";
+ interrupt-parent = <&UIC0>;
+ interrupts = <1a 4>;
+ reg = <0 e0000300 90 0 e0000390 70>;
+ big-endian;
+ };
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/powerpc/dts-bindings/xilinx.txt
new file mode 100644
index 000000000000..80339fe4300b
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/xilinx.txt
@@ -0,0 +1,295 @@
+ d) Xilinx IP cores
+
+ The Xilinx EDK toolchain ships with a set of IP cores (devices) for use
+ in Xilinx Spartan and Virtex FPGAs. The devices cover the whole range
+ of standard device types (network, serial, etc.) and miscellaneous
+ devices (gpio, LCD, spi, etc). Also, since these devices are
+ implemented within the fpga fabric, every instance of the device can be
+ synthesised with different options that change the behaviour.
+
+ Each IP-core has a set of parameters which the FPGA designer can use to
+ control how the core is synthesized. Historically, the EDK tool would
+ extract the device parameters relevant to device drivers and copy them
+ into an 'xparameters.h' in the form of #define symbols. This tells the
+ device drivers how the IP cores are configured, but it requires the kernel
+ to be recompiled every time the FPGA bitstream is resynthesized.
+
+ The new approach is to export the parameters into the device tree and
+ generate a new device tree each time the FPGA bitstream changes. The
+ parameters which used to be exported as #defines will now become
+ properties of the device node. In general, device nodes for IP-cores
+ will take the following form:
+
+ (name): (generic-name)@(base-address) {
+ compatible = "xlnx,(ip-core-name)-(HW_VER)"
+ [, (list of compatible devices), ...];
+ reg = <(baseaddr) (size)>;
+ interrupt-parent = <&interrupt-controller-phandle>;
+ interrupts = < ... >;
+ xlnx,(parameter1) = "(string-value)";
+ xlnx,(parameter2) = <(int-value)>;
+ };
+
+ (generic-name): an open firmware-style name that describes the
+ generic class of device. Preferably, this is one word, such
+ as 'serial' or 'ethernet'.
+ (ip-core-name): the name of the ip block (given after the BEGIN
+ directive in system.mhs). Should be in lowercase
+ and all underscores '_' converted to dashes '-'.
+ (name): is derived from the "PARAMETER INSTANCE" value.
+ (parameter#): C_* parameters from system.mhs. The C_ prefix is
+ dropped from the parameter name, the name is converted
+ to lowercase and all underscore '_' characters are
+ converted to dashes '-'.
+ (baseaddr): the baseaddr parameter value (often named C_BASEADDR).
+ (HW_VER): from the HW_VER parameter.
+ (size): the address range size (often C_HIGHADDR - C_BASEADDR + 1).
+
+ Typically, the compatible list will include the exact IP core version
+ followed by an older IP core version which implements the same
+ interface or any other device with the same interface.
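+
+ For example, a hypothetical uartlite core synthesised as version
+ 1.01.a, but register compatible with the 1.00.b core, could carry:
+
+ compatible = "xlnx,opb-uartlite-1.01.a", "xlnx,opb-uartlite-1.00.b";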
+
+ 'reg', 'interrupt-parent' and 'interrupts' are all optional properties.
+
+ For example, the following block from system.mhs:
+
+ BEGIN opb_uartlite
+ PARAMETER INSTANCE = opb_uartlite_0
+ PARAMETER HW_VER = 1.00.b
+ PARAMETER C_BAUDRATE = 115200
+ PARAMETER C_DATA_BITS = 8
+ PARAMETER C_ODD_PARITY = 0
+ PARAMETER C_USE_PARITY = 0
+ PARAMETER C_CLK_FREQ = 50000000
+ PARAMETER C_BASEADDR = 0xEC100000
+ PARAMETER C_HIGHADDR = 0xEC10FFFF
+ BUS_INTERFACE SOPB = opb_7
+ PORT OPB_Clk = CLK_50MHz
+ PORT Interrupt = opb_uartlite_0_Interrupt
+ PORT RX = opb_uartlite_0_RX
+ PORT TX = opb_uartlite_0_TX
+ PORT OPB_Rst = sys_bus_reset_0
+ END
+
+ becomes the following device tree node:
+
+ opb_uartlite_0: serial@ec100000 {
+ device_type = "serial";
+ compatible = "xlnx,opb-uartlite-1.00.b";
+ reg = <ec100000 10000>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <1 0>; // got this from the opb_intc parameters
+ current-speed = <d#115200>; // standard serial device prop
+ clock-frequency = <d#50000000>; // standard serial device prop
+ xlnx,data-bits = <8>;
+ xlnx,odd-parity = <0>;
+ xlnx,use-parity = <0>;
+ };
+
+ Some IP cores actually implement 2 or more logical devices. In
+ this case, the device should still describe the whole IP core with
+ a single node and add a child node for each logical device. The
+ ranges property can be used to translate from parent IP-core to the
+ registers of each device. In addition, the parent node should be
+ compatible with the bus type 'xlnx,compound', and should contain
+ #address-cells and #size-cells, as with any other bus. (Note: this
+ makes the assumption that both logical devices have the same bus
+ binding. If this is not true, then separate nodes should be used
+ for each logical device). The 'cell-index' property can be used to
+ enumerate logical devices within an IP core. For example, the
+ following is the system.mhs entry for the dual ps2 controller found
+ on the ml403 reference design.
+
+ BEGIN opb_ps2_dual_ref
+ PARAMETER INSTANCE = opb_ps2_dual_ref_0
+ PARAMETER HW_VER = 1.00.a
+ PARAMETER C_BASEADDR = 0xA9000000
+ PARAMETER C_HIGHADDR = 0xA9001FFF
+ BUS_INTERFACE SOPB = opb_v20_0
+ PORT Sys_Intr1 = ps2_1_intr
+ PORT Sys_Intr2 = ps2_2_intr
+ PORT Clkin1 = ps2_clk_rx_1
+ PORT Clkin2 = ps2_clk_rx_2
+ PORT Clkpd1 = ps2_clk_tx_1
+ PORT Clkpd2 = ps2_clk_tx_2
+ PORT Rx1 = ps2_d_rx_1
+ PORT Rx2 = ps2_d_rx_2
+ PORT Txpd1 = ps2_d_tx_1
+ PORT Txpd2 = ps2_d_tx_2
+ END
+
+ It would result in the following device tree nodes:
+
+ opb_ps2_dual_ref_0: opb-ps2-dual-ref@a9000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,compound";
+ ranges = <0 a9000000 2000>;
+ // If this device had extra parameters, then they would
+ // go here.
+ ps2@0 {
+ compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
+ reg = <0 40>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <3 0>;
+ cell-index = <0>;
+ };
+ ps2@1000 {
+ compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
+ reg = <1000 40>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <3 0>;
+ cell-index = <1>;
+ };
+ };
+
+ Also, the system.mhs file defines bus attachments from the processor
+ to the devices. The device tree structure should reflect the bus
+ attachments. As another example, this system.mhs fragment:
+
+ BEGIN ppc405_virtex4
+ PARAMETER INSTANCE = ppc405_0
+ PARAMETER HW_VER = 1.01.a
+ BUS_INTERFACE DPLB = plb_v34_0
+ BUS_INTERFACE IPLB = plb_v34_0
+ END
+
+ BEGIN opb_intc
+ PARAMETER INSTANCE = opb_intc_0
+ PARAMETER HW_VER = 1.00.c
+ PARAMETER C_BASEADDR = 0xD1000FC0
+ PARAMETER C_HIGHADDR = 0xD1000FDF
+ BUS_INTERFACE SOPB = opb_v20_0
+ END
+
+ BEGIN opb_uart16550
+ PARAMETER INSTANCE = opb_uart16550_0
+ PARAMETER HW_VER = 1.00.d
+ PARAMETER C_BASEADDR = 0xa0000000
+ PARAMETER C_HIGHADDR = 0xa0001FFF
+ BUS_INTERFACE SOPB = opb_v20_0
+ END
+
+ BEGIN plb_v34
+ PARAMETER INSTANCE = plb_v34_0
+ PARAMETER HW_VER = 1.02.a
+ END
+
+ BEGIN plb_bram_if_cntlr
+ PARAMETER INSTANCE = plb_bram_if_cntlr_0
+ PARAMETER HW_VER = 1.00.b
+ PARAMETER C_BASEADDR = 0xFFFF0000
+ PARAMETER C_HIGHADDR = 0xFFFFFFFF
+ BUS_INTERFACE SPLB = plb_v34_0
+ END
+
+ BEGIN plb2opb_bridge
+ PARAMETER INSTANCE = plb2opb_bridge_0
+ PARAMETER HW_VER = 1.01.a
+ PARAMETER C_RNG0_BASEADDR = 0x20000000
+ PARAMETER C_RNG0_HIGHADDR = 0x3FFFFFFF
+ PARAMETER C_RNG1_BASEADDR = 0x60000000
+ PARAMETER C_RNG1_HIGHADDR = 0x7FFFFFFF
+ PARAMETER C_RNG2_BASEADDR = 0x80000000
+ PARAMETER C_RNG2_HIGHADDR = 0xBFFFFFFF
+ PARAMETER C_RNG3_BASEADDR = 0xC0000000
+ PARAMETER C_RNG3_HIGHADDR = 0xDFFFFFFF
+ BUS_INTERFACE SPLB = plb_v34_0
+ BUS_INTERFACE MOPB = opb_v20_0
+ END
+
+ Gives this device tree (some properties removed for clarity):
+
+ plb@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,plb-v34-1.02.a";
+ device_type = "ibm,plb";
+ ranges; // 1:1 translation
+
+ plb_bram_if_cntlr_0: bram@ffff0000 {
+ reg = <ffff0000 10000>;
+ };
+
+ opb@20000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <20000000 20000000 20000000
+ 60000000 60000000 20000000
+ 80000000 80000000 40000000
+ c0000000 c0000000 20000000>;
+
+ opb_uart16550_0: serial@a0000000 {
+ reg = <a0000000 2000>;
+ };
+
+ opb_intc_0: interrupt-controller@d1000fc0 {
+ reg = <d1000fc0 20>;
+ };
+ };
+ };
+
+ That covers the general approach to binding Xilinx IP cores into the
+ device tree. The following are bindings for specific devices:
+
+ i) Xilinx ML300 Framebuffer
+
+ Simple framebuffer device from the ML300 reference design (also found
+ on the ML403 and other reference designs).
+
+ Optional properties:
+ - resolution = <xres yres> : pixel resolution of framebuffer. Some
+ implementations use a different resolution.
+ Default is <d#640 d#480>
+ - virt-resolution = <xvirt yvirt> : Size of framebuffer in memory.
+ Default is <d#1024 d#480>.
+ - rotate-display (empty) : rotate display 180 degrees.
+
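+ A minimal framebuffer node using these properties might look like the
+ following sketch (the unit address, reg value and label are made up
+ for illustration):
+
+ tft_0: tft@d0000000 {
+ compatible = "xlnx,plb-tft-cntlr-ref-1.00.a";
+ reg = <d0000000 2000>;
+ resolution = <d#640 d#480>;
+ virt-resolution = <d#1024 d#480>;
+ rotate-display;
+ };
+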
+ ii) Xilinx SystemACE
+
+ The Xilinx SystemACE device is used to program FPGAs from an FPGA
+ bitstream stored on a CF card. It can also be used as a generic CF
+ interface device.
+
+ Optional properties:
+ - 8-bit (empty) : Set this property for SystemACE in 8-bit mode
+
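+ An illustrative SystemACE node (the address, interrupt values and IP
+ core version are hypothetical):
+
+ sysace@41800000 {
+ compatible = "xlnx,opb-sysace-1.00.b";
+ reg = <41800000 10000>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <2 0>;
+ 8-bit;
+ };
+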
+ iii) Xilinx EMAC and Xilinx TEMAC
+
+ Xilinx Ethernet devices. In addition to the general Xilinx properties
+ listed above, nodes for these devices should include a phy-handle
+ property, and may include other common network device properties
+ like local-mac-address.
+
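+ For illustration only (the compatible string, addresses, interrupt
+ values and phy label are hypothetical), such a node might contain:
+
+ ethernet@40c00000 {
+ compatible = "xlnx,opb-temac-1.00.b";
+ reg = <40c00000 10000>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <4 0>;
+ local-mac-address = [ 00 0a 35 00 00 01 ];
+ phy-handle = <&phy0>;
+ };
+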
+ iv) Xilinx Uartlite
+
+ Xilinx uartlite devices are simple fixed-speed serial ports.
+
+ Required properties:
+ - current-speed : Baud rate of uartlite
+
+ v) Xilinx hwicap
+
+ Xilinx hwicap devices provide access to the configuration logic
+ of the FPGA through the Internal Configuration Access Port
+ (ICAP). The ICAP enables partial reconfiguration of the FPGA,
+ readback of the configuration information, and some control over
+ 'warm boots' of the FPGA fabric.
+
+ Required properties:
+ - xlnx,family : The family of the FPGA, necessary since the
+ capabilities of the underlying ICAP hardware
+ differ between different families. May be
+ 'virtex2p', 'virtex4', or 'virtex5'.
+
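+ An illustrative hwicap node (the address, interrupt values and IP
+ core version are hypothetical):
+
+ hwicap@41600000 {
+ compatible = "xlnx,opb-hwicap-1.00.b";
+ reg = <41600000 10000>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <5 0>;
+ xlnx,family = "virtex4";
+ };
+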
+ vi) Xilinx Uart 16550
+
+ Xilinx UART 16550 devices are very similar to the NS16550 but with
+ different register spacing and an offset from the base address.
+
+ Required properties:
+ - clock-frequency : Frequency of the clock input
+ - reg-offset : A value of 3 is required
+ - reg-shift : A value of 2 is required
+
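+ An illustrative Uart 16550 node (the unit address, interrupt values,
+ clock frequency and the generic fallback compatible are made up for
+ illustration; note the fixed reg-offset and reg-shift values):
+
+ serial@a0010000 {
+ device_type = "serial";
+ compatible = "xlnx,opb-uart16550-1.00.d", "ns16550";
+ reg = <a0010000 2000>;
+ reg-offset = <3>;
+ reg-shift = <2>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <9 0>;
+ clock-frequency = <d#100000000>;
+ };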
+
diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt
index 1df7f9cdab05..86eabe6c3419 100644
--- a/Documentation/scheduler/sched-rt-group.txt
+++ b/Documentation/scheduler/sched-rt-group.txt
@@ -73,7 +73,7 @@ The remaining CPU time will be used for user input and other tasks. Because
realtime tasks have explicitly allocated the CPU time they need to perform
their tasks, buffer underruns in the graphics or audio can be eliminated.
-NOTE: the above example is not fully implemented as of yet (2.6.25). We still
+NOTE: the above example is not fully implemented yet. We still
lack an EDF scheduler to make non-uniform periods usable.
@@ -140,14 +140,15 @@ The other option is:
.o CONFIG_CGROUP_SCHED (aka "Basis for grouping tasks" = "Control groups")
-This uses the /cgroup virtual file system and "/cgroup/<cgroup>/cpu.rt_runtime_us"
-to control the CPU time reserved for each control group instead.
+This uses the /cgroup virtual file system and
+"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each
+control group instead.
For more information on working with control groups, you should read
Documentation/cgroups/cgroups.txt as well.
-Group settings are checked against the following limits in order to keep the configuration
-schedulable:
+Group settings are checked against the following limits in order to keep the
+configuration schedulable:
\Sum_{i} runtime_{i} / global_period <= global_runtime / global_period
@@ -189,7 +190,7 @@ Implementing SCHED_EDF might take a while to complete. Priority Inheritance is
the biggest challenge as the current linux PI infrastructure is geared towards
the limited static priority levels 0-99. With deadline scheduling you need to
do deadline inheritance (since priority is inversely proportional to the
-deadline delta (deadline - now).
+deadline delta (deadline - now)).
This means the whole PI machinery will have to be reworked - and that is one of
the most complex pieces of code we have.
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 4252697a95d6..f9d11140af91 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -768,6 +768,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
bdl_pos_adj - Specifies the DMA IRQ timing delay in samples.
Passing -1 will make the driver to choose the appropriate
value based on the controller chip.
+ patch - Specifies the early "patch" files to modify the HD-audio
+ setup before initializing the codecs. This option is
+ available only when CONFIG_SND_HDA_PATCH_LOADER=y is set.
+ See HD-Audio.txt for details.
[Single (global) options]
single_cmd - Use single immediate commands to communicate with
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 0d8d23581c44..a1895d7f3cf7 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -114,8 +114,8 @@ ALC662/663/272
samsung-nc10 Samsung NC10 mini notebook
auto auto-config reading BIOS (default)
-ALC882/885
-==========
+ALC882/883/885/888/889
+======================
3stack-dig 3-jack with SPDIF I/O
6stack-dig 6-jack digital with SPDIF I/O
arima Arima W820Di1
@@ -127,12 +127,8 @@ ALC882/885
mbp3 Macbook Pro rev3
imac24 iMac 24'' with jack detection
w2jc ASUS W2JC
- auto auto-config reading BIOS (default)
-
-ALC883/888
-==========
- 3stack-dig 3-jack with SPDIF I/O
- 6stack-dig 6-jack digital with SPDIF I/O
+ 3stack-2ch-dig 3-jack with SPDIF I/O (ALC883)
+ alc883-6stack-dig 6-jack digital with SPDIF I/O (ALC883)
3stack-6ch 3-jack 6-channel
3stack-6ch-dig 3-jack 6-channel with SPDIF I/O
6stack-dig-demo 6-jack digital for Intel demo board
@@ -240,6 +236,7 @@ AD1986A
laptop-automute 2-channel with EAPD and HP-automute (Lenovo N100)
ultra 2-channel with EAPD (Samsung Ultra tablet PC)
samsung 2-channel with EAPD (Samsung R65)
+ samsung-p50 2-channel with HP-automute (Samsung P50)
AD1988/AD1988B/AD1989A/AD1989B
==============================
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index 71ac995b1915..0b5b480708f8 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -403,6 +403,66 @@ re-configure based on that state, run like below:
------------------------------------------------------------------------
+Early Patching
+~~~~~~~~~~~~~~
+When CONFIG_SND_HDA_PATCH_LOADER=y is set, you can pass a "patch" as a
+firmware file for modifying the HD-audio setup before initializing the
+codec. This works basically like the reconfiguration via sysfs
+described above, but it takes effect before the first codec
+configuration.
+
+A patch file is a plain text file which looks like below:
+
+------------------------------------------------------------------------
+ [codec]
+ 0x12345678 0xabcd1234 2
+
+ [model]
+ auto
+
+ [pincfg]
+ 0x12 0x411111f0
+
+ [verb]
+ 0x20 0x500 0x03
+ 0x20 0x400 0xff
+
+ [hint]
+ hp_detect = yes
+------------------------------------------------------------------------
+
+The file needs to have a line `[codec]`. The next line should contain
+three numbers indicating the codec vendor-id (0x12345678 in the
+example), the codec subsystem-id (0xabcd1234) and the address (2) of
+the codec. The remaining patch entries are applied to this codec
+until another codec entry is given.
+
+The `[model]` line allows changing the model name of each codec.
+In the example above, it will be changed to model=auto.
+Note that this overrides the module option.
+
+After the `[pincfg]` line, the contents are parsed as the initial
+default pin-configurations just like `user_pin_configs` sysfs above.
+The values can also be read back from the user_pin_configs sysfs file.
+
+Similarly, the lines after `[verb]` are parsed as `init_verbs`
+sysfs entries, and the lines after `[hint]` are parsed as `hints`
+sysfs entries.
+
+The hd-audio driver reads the file via request_firmware(). Thus,
+a patch file has to be located on the appropriate firmware path,
+typically, /lib/firmware. For example, when you pass the option
+`patch=hda-init.fw`, the file /lib/firmware/hda-init.fw must be
+present.
+
+The patch module option is specific to each card instance, and you
+need to give one file name for each instance, separated by commas.
+For example, if you have two cards, one for an on-board analog and one
+for an HDMI video board, you may pass the patch option like below:
+------------------------------------------------------------------------
+ options snd-hda-intel patch=on-board-patch,hdmi-patch
+------------------------------------------------------------------------
+
+
Power-Saving
~~~~~~~~~~~~
The power-saving is a kind of auto-suspend of the device. When the
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index cf0e3ce0d526..c1a5aad3c75a 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -99,11 +99,13 @@ void parse_opts(int argc, char *argv[])
{ "lsb", 0, 0, 'L' },
{ "cs-high", 0, 0, 'C' },
{ "3wire", 0, 0, '3' },
+ { "no-cs", 0, 0, 'N' },
+ { "ready", 0, 0, 'R' },
{ NULL, 0, 0, 0 },
};
int c;
- c = getopt_long(argc, argv, "D:s:d:b:lHOLC3", lopts, NULL);
+ c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR", lopts, NULL);
if (c == -1)
break;
@@ -139,6 +141,12 @@ void parse_opts(int argc, char *argv[])
case '3':
mode |= SPI_3WIRE;
break;
+ case 'N':
+ mode |= SPI_NO_CS;
+ break;
+ case 'R':
+ mode |= SPI_READY;
+ break;
default:
print_usage(argv[0]);
break;
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 322a00bb99d9..dd8322f40fd0 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -303,25 +303,29 @@ This option can be used to select the type of process address
space randomization that is used in the system, for architectures
that support this feature.
-0 - Turn the process address space randomization off by default.
+0 - Turn the process address space randomization off. This is the
+ default for architectures that do not support this feature anyway,
+ and kernels that are booted with the "norandmaps" parameter.
1 - Make the addresses of mmap base, stack and VDSO page randomized.
This, among other things, implies that shared libraries will be
- loaded to random addresses. Also for PIE-linked binaries, the location
- of code start is randomized.
+ loaded to random addresses. Also for PIE-linked binaries, the
+ location of code start is randomized. This is the default if the
+ CONFIG_COMPAT_BRK option is enabled.
- With heap randomization, the situation is a little bit more
- complicated.
- There a few legacy applications out there (such as some ancient
+2 - Additionally enable heap randomization. This is the default if
+ CONFIG_COMPAT_BRK is disabled.
+
+ There are a few legacy applications out there (such as some ancient
versions of libc.so.5 from 1996) that assume that brk area starts
- just after the end of the code+bss. These applications break when
- start of the brk area is randomized. There are however no known
+ just after the end of the code+bss. These applications break when
+ start of the brk area is randomized. There are however no known
non-legacy applications that would be broken this way, so for most
- systems it is safe to choose full randomization. However there is
- a CONFIG_COMPAT_BRK option for systems with ancient and/or broken
- binaries, that makes heap non-randomized, but keeps all other
- parts of process address space randomized if randomize_va_space
- sysctl is turned on.
+ systems it is safe to choose full randomization.
+
+ Systems with ancient and/or broken binaries should be configured
+ with CONFIG_COMPAT_BRK enabled, which excludes the heap from process
+ address space randomization.
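+
+ The current setting can be inspected and changed at runtime through
+ the usual sysctl/procfs interface, for example (a quick sketch):
+
+ cat /proc/sys/kernel/randomize_va_space
+ echo 2 > /proc/sys/kernel/randomize_va_space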
==============================================================
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index f157d7594ea7..2bcc8d4dea29 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -83,6 +83,15 @@ When reading one of these enable files, there are four results:
X - there is a mixture of events enabled and disabled
? - this file does not affect any event
+2.3 Boot option
+---------------
+
+To facilitate early boot debugging, use the boot option:
+
+ trace_event=[event-list]
+
+The format of this boot option is the same as described in section 2.1.
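+
+For example (the event names here are purely illustrative), booting with
+
+  trace_event=sched:sched_switch,irq:irq_handler_entry
+
+enables those two events from the very beginning of the boot process.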
+
3. Defining an event-enabled tracepoint
=======================================
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index 873630e7e53e..2c52f7a70ca5 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -33,7 +33,7 @@
34 -> Terratec Cinergy A Hybrid XS (em2860) [0ccd:004f]
35 -> Typhoon DVD Maker (em2860)
36 -> NetGMBH Cam (em2860)
- 37 -> Gadmei UTV330 (em2860)
+ 37 -> Gadmei UTV330 (em2860) [eb1a:50a6]
38 -> Yakumo MovieMixer (em2861)
39 -> KWorld PVRTV 300U (em2861) [eb1a:e300]
40 -> Plextor ConvertX PX-TV100U (em2861) [093b:a005]
@@ -66,3 +66,4 @@
68 -> Terratec AV350 (em2860) [0ccd:0084]
69 -> KWorld ATSC 315U HDTV TV Box (em2882) [eb1a:a313]
70 -> Evga inDtube (em2882)
+ 71 -> Silvercrest Webcam 1.3mpix (em2820/em2840)
diff --git a/Documentation/vm/slqbinfo.c b/Documentation/vm/slqbinfo.c
new file mode 100644
index 000000000000..3146d3d7c856
--- /dev/null
+++ b/Documentation/vm/slqbinfo.c
@@ -0,0 +1,1047 @@
+/*
+ * Slabinfo: Tool to get reports about slabs
+ *
+ * (C) 2007 sgi, Christoph Lameter
+ *
+ * Reworked by Lin Ming <ming.m.lin@intel.com> for SLQB
+ *
+ * Compile by:
+ *
+ * gcc -o slqbinfo slqbinfo.c
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <strings.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <getopt.h>
+#include <regex.h>
+#include <errno.h>
+
+#define MAX_SLABS 500
+#define MAX_ALIASES 500
+#define MAX_NODES 1024
+
+struct slabinfo {
+ char *name;
+ int align, cache_dma, destroy_by_rcu;
+ int hwcache_align, object_size, objs_per_slab;
+ int slab_size, store_user;
+ int order, poison, reclaim_account, red_zone;
+ int batch;
+ unsigned long objects, slabs, total_objects;
+ unsigned long alloc, alloc_slab_fill, alloc_slab_new;
+ unsigned long free, free_remote;
+ unsigned long claim_remote_list, claim_remote_list_objects;
+ unsigned long flush_free_list, flush_free_list_objects, flush_free_list_remote;
+ unsigned long flush_rfree_list, flush_rfree_list_objects;
+ unsigned long flush_slab_free, flush_slab_partial;
+ int numa[MAX_NODES];
+ int numa_partial[MAX_NODES];
+} slabinfo[MAX_SLABS];
+
+int slabs = 0;
+int actual_slabs = 0;
+int highest_node = 0;
+
+char buffer[4096];
+
+int show_empty = 0;
+int show_report = 0;
+int show_slab = 0;
+int skip_zero = 1;
+int show_numa = 0;
+int show_track = 0;
+int validate = 0;
+int shrink = 0;
+int show_inverted = 0;
+int show_totals = 0;
+int sort_size = 0;
+int sort_active = 0;
+int set_debug = 0;
+int show_ops = 0;
+int show_activity = 0;
+
+/* Debug options */
+int sanity = 0;
+int redzone = 0;
+int poison = 0;
+int tracking = 0;
+int tracing = 0;
+
+int page_size;
+
+regex_t pattern;
+
+void fatal(const char *x, ...)
+{
+ va_list ap;
+
+ va_start(ap, x);
+ vfprintf(stderr, x, ap);
+ va_end(ap);
+ exit(EXIT_FAILURE);
+}
+
+void usage(void)
+{
+ printf("slabinfo 5/7/2007. (c) 2007 sgi.\n\n"
+ "slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
+ "-A|--activity Most active slabs first\n"
+ "-d<options>|--debug=<options> Set/Clear Debug options\n"
+ "-D|--display-active Switch line format to activity\n"
+ "-e|--empty Show empty slabs\n"
+ "-h|--help Show usage information\n"
+ "-i|--inverted Inverted list\n"
+ "-l|--slabs Show slabs\n"
+ "-n|--numa Show NUMA information\n"
+ "-o|--ops Show kmem_cache_ops\n"
+ "-s|--shrink Shrink slabs\n"
+ "-r|--report Detailed report on single slabs\n"
+ "-S|--Size Sort by size\n"
+ "-t|--tracking Show alloc/free information\n"
+ "-T|--Totals Show summary information\n"
+ "-v|--validate Validate slabs\n"
+ "-z|--zero Include empty slabs\n"
+ "\nValid debug options (FZPUT may be combined)\n"
+ "a / A Switch on all debug options (=FZUP)\n"
+ "- Switch off all debug options\n"
+ "f / F Sanity Checks (SLAB_DEBUG_FREE)\n"
+ "z / Z Redzoning\n"
+ "p / P Poisoning\n"
+ "u / U Tracking\n"
+ "t / T Tracing\n"
+ );
+}
+
+unsigned long read_obj(const char *name)
+{
+ FILE *f = fopen(name, "r");
+
+ if (!f)
+ buffer[0] = 0;
+ else {
+ if (!fgets(buffer, sizeof(buffer), f))
+ buffer[0] = 0;
+ fclose(f);
+ /* strip the trailing newline, if present */
+ if (buffer[0] && buffer[strlen(buffer) - 1] == '\n')
+ buffer[strlen(buffer) - 1] = 0;
+ }
+ return strlen(buffer);
+}
+
+
+/*
+ * Get the contents of an attribute
+ */
+unsigned long get_obj(const char *name)
+{
+ if (!read_obj(name))
+ return 0;
+
+ return atol(buffer);
+}
+
+unsigned long get_obj_and_str(const char *name, char **x)
+{
+ unsigned long result = 0;
+ char *p;
+
+ *x = NULL;
+
+ if (!read_obj(name)) {
+ x = NULL;
+ return 0;
+ }
+ result = strtoul(buffer, &p, 10);
+ while (*p == ' ')
+ p++;
+ if (*p)
+ *x = strdup(p);
+ return result;
+}
+
+void set_obj(struct slabinfo *s, const char *name, int n)
+{
+ char x[100];
+ FILE *f;
+
+ snprintf(x, 100, "%s/%s", s->name, name);
+ f = fopen(x, "w");
+ if (!f)
+ fatal("Cannot write to %s\n", x);
+
+ fprintf(f, "%d\n", n);
+ fclose(f);
+}
+
+unsigned long read_slab_obj(struct slabinfo *s, const char *name)
+{
+ char x[100];
+ FILE *f;
+ size_t l;
+
+ snprintf(x, 100, "%s/%s", s->name, name);
+ f = fopen(x, "r");
+ if (!f) {
+ buffer[0] = 0;
+ l = 0;
+ } else {
+ l = fread(buffer, 1, sizeof(buffer), f);
+ buffer[l] = 0;
+ fclose(f);
+ }
+ return l;
+}
+
+
+/*
+ * Put a size string together
+ */
+int store_size(char *buffer, unsigned long value)
+{
+ unsigned long divisor = 1;
+ char trailer = 0;
+ int n;
+
+ if (value > 1000000000UL) {
+ divisor = 100000000UL;
+ trailer = 'G';
+ } else if (value > 1000000UL) {
+ divisor = 100000UL;
+ trailer = 'M';
+ } else if (value > 1000UL) {
+ divisor = 100;
+ trailer = 'K';
+ }
+
+ value /= divisor;
+ n = sprintf(buffer, "%ld",value);
+ if (trailer) {
+ buffer[n] = trailer;
+ n++;
+ buffer[n] = 0;
+ }
+ if (divisor != 1) {
+ memmove(buffer + n - 2, buffer + n - 3, 4);
+ buffer[n-2] = '.';
+ n++;
+ }
+ return n;
+}
+
+void decode_numa_list(int *numa, char *t)
+{
+ int node;
+ int nr;
+
+ memset(numa, 0, MAX_NODES * sizeof(int));
+
+ if (!t)
+ return;
+
+ while (*t == 'N') {
+ t++;
+ node = strtoul(t, &t, 10);
+ if (*t == '=') {
+ t++;
+ nr = strtoul(t, &t, 10);
+ numa[node] = nr;
+ if (node > highest_node)
+ highest_node = node;
+ }
+ while (*t == ' ')
+ t++;
+ }
+}
+
+void slab_validate(struct slabinfo *s)
+{
+ if (strcmp(s->name, "*") == 0)
+ return;
+
+ set_obj(s, "validate", 1);
+}
+
+void slab_shrink(struct slabinfo *s)
+{
+ if (strcmp(s->name, "*") == 0)
+ return;
+
+ set_obj(s, "shrink", 1);
+}
+
+int line = 0;
+
+void first_line(void)
+{
+ if (show_activity)
+ printf("Name Objects Alloc Free %%Fill %%New "
+ "FlushR %%FlushR FlushR_Objs O\n");
+ else
+ printf("Name Objects Objsize Space "
+ " O/S O %%Ef Batch Flg\n");
+}
+
+unsigned long slab_size(struct slabinfo *s)
+{
+ return s->slabs * (page_size << s->order);
+}
+
+unsigned long slab_activity(struct slabinfo *s)
+{
+ return s->alloc + s->free;
+}
+
+void slab_numa(struct slabinfo *s, int mode)
+{
+ int node;
+
+ if (strcmp(s->name, "*") == 0)
+ return;
+
+ if (!highest_node) {
+ printf("\n%s: No NUMA information available.\n", s->name);
+ return;
+ }
+
+ if (skip_zero && !s->slabs)
+ return;
+
+ if (!line) {
+ printf("\n%-21s:", mode ? "NUMA nodes" : "Slab");
+ for(node = 0; node <= highest_node; node++)
+ printf(" %4d", node);
+ printf("\n----------------------");
+ for(node = 0; node <= highest_node; node++)
+ printf("-----");
+ printf("\n");
+ }
+ printf("%-21s ", mode ? "All slabs" : s->name);
+ for(node = 0; node <= highest_node; node++) {
+ char b[20];
+
+ store_size(b, s->numa[node]);
+ printf(" %4s", b);
+ }
+ printf("\n");
+ if (mode) {
+ printf("%-21s ", "Partial slabs");
+ for(node = 0; node <= highest_node; node++) {
+ char b[20];
+
+ store_size(b, s->numa_partial[node]);
+ printf(" %4s", b);
+ }
+ printf("\n");
+ }
+ line++;
+}
+
+void show_tracking(struct slabinfo *s)
+{
+ printf("\n%s: Kernel object allocation\n", s->name);
+ printf("-----------------------------------------------------------------------\n");
+ if (read_slab_obj(s, "alloc_calls"))
+ printf(buffer);
+ else
+ printf("No Data\n");
+
+ printf("\n%s: Kernel object freeing\n", s->name);
+ printf("------------------------------------------------------------------------\n");
+ if (read_slab_obj(s, "free_calls"))
+ printf(buffer);
+ else
+ printf("No Data\n");
+
+}
+
+void ops(struct slabinfo *s)
+{
+ if (strcmp(s->name, "*") == 0)
+ return;
+
+ if (read_slab_obj(s, "ops")) {
+ printf("\n%s: kmem_cache operations\n", s->name);
+ printf("--------------------------------------------\n");
+ printf(buffer);
+ } else
+ printf("\n%s has no kmem_cache operations\n", s->name);
+}
+
+const char *onoff(int x)
+{
+ if (x)
+ return "On ";
+ return "Off";
+}
+
+void slab_stats(struct slabinfo *s)
+{
+ unsigned long total_alloc;
+ unsigned long total_free;
+
+ total_alloc = s->alloc;
+ total_free = s->free;
+
+ if (!total_alloc)
+ return;
+
+ printf("\n");
+ printf("Slab Perf Counter\n");
+ printf("------------------------------------------------------------------------\n");
+ printf("Alloc: %8lu, partial %8lu, page allocator %8lu\n",
+ total_alloc,
+ s->alloc_slab_fill, s->alloc_slab_new);
+ printf("Free: %8lu, partial %8lu, page allocator %8lu, remote %5lu\n",
+ total_free,
+ s->flush_slab_partial,
+ s->flush_slab_free,
+ s->free_remote);
+ printf("Claim: %8lu, objects %8lu\n",
+ s->claim_remote_list,
+ s->claim_remote_list_objects);
+ printf("Flush: %8lu, objects %8lu, remote: %8lu\n",
+ s->flush_free_list,
+ s->flush_free_list_objects,
+ s->flush_free_list_remote);
+ printf("FlushR:%8lu, objects %8lu\n",
+ s->flush_rfree_list,
+ s->flush_rfree_list_objects);
+}
+
+void report(struct slabinfo *s)
+{
+ if (strcmp(s->name, "*") == 0)
+ return;
+
+ printf("\nSlabcache: %-20s Order : %2d Objects: %lu\n",
+ s->name, s->order, s->objects);
+ if (s->hwcache_align)
+ printf("** Hardware cacheline aligned\n");
+ if (s->cache_dma)
+ printf("** Memory is allocated in a special DMA zone\n");
+ if (s->destroy_by_rcu)
+ printf("** Slabs are destroyed via RCU\n");
+ if (s->reclaim_account)
+ printf("** Reclaim accounting active\n");
+
+ printf("\nSizes (bytes) Slabs Debug Memory\n");
+ printf("------------------------------------------------------------------------\n");
+ printf("Object : %7d Total : %7ld Sanity Checks : %s Total: %7ld\n",
+ s->object_size, s->slabs, "N/A",
+ s->slabs * (page_size << s->order));
+ printf("SlabObj: %7d Full : %7s Redzoning : %s Used : %7ld\n",
+ s->slab_size, "N/A",
+ onoff(s->red_zone), s->objects * s->object_size);
+ printf("SlabSiz: %7d Partial: %7s Poisoning : %s Loss : %7ld\n",
+ page_size << s->order, "N/A", onoff(s->poison),
+ s->slabs * (page_size << s->order) - s->objects * s->object_size);
+ printf("Loss : %7d CpuSlab: %7s Tracking : %s Lalig: %7ld\n",
+ s->slab_size - s->object_size, "N/A", onoff(s->store_user),
+ (s->slab_size - s->object_size) * s->objects);
+ printf("Align : %7d Objects: %7d Tracing : %s Lpadd: %7ld\n",
+ s->align, s->objs_per_slab, "N/A",
+ ((page_size << s->order) - s->objs_per_slab * s->slab_size) *
+ s->slabs);
+
+ ops(s);
+ show_tracking(s);
+ slab_numa(s, 1);
+ slab_stats(s);
+}
+
+void slabcache(struct slabinfo *s)
+{
+ char size_str[20];
+ char flags[20];
+ char *p = flags;
+
+ if (strcmp(s->name, "*") == 0)
+ return;
+
+ if (actual_slabs == 1) {
+ report(s);
+ return;
+ }
+
+ if (skip_zero && !show_empty && !s->slabs)
+ return;
+
+ if (show_empty && s->slabs)
+ return;
+
+ store_size(size_str, slab_size(s));
+
+ if (!line++)
+ first_line();
+
+ if (s->cache_dma)
+ *p++ = 'd';
+ if (s->hwcache_align)
+ *p++ = 'A';
+ if (s->poison)
+ *p++ = 'P';
+ if (s->reclaim_account)
+ *p++ = 'a';
+ if (s->red_zone)
+ *p++ = 'Z';
+ if (s->store_user)
+ *p++ = 'U';
+
+ *p = 0;
+ if (show_activity) {
+ unsigned long total_alloc;
+ unsigned long total_free;
+
+ total_alloc = s->alloc;
+ total_free = s->free;
+
+ printf("%-21s %8ld %10ld %10ld %5ld %5ld %7ld %5ld %7ld %8d\n",
+ s->name, s->objects,
+ total_alloc, total_free,
+ total_alloc ? (s->alloc_slab_fill * 100 / total_alloc) : 0,
+ total_alloc ? (s->alloc_slab_new * 100 / total_alloc) : 0,
+ s->flush_rfree_list,
+ s->flush_rfree_list * 100 / (total_alloc + total_free),
+ s->flush_rfree_list_objects,
+ s->order);
+ }
+ else
+ printf("%-21s %8ld %7d %8s %4d %1d %3ld %4d %s\n",
+ s->name, s->objects, s->object_size, size_str,
+ s->objs_per_slab, s->order,
+ s->slabs ? (s->objects * s->object_size * 100) /
+ (s->slabs * (page_size << s->order)) : 100,
+ s->batch, flags);
+}
+
+/*
+ * Analyze debug options. Return false if something is amiss.
+ */
+int debug_opt_scan(char *opt)
+{
+ if (!opt || !opt[0] || strcmp(opt, "-") == 0)
+ return 1;
+
+ if (strcasecmp(opt, "a") == 0) {
+ sanity = 1;
+ poison = 1;
+ redzone = 1;
+ tracking = 1;
+ return 1;
+ }
+
+ for ( ; *opt; opt++)
+ switch (*opt) {
+ case 'F' : case 'f':
+ if (sanity)
+ return 0;
+ sanity = 1;
+ break;
+ case 'P' : case 'p':
+ if (poison)
+ return 0;
+ poison = 1;
+ break;
+
+ case 'Z' : case 'z':
+ if (redzone)
+ return 0;
+ redzone = 1;
+ break;
+
+ case 'U' : case 'u':
+ if (tracking)
+ return 0;
+ tracking = 1;
+ break;
+
+ case 'T' : case 't':
+ if (tracing)
+ return 0;
+ tracing = 1;
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+int slab_empty(struct slabinfo *s)
+{
+ if (s->objects > 0)
+ return 0;
+
+ /*
+ * We may still have slabs even if there are no objects. Shrinking will
+ * remove them.
+ */
+ if (s->slabs != 0)
+ set_obj(s, "shrink", 1);
+
+ return 1;
+}
+
+void slab_debug(struct slabinfo *s)
+{
+ if (strcmp(s->name, "*") == 0)
+ return;
+
+ if (redzone && !s->red_zone) {
+ if (slab_empty(s))
+ set_obj(s, "red_zone", 1);
+ else
+ fprintf(stderr, "%s not empty cannot enable redzoning\n", s->name);
+ }
+ if (!redzone && s->red_zone) {
+ if (slab_empty(s))
+ set_obj(s, "red_zone", 0);
+ else
+ fprintf(stderr, "%s not empty cannot disable redzoning\n", s->name);
+ }
+ if (poison && !s->poison) {
+ if (slab_empty(s))
+ set_obj(s, "poison", 1);
+ else
+ fprintf(stderr, "%s not empty cannot enable poisoning\n", s->name);
+ }
+ if (!poison && s->poison) {
+ if (slab_empty(s))
+ set_obj(s, "poison", 0);
+ else
+ fprintf(stderr, "%s not empty cannot disable poisoning\n", s->name);
+ }
+ if (tracking && !s->store_user) {
+ if (slab_empty(s))
+ set_obj(s, "store_user", 1);
+ else
+ fprintf(stderr, "%s not empty cannot enable tracking\n", s->name);
+ }
+ if (!tracking && s->store_user) {
+ if (slab_empty(s))
+ set_obj(s, "store_user", 0);
+ else
+ fprintf(stderr, "%s not empty cannot disable tracking\n", s->name);
+ }
+}
+
+void totals(void)
+{
+ struct slabinfo *s;
+
+ int used_slabs = 0;
+ char b1[20], b2[20], b3[20], b4[20];
+ unsigned long long max = 1ULL << 63;
+
+ /* Object size */
+ unsigned long long min_objsize = max, max_objsize = 0, avg_objsize;
+
+ /* Number of slabs in a slab cache */
+ unsigned long long min_slabs = max, max_slabs = 0,
+ avg_slabs, total_slabs = 0;
+
+ /* Size of the whole slab */
+ unsigned long long min_size = max, max_size = 0,
+ avg_size, total_size = 0;
+
+ /* Bytes used for object storage in a slab */
+ unsigned long long min_used = max, max_used = 0,
+ avg_used, total_used = 0;
+
+ /* Waste: Bytes used for alignment and padding */
+ unsigned long long min_waste = max, max_waste = 0,
+ avg_waste, total_waste = 0;
+ /* Number of objects in a slab */
+ unsigned long long min_objects = max, max_objects = 0,
+ avg_objects, total_objects = 0;
+ /* Waste per object */
+ unsigned long long min_objwaste = max,
+ max_objwaste = 0, avg_objwaste,
+ total_objwaste = 0;
+
+ /* Memory per object */
+ unsigned long long min_memobj = max,
+ max_memobj = 0, avg_memobj,
+ total_objsize = 0;
+
+ for (s = slabinfo; s < slabinfo + slabs; s++) {
+ unsigned long long size;
+ unsigned long used;
+ unsigned long long wasted;
+ unsigned long long objwaste;
+
+ if (!s->slabs || !s->objects)
+ continue;
+
+ used_slabs++;
+
+ size = slab_size(s);
+ used = s->objects * s->object_size;
+ wasted = size - used;
+ objwaste = s->slab_size - s->object_size;
+
+ if (s->object_size < min_objsize)
+ min_objsize = s->object_size;
+ if (s->slabs < min_slabs)
+ min_slabs = s->slabs;
+ if (size < min_size)
+ min_size = size;
+ if (wasted < min_waste)
+ min_waste = wasted;
+ if (objwaste < min_objwaste)
+ min_objwaste = objwaste;
+ if (s->objects < min_objects)
+ min_objects = s->objects;
+ if (used < min_used)
+ min_used = used;
+ if (s->slab_size < min_memobj)
+ min_memobj = s->slab_size;
+
+ if (s->object_size > max_objsize)
+ max_objsize = s->object_size;
+ if (s->slabs > max_slabs)
+ max_slabs = s->slabs;
+ if (size > max_size)
+ max_size = size;
+ if (wasted > max_waste)
+ max_waste = wasted;
+ if (objwaste > max_objwaste)
+ max_objwaste = objwaste;
+ if (s->objects > max_objects)
+ max_objects = s->objects;
+ if (used > max_used)
+ max_used = used;
+ if (s->slab_size > max_memobj)
+ max_memobj = s->slab_size;
+
+ total_slabs += s->slabs;
+ total_size += size;
+ total_waste += wasted;
+
+ total_objects += s->objects;
+ total_used += used;
+
+ total_objwaste += s->objects * objwaste;
+ total_objsize += s->objects * s->slab_size;
+ }
+
+ if (!total_objects) {
+ printf("No objects\n");
+ return;
+ }
+ if (!used_slabs) {
+ printf("No slabs\n");
+ return;
+ }
+
+ /* Per slab averages */
+ avg_slabs = total_slabs / used_slabs;
+ avg_size = total_size / used_slabs;
+ avg_waste = total_waste / used_slabs;
+
+ avg_objects = total_objects / used_slabs;
+ avg_used = total_used / used_slabs;
+
+ /* Per object object sizes */
+ avg_objsize = total_used / total_objects;
+ avg_objwaste = total_objwaste / total_objects;
+ avg_memobj = total_objsize / total_objects;
+
+ printf("Slabcache Totals\n");
+ printf("----------------\n");
+ printf("Slabcaches : %3d Active: %3d\n",
+ slabs, used_slabs);
+
+ store_size(b1, total_size);store_size(b2, total_waste);
+ store_size(b3, total_waste * 100 / total_used);
+ printf("Memory used: %6s # Loss : %6s MRatio:%6s%%\n", b1, b2, b3);
+
+ store_size(b1, total_objects);
+ printf("# Objects : %6s\n", b1);
+
+ printf("\n");
+ printf("Per Cache Average Min Max Total\n");
+ printf("---------------------------------------------------------\n");
+
+ store_size(b1, avg_objects);store_size(b2, min_objects);
+ store_size(b3, max_objects);store_size(b4, total_objects);
+ printf("#Objects %10s %10s %10s %10s\n",
+ b1, b2, b3, b4);
+
+ store_size(b1, avg_slabs);store_size(b2, min_slabs);
+ store_size(b3, max_slabs);store_size(b4, total_slabs);
+ printf("#Slabs %10s %10s %10s %10s\n",
+ b1, b2, b3, b4);
+
+ store_size(b1, avg_size);store_size(b2, min_size);
+ store_size(b3, max_size);store_size(b4, total_size);
+ printf("Memory %10s %10s %10s %10s\n",
+ b1, b2, b3, b4);
+
+ store_size(b1, avg_used);store_size(b2, min_used);
+ store_size(b3, max_used);store_size(b4, total_used);
+ printf("Used %10s %10s %10s %10s\n",
+ b1, b2, b3, b4);
+
+ store_size(b1, avg_waste);store_size(b2, min_waste);
+ store_size(b3, max_waste);store_size(b4, total_waste);
+ printf("Loss %10s %10s %10s %10s\n",
+ b1, b2, b3, b4);
+
+ printf("\n");
+ printf("Per Object Average Min Max\n");
+ printf("---------------------------------------------\n");
+
+ store_size(b1, avg_memobj);store_size(b2, min_memobj);
+ store_size(b3, max_memobj);
+ printf("Memory %10s %10s %10s\n",
+ b1, b2, b3);
+ store_size(b1, avg_objsize);store_size(b2, min_objsize);
+ store_size(b3, max_objsize);
+ printf("User %10s %10s %10s\n",
+ b1, b2, b3);
+
+ store_size(b1, avg_objwaste);store_size(b2, min_objwaste);
+ store_size(b3, max_objwaste);
+ printf("Loss %10s %10s %10s\n",
+ b1, b2, b3);
+}
+
+void sort_slabs(void)
+{
+ struct slabinfo *s1,*s2;
+
+ for (s1 = slabinfo; s1 < slabinfo + slabs; s1++) {
+ for (s2 = s1 + 1; s2 < slabinfo + slabs; s2++) {
+ int result;
+
+ if (sort_size)
+ result = slab_size(s1) < slab_size(s2);
+ else if (sort_active)
+ result = slab_activity(s1) < slab_activity(s2);
+ else
+ result = strcasecmp(s1->name, s2->name);
+
+ if (show_inverted)
+ result = -result;
+
+ if (result > 0) {
+ struct slabinfo t;
+
+ memcpy(&t, s1, sizeof(struct slabinfo));
+ memcpy(s1, s2, sizeof(struct slabinfo));
+ memcpy(s2, &t, sizeof(struct slabinfo));
+ }
+ }
+ }
+}
+
+int slab_mismatch(char *slab)
+{
+ return regexec(&pattern, slab, 0, NULL, 0);
+}
+
+void read_slab_dir(void)
+{
+ DIR *dir;
+ struct dirent *de;
+ struct slabinfo *slab = slabinfo;
+ char *t;
+
+ if (chdir("/sys/kernel/slab") && chdir("/sys/slab"))
+ fatal("SYSFS support for SLUB not active\n");
+
+ dir = opendir(".");
+ while ((de = readdir(dir))) {
+ if (de->d_name[0] == '.' ||
+ (de->d_name[0] != ':' && slab_mismatch(de->d_name)))
+ continue;
+ switch (de->d_type) {
+ case DT_DIR:
+ if (chdir(de->d_name))
+ fatal("Unable to access slab %s\n", slab->name);
+ slab->name = strdup(de->d_name);
+ slab->align = get_obj("align");
+ slab->cache_dma = get_obj("cache_dma");
+ slab->destroy_by_rcu = get_obj("destroy_by_rcu");
+ slab->hwcache_align = get_obj("hwcache_align");
+ slab->object_size = get_obj("object_size");
+ slab->objects = get_obj("objects");
+ slab->total_objects = get_obj("total_objects");
+ slab->objs_per_slab = get_obj("objs_per_slab");
+ slab->order = get_obj("order");
+ slab->poison = get_obj("poison");
+ slab->reclaim_account = get_obj("reclaim_account");
+ slab->red_zone = get_obj("red_zone");
+ slab->slab_size = get_obj("slab_size");
+ slab->slabs = get_obj_and_str("slabs", &t);
+ decode_numa_list(slab->numa, t);
+ free(t);
+ slab->store_user = get_obj("store_user");
+ slab->batch = get_obj("batch");
+ slab->alloc = get_obj("alloc");
+ slab->alloc_slab_fill = get_obj("alloc_slab_fill");
+ slab->alloc_slab_new = get_obj("alloc_slab_new");
+ slab->free = get_obj("free");
+ slab->free_remote = get_obj("free_remote");
+ slab->claim_remote_list = get_obj("claim_remote_list");
+ slab->claim_remote_list_objects = get_obj("claim_remote_list_objects");
+ slab->flush_free_list = get_obj("flush_free_list");
+ slab->flush_free_list_objects = get_obj("flush_free_list_objects");
+ slab->flush_free_list_remote = get_obj("flush_free_list_remote");
+ slab->flush_rfree_list = get_obj("flush_rfree_list");
+ slab->flush_rfree_list_objects = get_obj("flush_rfree_list_objects");
+ slab->flush_slab_free = get_obj("flush_slab_free");
+ slab->flush_slab_partial = get_obj("flush_slab_partial");
+
+ chdir("..");
+ slab++;
+ break;
+ default :
+ fatal("Unknown file type %lx\n", de->d_type);
+ }
+ }
+ closedir(dir);
+ slabs = slab - slabinfo;
+ actual_slabs = slabs;
+ if (slabs > MAX_SLABS)
+ fatal("Too many slabs\n");
+}
+
+void output_slabs(void)
+{
+ struct slabinfo *slab;
+
+ for (slab = slabinfo; slab < slabinfo + slabs; slab++) {
+
+ if (show_numa)
+ slab_numa(slab, 0);
+ else if (show_track)
+ show_tracking(slab);
+ else if (validate)
+ slab_validate(slab);
+ else if (shrink)
+ slab_shrink(slab);
+ else if (set_debug)
+ slab_debug(slab);
+ else if (show_ops)
+ ops(slab);
+ else if (show_slab)
+ slabcache(slab);
+ else if (show_report)
+ report(slab);
+ }
+}
+
+struct option opts[] = {
+ { "activity", 0, NULL, 'A' },
+ { "debug", 2, NULL, 'd' },
+ { "display-activity", 0, NULL, 'D' },
+ { "empty", 0, NULL, 'e' },
+ { "help", 0, NULL, 'h' },
+ { "inverted", 0, NULL, 'i'},
+ { "numa", 0, NULL, 'n' },
+ { "ops", 0, NULL, 'o' },
+ { "report", 0, NULL, 'r' },
+ { "shrink", 0, NULL, 's' },
+ { "slabs", 0, NULL, 'l' },
+ { "track", 0, NULL, 't'},
+ { "validate", 0, NULL, 'v' },
+ { "zero", 0, NULL, 'z' },
+ { "1ref", 0, NULL, '1'},
+ { NULL, 0, NULL, 0 }
+};
+
+int main(int argc, char *argv[])
+{
+ int c;
+ int err;
+ char *pattern_source;
+
+ page_size = getpagesize();
+
+ while ((c = getopt_long(argc, argv, "Ad::Dehil1noprstvzTS",
+ opts, NULL)) != -1)
+ switch (c) {
+ case 'A':
+ sort_active = 1;
+ break;
+ case 'd':
+ set_debug = 1;
+ if (!debug_opt_scan(optarg))
+ fatal("Invalid debug option '%s'\n", optarg);
+ break;
+ case 'D':
+ show_activity = 1;
+ break;
+ case 'e':
+ show_empty = 1;
+ break;
+ case 'h':
+ usage();
+ return 0;
+ case 'i':
+ show_inverted = 1;
+ break;
+ case 'n':
+ show_numa = 1;
+ break;
+ case 'o':
+ show_ops = 1;
+ break;
+ case 'r':
+ show_report = 1;
+ break;
+ case 's':
+ shrink = 1;
+ break;
+ case 'l':
+ show_slab = 1;
+ break;
+ case 't':
+ show_track = 1;
+ break;
+ case 'v':
+ validate = 1;
+ break;
+ case 'z':
+ skip_zero = 0;
+ break;
+ case 'T':
+ show_totals = 1;
+ break;
+ case 'S':
+ sort_size = 1;
+ break;
+
+ default:
+ fatal("%s: Invalid option '%c'\n", argv[0], optopt);
+
+ }
+
+ if (!show_slab && !show_track && !show_report
+ && !validate && !shrink && !set_debug && !show_ops)
+ show_slab = 1;
+
+ if (argc > optind)
+ pattern_source = argv[optind];
+ else
+ pattern_source = ".*";
+
+ err = regcomp(&pattern, pattern_source, REG_ICASE|REG_NOSUB);
+ if (err)
+ fatal("%s: Invalid pattern '%s' code %d\n",
+ argv[0], pattern_source, err);
+ read_slab_dir();
+ if (show_totals)
+ totals();
+ else {
+ sort_slabs();
+ output_slabs();
+ }
+ return 0;
+}
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
index dbe3377754af..f37b46d34861 100644
--- a/Documentation/x86/00-INDEX
+++ b/Documentation/x86/00-INDEX
@@ -2,3 +2,5 @@
- this file
mtrr.txt
- how to use x86 Memory Type Range Registers to increase performance
+exception-tables.txt
+ - why and how the Linux kernel uses exception tables on x86
diff --git a/Documentation/exception.txt b/Documentation/x86/exception-tables.txt
index 2d5aded64247..32901aa36f0a 100644
--- a/Documentation/exception.txt
+++ b/Documentation/x86/exception-tables.txt
@@ -1,123 +1,123 @@
- Kernel level exception handling in Linux 2.1.8
+ Kernel level exception handling in Linux
Commentary by Joerg Pommnitz <joerg@raleigh.ibm.com>
-When a process runs in kernel mode, it often has to access user
-mode memory whose address has been passed by an untrusted program.
+When a process runs in kernel mode, it often has to access user
+mode memory whose address has been passed by an untrusted program.
To protect itself the kernel has to verify this address.
-In older versions of Linux this was done with the
-int verify_area(int type, const void * addr, unsigned long size)
+In older versions of Linux this was done with the
+int verify_area(int type, const void * addr, unsigned long size)
function (which has since been replaced by access_ok()).
-This function verified that the memory area starting at address
+This function verified that the memory area starting at address
'addr' and of size 'size' was accessible for the operation specified
-in type (read or write). To do this, verify_read had to look up the
-virtual memory area (vma) that contained the address addr. In the
-normal case (correctly working program), this test was successful.
+in type (read or write). To do this, verify_read had to look up the
+virtual memory area (vma) that contained the address addr. In the
+normal case (correctly working program), this test was successful.
It only failed for a few buggy programs. In some kernel profiling
tests, this normally unneeded verification used up a considerable
amount of time.
-To overcome this situation, Linus decided to let the virtual memory
+To overcome this situation, Linus decided to let the virtual memory
hardware present in every Linux-capable CPU handle this test.
How does this work?
-Whenever the kernel tries to access an address that is currently not
-accessible, the CPU generates a page fault exception and calls the
-page fault handler
+Whenever the kernel tries to access an address that is currently not
+accessible, the CPU generates a page fault exception and calls the
+page fault handler
void do_page_fault(struct pt_regs *regs, unsigned long error_code)
-in arch/i386/mm/fault.c. The parameters on the stack are set up by
-the low level assembly glue in arch/i386/kernel/entry.S. The parameter
-regs is a pointer to the saved registers on the stack, error_code
+in arch/x86/mm/fault.c. The parameters on the stack are set up by
+the low level assembly glue in arch/x86/kernel/entry_32.S. The parameter
+regs is a pointer to the saved registers on the stack, error_code
contains a reason code for the exception.
-do_page_fault first obtains the unaccessible address from the CPU
-control register CR2. If the address is within the virtual address
-space of the process, the fault probably occurred, because the page
-was not swapped in, write protected or something similar. However,
-we are interested in the other case: the address is not valid, there
-is no vma that contains this address. In this case, the kernel jumps
-to the bad_area label.
-
-There it uses the address of the instruction that caused the exception
-(i.e. regs->eip) to find an address where the execution can continue
-(fixup). If this search is successful, the fault handler modifies the
-return address (again regs->eip) and returns. The execution will
+do_page_fault first obtains the unaccessible address from the CPU
+control register CR2. If the address is within the virtual address
+space of the process, the fault probably occurred, because the page
+was not swapped in, write protected or something similar. However,
+we are interested in the other case: the address is not valid, there
+is no vma that contains this address. In this case, the kernel jumps
+to the bad_area label.
+
+There it uses the address of the instruction that caused the exception
+(i.e. regs->eip) to find an address where the execution can continue
+(fixup). If this search is successful, the fault handler modifies the
+return address (again regs->eip) and returns. The execution will
continue at the address in fixup.
Where does fixup point to?
-Since we jump to the contents of fixup, fixup obviously points
-to executable code. This code is hidden inside the user access macros.
-I have picked the get_user macro defined in include/asm/uaccess.h as an
-example. The definition is somewhat hard to follow, so let's peek at
+Since we jump to the contents of fixup, fixup obviously points
+to executable code. This code is hidden inside the user access macros.
+I have picked the get_user macro defined in arch/x86/include/asm/uaccess.h
+as an example. The definition is somewhat hard to follow, so let's peek at
the code generated by the preprocessor and the compiler. I selected
-the get_user call in drivers/char/console.c for a detailed examination.
+the get_user call in drivers/char/sysrq.c for a detailed examination.
-The original code in console.c line 1405:
+The original code in sysrq.c line 587:
get_user(c, buf);
The preprocessor output (edited to become somewhat readable):
(
- {
- long __gu_err = - 14 , __gu_val = 0;
- const __typeof__(*( ( buf ) )) *__gu_addr = ((buf));
- if (((((0 + current_set[0])->tss.segment) == 0x18 ) ||
- (((sizeof(*(buf))) <= 0xC0000000UL) &&
- ((unsigned long)(__gu_addr ) <= 0xC0000000UL - (sizeof(*(buf)))))))
+ {
+ long __gu_err = - 14 , __gu_val = 0;
+ const __typeof__(*( ( buf ) )) *__gu_addr = ((buf));
+ if (((((0 + current_set[0])->tss.segment) == 0x18 ) ||
+ (((sizeof(*(buf))) <= 0xC0000000UL) &&
+ ((unsigned long)(__gu_addr ) <= 0xC0000000UL - (sizeof(*(buf)))))))
do {
- __gu_err = 0;
- switch ((sizeof(*(buf)))) {
- case 1:
- __asm__ __volatile__(
- "1: mov" "b" " %2,%" "b" "1\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl %3,%0\n"
- " xor" "b" " %" "b" "1,%" "b" "1\n"
- " jmp 2b\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
+ __gu_err = 0;
+ switch ((sizeof(*(buf)))) {
+ case 1:
+ __asm__ __volatile__(
+ "1: mov" "b" " %2,%" "b" "1\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %3,%0\n"
+ " xor" "b" " %" "b" "1,%" "b" "1\n"
+ " jmp 2b\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,3b\n"
".text" : "=r"(__gu_err), "=q" (__gu_val): "m"((*(struct __large_struct *)
- ( __gu_addr )) ), "i"(- 14 ), "0"( __gu_err )) ;
- break;
- case 2:
+ ( __gu_addr )) ), "i"(- 14 ), "0"( __gu_err )) ;
+ break;
+ case 2:
__asm__ __volatile__(
- "1: mov" "w" " %2,%" "w" "1\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl %3,%0\n"
- " xor" "w" " %" "w" "1,%" "w" "1\n"
- " jmp 2b\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
+ "1: mov" "w" " %2,%" "w" "1\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %3,%0\n"
+ " xor" "w" " %" "w" "1,%" "w" "1\n"
+ " jmp 2b\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,3b\n"
".text" : "=r"(__gu_err), "=r" (__gu_val) : "m"((*(struct __large_struct *)
- ( __gu_addr )) ), "i"(- 14 ), "0"( __gu_err ));
- break;
- case 4:
- __asm__ __volatile__(
- "1: mov" "l" " %2,%" "" "1\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl %3,%0\n"
- " xor" "l" " %" "" "1,%" "" "1\n"
- " jmp 2b\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n" " .long 1b,3b\n"
+ ( __gu_addr )) ), "i"(- 14 ), "0"( __gu_err ));
+ break;
+ case 4:
+ __asm__ __volatile__(
+ "1: mov" "l" " %2,%" "" "1\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %3,%0\n"
+ " xor" "l" " %" "" "1,%" "" "1\n"
+ " jmp 2b\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n" " .long 1b,3b\n"
".text" : "=r"(__gu_err), "=r" (__gu_val) : "m"((*(struct __large_struct *)
- ( __gu_addr )) ), "i"(- 14 ), "0"(__gu_err));
- break;
- default:
- (__gu_val) = __get_user_bad();
- }
- } while (0) ;
- ((c)) = (__typeof__(*((buf))))__gu_val;
+ ( __gu_addr )) ), "i"(- 14 ), "0"(__gu_err));
+ break;
+ default:
+ (__gu_val) = __get_user_bad();
+ }
+ } while (0) ;
+ ((c)) = (__typeof__(*((buf))))__gu_val;
__gu_err;
}
);
@@ -127,12 +127,12 @@ see what code gcc generates:
> xorl %edx,%edx
> movl current_set,%eax
- > cmpl $24,788(%eax)
- > je .L1424
+ > cmpl $24,788(%eax)
+ > je .L1424
> cmpl $-1073741825,64(%esp)
- > ja .L1423
+ > ja .L1423
> .L1424:
- > movl %edx,%eax
+ > movl %edx,%eax
> movl 64(%esp),%ebx
> #APP
> 1: movb (%ebx),%dl /* this is the actual user access */
@@ -149,17 +149,17 @@ see what code gcc generates:
> .L1423:
> movzbl %dl,%esi
-The optimizer does a good job and gives us something we can actually
-understand. Can we? The actual user access is quite obvious. Thanks
-to the unified address space we can just access the address in user
+The optimizer does a good job and gives us something we can actually
+understand. Can we? The actual user access is quite obvious. Thanks
+to the unified address space we can just access the address in user
memory. But what does the .section stuff do?????
To understand this we have to look at the final kernel:
> objdump --section-headers vmlinux
- >
+ >
> vmlinux: file format elf32-i386
- >
+ >
> Sections:
> Idx Name Size VMA LMA File off Algn
> 0 .text 00098f40 c0100000 c0100000 00001000 2**4
@@ -198,18 +198,18 @@ final kernel executable:
The whole user memory access is reduced to 10 x86 machine instructions.
The instructions bracketed in the .section directives are no longer
-in the normal execution path. They are located in a different section
+in the normal execution path. They are located in a different section
of the executable file:
> objdump --disassemble --section=.fixup vmlinux
- >
+ >
> c0199ff5 <.fixup+10b5> movl $0xfffffff2,%eax
> c0199ffa <.fixup+10ba> xorb %dl,%dl
> c0199ffc <.fixup+10bc> jmp c017e7a7 <do_con_write+e3>
And finally:
> objdump --full-contents --section=__ex_table vmlinux
- >
+ >
> c01aa7c4 93c017c0 e09f19c0 97c017c0 99c017c0 ................
> c01aa7d4 f6c217c0 e99f19c0 a5e717c0 f59f19c0 ................
> c01aa7e4 080a18c0 01a019c0 0a0a18c0 04a019c0 ................
@@ -235,8 +235,8 @@ sections in the ELF object file. So the instructions
ended up in the .fixup section of the object file and the addresses
.long 1b,3b
ended up in the __ex_table section of the object file. 1b and 3b
-are local labels. The local label 1b (1b stands for next label 1
-backward) is the address of the instruction that might fault, i.e.
+are local labels. The local label 1b (1b stands for next label 1
+backward) is the address of the instruction that might fault, i.e.
in our case the address of the label 1 is c017e7a5:
the original assembly code: > 1: movb (%ebx),%dl
and linked in vmlinux : > c017e7a5 <do_con_write+e1> movb (%ebx),%dl
@@ -254,7 +254,7 @@ The assembly code
becomes the value pair
> c01aa7d4 c017c2f6 c0199fe9 c017e7a5 c0199ff5 ................
^this is ^this is
- 1b 3b
+ 1b 3b
c017e7a5,c0199ff5 in the exception table of the kernel.
So, what actually happens if a fault from kernel mode with no suitable
@@ -266,9 +266,9 @@ vma occurs?
3.) CPU calls do_page_fault
4.) do page fault calls search_exception_table (regs->eip == c017e7a5);
5.) search_exception_table looks up the address c017e7a5 in the
- exception table (i.e. the contents of the ELF section __ex_table)
+ exception table (i.e. the contents of the ELF section __ex_table)
and returns the address of the associated fault handle code c0199ff5.
-6.) do_page_fault modifies its own return address to point to the fault
+6.) do_page_fault modifies its own return address to point to the fault
handle code and returns.
7.) execution continues in the fault handling code.
8.) 8a) EAX becomes -EFAULT (== -14)
diff --git a/MAINTAINERS b/MAINTAINERS
index fa2a16def17a..c51cfd4777aa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -867,12 +867,22 @@ M: alex@shark-linux.de
W: http://www.shark-linux.de/shark.html
S: Maintained
+ARM/SAMSUNG ARM ARCHITECTURES
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/plat-s3c/
+F: arch/arm/plat-s3c24xx/
+
ARM/S3C2410 ARM ARCHITECTURE
P: Ben Dooks
M: ben-linux@fluff.org
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
W: http://www.fluff.org/ben/linux/
S: Maintained
+F: arch/arm/mach-s3c2410/
ARM/S3C2440 ARM ARCHITECTURE
P: Ben Dooks
@@ -880,6 +890,39 @@ M: ben-linux@fluff.org
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
W: http://www.fluff.org/ben/linux/
S: Maintained
+F: arch/arm/mach-s3c2440/
+
+ARM/S3C2442 ARM ARCHITECTURE
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/mach-s3c2442/
+
+ARM/S3C2443 ARM ARCHITECTURE
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/mach-s3c2443/
+
+ARM/S3C6400 ARM ARCHITECTURE
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/mach-s3c6400/
+
+ARM/S3C6410 ARM ARCHITECTURE
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/mach-s3c6410/
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
P: Lennert Buytenhek
@@ -2087,9 +2130,9 @@ F: drivers/edac/i5400_edac.c
EDAC-I82975X
P: Ranganathan Desikan
-M: rdesikan@jetzbroadband.com
+M: ravi@jetztechnologies.com
P: Arvind R.
-M: arvind@acarlab.com
+M: arvind@jetztechnologies.com
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
@@ -2527,6 +2570,15 @@ W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
S: Maintained
F: drivers/hwmon/hdaps.c
+HARDWARE LATENCY DETECTOR
+P: Jon Masters
+M: jcm@jonmasters.org
+W: http://www.kernel.org/pub/linux/kernel/people/jcm/hwlat_detector/
+S: Supported
+L: linux-kernel@vger.kernel.org
+F: Documentation/hwlat_detector.txt
+F: drivers/misc/hwlat_detector.c
+
HYPERVISOR VIRTUAL CONSOLE DRIVER
L: linuxppc-dev@ozlabs.org
S: Odd Fixes
@@ -2808,7 +2860,9 @@ S: Maintained
IA64 (Itanium) PLATFORM
P: Tony Luck
+P: Fenghua Yu
M: tony.luck@intel.com
+M: fenghua.yu@intel.com
L: linux-ia64@vger.kernel.org
W: http://www.ia64-linux.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git
@@ -2886,7 +2940,7 @@ P: Dmitry Eremin-Solenikov
M: dbaryshkov@gmail.com
P: Sergey Lapin
M: slapin@ossfans.org
-L: linux-zigbee-devel@lists.sourceforge.net
+L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
@@ -3331,7 +3385,7 @@ F: scripts/Makefile.*
KERNEL JANITORS
P: Several
L: kernel-janitors@vger.kernel.org
-W: http://www.kerneljanitors.org/
+W: http://janitor.kernelnewbies.org/
S: Maintained
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
@@ -4362,7 +4416,7 @@ W: http://www.nongnu.org/orinoco/
S: Maintained
F: drivers/net/wireless/orinoco/
-OSD LIBRARY
+OSD LIBRARY and FILESYSTEM
P: Boaz Harrosh
M: bharrosh@panasas.com
P: Benny Halevy
@@ -4371,6 +4425,9 @@ L: osd-dev@open-osd.org
W: http://open-osd.org
T: git git://git.open-osd.org/open-osd.git
S: Maintained
+F: drivers/scsi/osd/
+F: drivers/include/scsi/osd_*
+F: fs/exofs/
P54 WIRELESS DRIVER
P: Michael Wu
@@ -5270,6 +5327,12 @@ F: arch/arm/mach-s3c2410/
F: drivers/*/*s3c2410*
F: drivers/*/*/*s3c2410*
+TI DAVINCI MACHINE SUPPORT
+P: Kevin Hilman
+M: davinci-linux-open-source@linux.davincidsp.com
+S: Supported
+F: arch/arm/mach-davinci
+
SIS 190 ETHERNET DRIVER
P: Francois Romieu
M: romieu@fr.zoreil.com
@@ -5533,8 +5596,8 @@ F: drivers/staging/
STARFIRE/DURALAN NETWORK DRIVER
P: Ion Badulescu
-M: ionut@cs.columbia.edu
-S: Maintained
+M: ionut@badula.org
+S: Odd Fixes
F: drivers/net/starfire*
STARMODE RADIO IP (STRIP) PROTOCOL DRIVER
@@ -5668,6 +5731,13 @@ F: drivers/misc/tifm*
F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h
+TI TWL4030 SERIES SOC CODEC DRIVER
+P: Peter Ujfalusi
+M: peter.ujfalusi@nokia.com
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: sound/soc/codecs/twl4030*
+
TIPC NETWORK LAYER
P: Per Liden
M: per.liden@ericsson.com
@@ -5751,17 +5821,17 @@ P: Jiri Kosina
M: trivial@kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
S: Maintained
-F: drivers/char/tty_*
-F: drivers/serial/serial_core.c
-F: include/linux/serial_core.h
-F: include/linux/serial.h
-F: include/linux/tty.h
TTY LAYER
P: Alan Cox
M: alan@lxorguk.ukuu.org.uk
S: Maintained
T: stgit http://zeniv.linux.org.uk/~alan/ttydev/
+F: drivers/char/tty_*
+F: drivers/serial/serial_core.c
+F: include/linux/serial_core.h
+F: include/linux/serial.h
+F: include/linux/tty.h
TULIP NETWORK DRIVERS
P: Grant Grundler
@@ -5799,7 +5869,7 @@ UBI FILE SYSTEM (UBIFS)
P: Artem Bityutskiy
M: dedekind@infradead.org
P: Adrian Hunter
-M: ext-adrian.hunter@nokia.com
+M: adrian.hunter@nokia.com
L: linux-mtd@lists.infradead.org
T: git git://git.infradead.org/ubifs-2.6.git
W: http://www.linux-mtd.infradead.org/doc/ubifs.html
@@ -6427,6 +6497,24 @@ S: Supported
F: drivers/input/touchscreen/*wm97*
F: include/linux/wm97xx.h
+WOLFSON MICROELECTRONICS PMIC DRIVERS
+P: Mark Brown
+M: broonie@opensource.wolfsonmicro.com
+L: linux-kernel@vger.kernel.org
+T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
+W: http://opensource.wolfsonmicro.com/node/8
+S: Supported
+F: drivers/leds/leds-wm83*.c
+F: drivers/mfd/wm8*.c
+F: drivers/power/wm83*.c
+F: drivers/rtc/rtc-wm83*.c
+F: drivers/regulator/wm8*.c
+F: drivers/watchdog/wm83*_wdt.c
+F: include/linux/mfd/wm8350/
+F: include/linux/mfd/wm8400/
+F: sound/soc/codecs/wm8350.c
+F: sound/soc/codecs/wm8400.c
+
X.25 NETWORK LAYER
P: Henner Eisen
M: eis@baty.hanse.de
diff --git a/Makefile b/Makefile
index d1216fea0c92..19ec8d14d9ef 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 31
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
@@ -140,15 +140,13 @@ _all: modules
endif
srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR))
-TOPDIR := $(srctree)
-# FIXME - TOPDIR is obsolete, use srctree/objtree
objtree := $(CURDIR)
src := $(srctree)
obj := $(objtree)
VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD))
-export srctree objtree VPATH TOPDIR
+export srctree objtree VPATH
# SUBARCH tells the usermode build what the underlying arch is. That is set
@@ -327,7 +325,7 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
MODFLAGS = -DMODULE
CFLAGS_MODULE = $(MODFLAGS)
AFLAGS_MODULE = $(MODFLAGS)
-LDFLAGS_MODULE =
+LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds
CFLAGS_KERNEL =
AFLAGS_KERNEL =
CFLAGS_GCOV = -fprofile-arcs -ftest-coverage
@@ -344,7 +342,8 @@ KBUILD_CPPFLAGS := -D__KERNEL__
KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common \
- -Werror-implicit-function-declaration
+ -Werror-implicit-function-declaration \
+ -Wno-format-security
KBUILD_AFLAGS := -D__ASSEMBLY__
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
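The new -Wno-format-security flag turns off gcc's warning about format strings that are not string literals. A minimal user-space analogue of the construct it would otherwise flag (hypothetical code, not part of this patch):

/* Hypothetical example: with -Wformat -Wformat-security the first call
 * draws a warning because the format string is not a literal; the second
 * form is the safe spelling. -Wno-format-security suppresses the warning. */
#include <stdio.h>

void report(const char *msg)
{
	printf(msg);		/* warning: format not a string literal */
	printf("%s", msg);	/* no warning */
}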
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index d22ace99d13d..dd8dcabf160f 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -52,7 +52,6 @@ struct pci_controller {
bus numbers. */
#define pcibios_assign_all_busses() 1
-#define pcibios_scan_all_fns(a, b) 0
#define PCIBIOS_MIN_IO alpha_mv.min_io_address
#define PCIBIOS_MIN_MEM alpha_mv.min_mem_address
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 06c5c7a4afd3..2c12378e3aa9 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -1,102 +1,18 @@
#ifndef __ALPHA_PERCPU_H
#define __ALPHA_PERCPU_H
-#include <linux/compiler.h>
-#include <linux/threads.h>
-#include <linux/percpu-defs.h>
-
-/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
-#ifdef CONFIG_SMP
-
-/*
- * per_cpu_offset() is the offset that has to be added to a
- * percpu variable to get to the instance for a certain processor.
- */
-extern unsigned long __per_cpu_offset[NR_CPUS];
-
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
-#ifdef CONFIG_DEBUG_PREEMPT
-#define my_cpu_offset per_cpu_offset(smp_processor_id())
-#else
-#define my_cpu_offset __my_cpu_offset
-#endif
-
-#ifndef MODULE
-#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
-#define PER_CPU_ATTRIBUTES
-#else
/*
- * To calculate addresses of locally defined variables, GCC uses 32-bit
- * displacement from the GP. Which doesn't work for per cpu variables in
- * modules, as an offset to the kernel per cpu area is way above 4G.
+ * To calculate addresses of locally defined variables, GCC uses
+ * 32-bit displacement from the GP. Which doesn't work for per cpu
+ * variables in modules, as an offset to the kernel per cpu area is
+ * way above 4G.
*
- * This forces allocation of a GOT entry for per cpu variable using
- * ldq instruction with a 'literal' relocation.
- */
-#define SHIFT_PERCPU_PTR(var, offset) ({ \
- extern int simple_identifier_##var(void); \
- unsigned long __ptr, tmp_gp; \
- asm ( "br %1, 1f \n\
- 1: ldgp %1, 0(%1) \n\
- ldq %0, per_cpu__" #var"(%1)\t!literal" \
- : "=&r"(__ptr), "=&r"(tmp_gp)); \
- (typeof(&per_cpu_var(var)))(__ptr + (offset)); })
-
-#define PER_CPU_ATTRIBUTES __used
-
-#endif /* MODULE */
-
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
+ * Always use weak definitions for percpu variables in modules.
*/
-#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
-#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
-
-#else /* ! SMP */
-
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
-
-#define PER_CPU_ATTRIBUTES
-
-#endif /* SMP */
-
-#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
-#else
-#define PER_CPU_BASE_SECTION ".data"
-#endif
-
-#ifdef CONFIG_SMP
-
-#ifdef MODULE
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#endif
-#define PER_CPU_FIRST_SECTION ".first"
-
-#else
-
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_FIRST_SECTION ""
-
+#if defined(MODULE) && defined(CONFIG_SMP)
+#define ARCH_NEEDS_WEAK_PER_CPU
#endif
-#define PER_CPU_ATTRIBUTES
+#include <asm-generic/percpu.h>
#endif /* __ALPHA_PERCPU_H */
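With the arch-specific accessors gone, Alpha modules rely on the generic ARCH_NEEDS_WEAK_PER_CPU path: per-CPU definitions in modules are emitted weak, so the 32-bit GP displacement limit no longer matters and the ordinary accessors work unchanged. A hypothetical module sketch using only the generic API (names invented for illustration):

/* Hypothetical module: demo_counter is an invented name. With
 * ARCH_NEEDS_WEAK_PER_CPU the per-CPU definition below is emitted weak
 * inside modules, so the generic accessors work without the old
 * GOT/ldq-literal workaround. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static int __init demo_init(void)
{
	get_cpu_var(demo_counter)++;
	put_cpu_var(demo_counter);
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");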
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 547e90951cec..3f390e8cc0b3 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -47,7 +47,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
extern int smp_num_cpus;
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#else /* CONFIG_SMP */
diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h
index 9d87aaa08c0d..e89e0c2e15b1 100644
--- a/arch/alpha/include/asm/tlbflush.h
+++ b/arch/alpha/include/asm/tlbflush.h
@@ -2,6 +2,7 @@
#define _ALPHA_TLBFLUSH_H
#include <linux/mm.h>
+#include <linux/sched.h>
#include <asm/compiler.h>
#include <asm/pgalloc.h>
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index b4f284c72ff3..36b3a30ba0e5 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -22,23 +22,6 @@ static inline int cpu_to_node(int cpu)
return node;
}
-static inline cpumask_t node_to_cpumask(int node)
-{
- cpumask_t node_cpu_mask = CPU_MASK_NONE;
- int cpu;
-
- for_each_online_cpu(cpu) {
- if (cpu_to_node(cpu) == node)
- cpu_set(cpu, node_cpu_mask);
- }
-
-#ifdef DEBUG_NUMA
- printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask);
-#endif
-
- return node_cpu_mask;
-}
-
extern struct cpumask node_to_cpumask_map[];
/* FIXME: This is dumb, recalculating every time. But simple. */
static const struct cpumask *cpumask_of_node(int node)
@@ -55,7 +38,6 @@ static const struct cpumask *cpumask_of_node(int node)
return &node_to_cpumask_map[node];
}
-#define pcibus_to_cpumask(bus) (cpu_online_map)
#define cpumask_of_pcibus(bus) (cpu_online_mask)
#endif /* !CONFIG_NUMA */
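Callers that used the removed node_to_cpumask() now go through cpumask_of_node(), which hands back a const struct cpumask * instead of copying a cpumask_t by value. A hypothetical caller, for illustration only:

/* Hypothetical caller: walk the CPUs of a NUMA node through the
 * pointer-based API instead of copying a cpumask_t onto the stack. */
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static void demo_walk_node(int node)
{
	int cpu;

	for_each_cpu(cpu, cpumask_of_node(node))
		printk(KERN_INFO "node %d has cpu %d\n", node, cpu);
}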
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index b1fe5674c3a1..42aa078a5e4d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -548,16 +548,16 @@ setup_profiling_timer(unsigned int multiplier)
static void
-send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
+send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
int i;
mb();
- for_each_cpu_mask(i, to_whom)
+ for_each_cpu(i, to_whom)
set_bit(operation, &ipi_data[i].bits);
mb();
- for_each_cpu_mask(i, to_whom)
+ for_each_cpu(i, to_whom)
wripir(i);
}
@@ -624,7 +624,7 @@ smp_send_reschedule(int cpu)
printk(KERN_WARNING
"smp_send_reschedule: Sending IPI to self.\n");
#endif
- send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void
@@ -636,17 +636,17 @@ smp_send_stop(void)
if (hard_smp_processor_id() != boot_cpu_id)
printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
- send_ipi_message(to_whom, IPI_CPU_STOP);
+ send_ipi_message(&to_whom, IPI_CPU_STOP);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi_message(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
- send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
static void
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index b9d6568e5f7f..75fe1d6877e9 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -139,6 +139,7 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
.mdebug 0 : {
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index aef63c8e3d2d..6523f29bb3b2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -217,6 +217,7 @@ config ARCH_REALVIEW
select ICST307
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
+ select ARCH_WANT_OPTIONAL_GPIOLIB
help
This enables support for ARM Ltd RealView boards.
@@ -229,6 +230,7 @@ config ARCH_VERSATILE
select ICST307
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
+ select ARCH_WANT_OPTIONAL_GPIOLIB
help
This enables support for ARM Ltd Versatile board.
@@ -327,6 +329,20 @@ config ARCH_H720X
help
This enables support for systems based on the Hynix HMS720x
+config ARCH_NOMADIK
+ bool "STMicroelectronics Nomadik"
+ select ARM_AMBA
+ select ARM_VIC
+ select CPU_ARM926T
+ select HAVE_CLK
+ select COMMON_CLKDEV
+ select GENERIC_TIME
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ help
+ Support for the Nomadik platform by ST-Ericsson
+
config ARCH_IOP13XX
bool "IOP13xx-based"
depends on MMU
@@ -716,6 +732,8 @@ source "arch/arm/mach-at91/Kconfig"
source "arch/arm/plat-mxc/Kconfig"
+source "arch/arm/mach-nomadik/Kconfig"
+
source "arch/arm/mach-netx/Kconfig"
source "arch/arm/mach-ns9xxx/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index a71fd941ade7..a89e4734b8f0 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -99,14 +99,6 @@ config DEBUG_CLPS711X_UART2
output to the second serial port on these devices. Saying N will
cause the debug messages to appear on the first serial port.
-config DEBUG_S3C_PORT
- depends on DEBUG_LL && PLAT_S3C
- bool "Kernel low-level debugging messages via S3C UART"
- help
- Say Y here if you want debug print routines to go to one of the
- S3C internal UARTs. The chosen UART must have been configured
- before it is used.
-
config DEBUG_S3C_UART
depends on PLAT_S3C
int "S3C UART to use for low-level debug"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c877d6df23d1..97cca075d0f9 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -137,6 +137,7 @@ machine-$(CONFIG_ARCH_MX1) := mx1
machine-$(CONFIG_ARCH_MX2) := mx2
machine-$(CONFIG_ARCH_MX3) := mx3
machine-$(CONFIG_ARCH_NETX) := netx
+machine-$(CONFIG_ARCH_NOMADIK) := nomadik
machine-$(CONFIG_ARCH_NS9XXX) := ns9xxx
machine-$(CONFIG_ARCH_OMAP1) := omap1
machine-$(CONFIG_ARCH_OMAP2) := omap2
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 6ed89836e908..bc1f9ad61ff6 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -26,6 +26,15 @@
#include <asm/mach/irq.h>
#include <asm/hardware/vic.h>
+static void vic_ack_irq(unsigned int irq)
+{
+ void __iomem *base = get_irq_chip_data(irq);
+ irq &= 31;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+	/* also clear any soft-triggered interrupt, in case it was the source */
+ writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
+}
+
static void vic_mask_irq(unsigned int irq)
{
void __iomem *base = get_irq_chip_data(irq);
@@ -253,12 +262,21 @@ static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg
static struct irq_chip vic_chip = {
.name = "VIC",
- .ack = vic_mask_irq,
+ .ack = vic_ack_irq,
.mask = vic_mask_irq,
.unmask = vic_unmask_irq,
.set_wake = vic_set_wake,
};
+/* The PL190 cell from ARM has been modified by ST, so handle both here */
+static void vik_init_st(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources);
+
+enum vic_vendor {
+ VENDOR_ARM = 0x41,
+ VENDOR_ST = 0x80,
+};
+
/**
* vic_init - initialise a vectored interrupt controller
* @base: iomem base address
@@ -270,6 +288,28 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources)
{
unsigned int i;
+ u32 cellid = 0;
+ enum vic_vendor vendor;
+
+ /* Identify which VIC cell this one is, by reading the ID */
+ for (i = 0; i < 4; i++) {
+ u32 addr = ((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
+ cellid |= (readl(addr) & 0xff) << (8 * i);
+ }
+ vendor = (cellid >> 12) & 0xff;
+ printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
+ base, cellid, vendor);
+
+ switch(vendor) {
+ case VENDOR_ST:
+ vik_init_st(base, irq_start, vic_sources);
+ return;
+ default:
+		printk(KERN_WARNING "VIC: unknown vendor, continuing anyway\n");
+ /* fall through */
+ case VENDOR_ARM:
+ break;
+ }
/* Disable all interrupts initially. */
@@ -306,3 +346,60 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
vic_pm_register(base, irq_start, resume_sources);
}
+
+/*
+ * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
+ * The original cell has 32 interrupts, while the modified one has 64,
+ * replicating the 0x00..0x1f register block at 0x20..0x3f. In that case
+ * the probe function is called twice, with base set to offset 0x000
+ * and 0x020 within the page. We call the latter the "second block".
+ */
+static void __init vik_init_st(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources)
+{
+ unsigned int i;
+ int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
+
+ /* Disable all interrupts initially. */
+
+ writel(0, base + VIC_INT_SELECT);
+ writel(0, base + VIC_INT_ENABLE);
+ writel(~0, base + VIC_INT_ENABLE_CLEAR);
+ writel(0, base + VIC_IRQ_STATUS);
+ writel(0, base + VIC_ITCR);
+ writel(~0, base + VIC_INT_SOFT_CLEAR);
+
+ /*
+ * Make sure we clear all existing interrupts. The vector registers
+ * in this cell are after the second block of general registers,
+ * so we can address them using standard offsets, but only from
+ * the second base address, which is 0x20 in the page
+ */
+ if (vic_2nd_block) {
+ writel(0, base + VIC_PL190_VECT_ADDR);
+ for (i = 0; i < 19; i++) {
+ unsigned int value;
+
+ value = readl(base + VIC_PL190_VECT_ADDR);
+ writel(value, base + VIC_PL190_VECT_ADDR);
+ }
+		/* ST has 16 vectors as well, but we don't enable them for now */
+ for (i = 0; i < 16; i++) {
+ void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+ writel(0, reg);
+ }
+
+ writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (vic_sources & (1 << i)) {
+ unsigned int irq = irq_start + i;
+
+ set_irq_chip(irq, &vic_chip);
+ set_irq_chip_data(irq, base);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ }
+}
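On the ST variant the controller is initialised as two 32-IRQ blocks, one vic_init() call per block, 0x20 apart within the same page; the second call takes the vik_init_st() path above. A hypothetical board-init sketch (base address and IRQ numbers invented):

/* Hypothetical machine code: DEMO_VIC_PHYS, IRQ_VIC0_START and
 * IRQ_VIC1_START are made-up names. Each call covers 32 sources;
 * the block at offset 0x20 is the ST-specific second block. */
#include <linux/init.h>
#include <linux/io.h>
#include <asm/hardware/vic.h>

#define DEMO_VIC_PHYS	0x10140000	/* hypothetical */

static void __init demo_init_irq(void)
{
	void __iomem *base = ioremap(DEMO_VIC_PHYS, 4096);

	vic_init(base,        IRQ_VIC0_START, ~0, 0);
	vic_init(base + 0x20, IRQ_VIC1_START, ~0, 0);
}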
diff --git a/arch/arm/configs/kb9202_defconfig b/arch/arm/configs/kb9202_defconfig
index 8e74c66f239d..605a8462f172 100644
--- a/arch/arm/configs/kb9202_defconfig
+++ b/arch/arm/configs/kb9202_defconfig
@@ -1,109 +1,246 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13-rc2
-# Sun Aug 14 19:26:59 2005
+# Linux kernel version: 2.6.30-rc8
+# Wed Jun 3 13:52:33 2009
#
CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_MMU=y
-CONFIG_UID16=y
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
-# Code maturity level options
+# General setup
#
-# CONFIG_EXPERIMENTAL is not set
-CONFIG_CLEAN_COMPILE=y
+CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
-
-#
-# General setup
-#
CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
# CONFIG_SWAP is not set
-# CONFIG_SYSVIPC is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+CONFIG_AUDIT=y
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
-CONFIG_HOTPLUG=y
-# CONFIG_KOBJECT_UEVENT is not set
-# CONFIG_IKCONFIG is not set
+CONFIG_ANON_INODES=y
# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
+CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
CONFIG_SHMEM=y
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
-# CONFIG_TINY_SHMEM is not set
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_TRACEPOINTS=y
+CONFIG_MARKERS=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
-# Loadable module support
+# IO Schedulers
#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
#
# System Type
#
-# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+CONFIG_ARCH_AT91=y
# CONFIG_ARCH_CLPS711X is not set
-# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
-# CONFIG_ARCH_INTEGRATOR is not set
-# CONFIG_ARCH_IOP3XX is not set
-# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MMP is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_VERSATILE is not set
-# CONFIG_ARCH_IMX is not set
-# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_AAEC2000 is not set
-CONFIG_ARCH_AT91=y
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_W90X900 is not set
+
+#
+# Atmel AT91 System-on-Chip
+#
CONFIG_ARCH_AT91RM9200=y
+# CONFIG_ARCH_AT91SAM9260 is not set
+# CONFIG_ARCH_AT91SAM9261 is not set
+# CONFIG_ARCH_AT91SAM9263 is not set
+# CONFIG_ARCH_AT91SAM9RL is not set
+# CONFIG_ARCH_AT91SAM9G20 is not set
+# CONFIG_ARCH_AT91CAP9 is not set
+# CONFIG_ARCH_AT91X40 is not set
+CONFIG_AT91_PMC_UNIT=y
#
-# AT91RM9200 Implementations
+# AT91RM9200 Board Type
#
+# CONFIG_MACH_ONEARM is not set
# CONFIG_ARCH_AT91RM9200DK is not set
# CONFIG_MACH_AT91RM9200EK is not set
# CONFIG_MACH_CSB337 is not set
# CONFIG_MACH_CSB637 is not set
# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_ATEB9200 is not set
CONFIG_MACH_KB9200=y
+# CONFIG_MACH_PICOTUX2XX is not set
+# CONFIG_MACH_KAFA is not set
+# CONFIG_MACH_ECBAT91 is not set
+# CONFIG_MACH_YL9200 is not set
+
+#
+# AT91 Board Options
+#
+
+#
+# AT91 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+CONFIG_AT91_TIMER_HZ=128
+CONFIG_AT91_EARLY_DBGU=y
+# CONFIG_AT91_EARLY_USART0 is not set
+# CONFIG_AT91_EARLY_USART1 is not set
+# CONFIG_AT91_EARLY_USART2 is not set
+# CONFIG_AT91_EARLY_USART3 is not set
+# CONFIG_AT91_EARLY_USART4 is not set
+# CONFIG_AT91_EARLY_USART5 is not set
#
# Processor Type
#
CONFIG_CPU_32=y
CONFIG_CPU_ARM920T=y
-CONFIG_CPU_32v4=y
+CONFIG_CPU_32v4T=y
CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_PABRT_NOIFAR=y
CONFIG_CPU_CACHE_V4WT=y
CONFIG_CPU_CACHE_VIVT=y
CONFIG_CPU_COPY_V4WB=y
CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
#
# Processor Features
@@ -112,23 +249,48 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_ICACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_OUTER_CACHE is not set
#
# Bus support
#
-CONFIG_ISA_DMA_API=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
# CONFIG_PCCARD is not set
#
# Kernel Features
#
-# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT=y
+CONFIG_HZ=128
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_HAS_HOLES_MEMORYMODEL is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_LEDS is not set
CONFIG_ALIGNMENT_TRAP=y
@@ -137,8 +299,16 @@ CONFIG_ALIGNMENT_TRAP=y
#
CONFIG_ZBOOT_ROM_TEXT=0x10000000
CONFIG_ZBOOT_ROM_BSS=0x20040000
-CONFIG_ZBOOT_ROM=y
-CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/ram rw initrd=0x20210000,654933"
+# CONFIG_ZBOOT_ROM is not set
+CONFIG_CMDLINE="noinitrd root=/dev/mtdblock0 rootfstype=jffs2 mem=64M"
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
#
# Floating point emulation
@@ -149,74 +319,251 @@ CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/ram rw initrd=0x20210000,654933"
#
CONFIG_FPE_NWFPE=y
# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
#
# Userspace binary formats
#
CONFIG_BINFMT_ELF=y
-CONFIG_BINFMT_AOUT=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
CONFIG_BINFMT_MISC=y
-# CONFIG_ARTHUR is not set
#
# Power management options
#
# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
#
-# Device Drivers
+# Networking options
#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
-# Generic Driver Options
+# Network testing
#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
-CONFIG_DEBUG_DRIVER=y
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
-# Memory Technology Devices (MTD)
+# Device Drivers
#
-# CONFIG_MTD is not set
#
-# Parallel port support
+# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_ATMEL=y
+# CONFIG_MTD_NAND_ATMEL_ECC_HW is not set
+CONFIG_MTD_NAND_ATMEL_ECC_SOFT=y
+# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+CONFIG_MTD_UBI_GLUEBI=y
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-CONFIG_BLK_DEV_NBD=y
+# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_TCB_CLKSRC=y
+CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
+CONFIG_ATMEL_SSC=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
#
-# IO Schedulers
+# EEPROM support
#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
#
# SCSI device support
#
+# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
CONFIG_SCSI_PROC_FS=y
#
@@ -232,145 +579,87 @@ CONFIG_CHR_DEV_SG=y
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
#
-# SCSI Transport Attributes
+# SCSI Transports
#
-# CONFIG_SCSI_SPI_ATTRS is not set
+CONFIG_SCSI_SPI_ATTRS=m
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
-
-#
-# SCSI low-level drivers
-#
-# CONFIG_SCSI_SATA is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-# CONFIG_FUSION is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-
-#
-# I2O device support
-#
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_TUNNEL is not set
-# CONFIG_IP_TCPDIAG is not set
-# CONFIG_IP_TCPDIAG_IPV6 is not set
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_BIC=y
-# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_ARM_AT91_ETHER=y
+# CONFIG_AX88796 is not set
# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
+# Wireless LAN
#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN (non-hamradio)
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_NET_RADIO is not set
#
-# Wan interfaces
+# USB Network Adapters
#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
-
-#
-# ISDN subsystem
-#
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
#
# Input device support
#
CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
#
# Userland interfaces
@@ -380,7 +669,6 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
@@ -390,23 +678,25 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
#
# Hardware I/O ports
#
-CONFIG_SERIO=y
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO is not set
# CONFIG_GAMEPORT is not set
#
# Character devices
#
CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -419,215 +709,362 @@ CONFIG_HW_CONSOLE=y
#
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
#
-# IPMI
+# Memory mapped GPIO expanders:
#
-# CONFIG_IPMI_HANDLER is not set
#
-# Watchdog Cards
+# I2C GPIO expanders:
#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-# CONFIG_RTC is not set
-# CONFIG_AT91RM9200_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
#
-# Ftape, the floppy tape device driver
+# PCI GPIO expanders:
#
-# CONFIG_RAW_DRIVER is not set
#
-# TPM devices
+# SPI GPIO expanders:
#
-# CONFIG_AT91_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
#
-# I2C support
+# Watchdog Device Drivers
#
-# CONFIG_I2C is not set
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91RM9200_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
-# Misc devices
+# Sonics Silicon Backplane
#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
#
# Multimedia devices
#
+
+#
+# Multimedia core support
+#
# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
#
-# Digital Video Broadcasting Devices
+# Multimedia drivers
#
-# CONFIG_DVB is not set
+# CONFIG_DAB is not set
#
# Graphics support
#
-# CONFIG_FB is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
#
# Console display driver support
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
+# CONFIG_FONT_8x16 is not set
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_MINI_4x6=y
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+# CONFIG_LOGO is not set
# CONFIG_SOUND is not set
-
-#
-# USB support
-#
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
CONFIG_USB=y
-CONFIG_USB_DEBUG=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
#
# Miscellaneous USB options
#
CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
CONFIG_USB_OHCI_HCD=y
-# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
#
# USB Device Class drivers
#
-# CONFIG_USB_BLUETOOTH_TTY is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
-# CONFIG_USB_STORAGE_FREECOM is not set
-# CONFIG_USB_STORAGE_DPCM is not set
-
-#
-# USB Input Devices
-#
-# CONFIG_USB_HID is not set
#
-# USB HID Boot Protocol drivers
+# also be needed; see USB_STORAGE Help for more info
#
-# CONFIG_USB_KBD is not set
-# CONFIG_USB_MOUSE is not set
-# CONFIG_USB_AIPTEK is not set
-# CONFIG_USB_WACOM is not set
-# CONFIG_USB_ACECAD is not set
-# CONFIG_USB_KBTAB is not set
-# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_ITMTOUCH is not set
-# CONFIG_USB_EGALAX is not set
-# CONFIG_USB_XPAD is not set
-# CONFIG_USB_ATI_REMOTE is not set
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+CONFIG_USB_LIBUSUAL=y
#
# USB Imaging devices
#
+# CONFIG_USB_MDC800 is not set
# CONFIG_USB_MICROTEK is not set
#
-# USB Multimedia devices
+# USB port drivers
#
-# CONFIG_USB_DABUSB is not set
+# CONFIG_USB_SERIAL is not set
#
-# Video4Linux support is needed for USB Multimedia device support
+# USB Miscellaneous drivers
#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+# CONFIG_USB_GADGET is not set
#
-# USB Network Adapters
+# OTG and related infrastructure
#
-# CONFIG_USB_KAWETH is not set
-# CONFIG_USB_PEGASUS is not set
-# CONFIG_USB_USBNET is not set
-# CONFIG_USB_MON is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# USB port drivers
+# MMC/SD/SDIO Card Drivers
#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# USB Serial Converter support
+# MMC/SD/SDIO Host Controller Drivers
#
-# CONFIG_USB_SERIAL is not set
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_AT91=y
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_NEW_LEDS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
#
-# USB Miscellaneous drivers
+# RTC interfaces
#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
-# CONFIG_USB_LCD is not set
-# CONFIG_USB_LED is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGETKIT is not set
-# CONFIG_USB_PHIDGETSERVO is not set
-# CONFIG_USB_IDMOUSE is not set
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
#
-# USB DSL modem support
+# SPI RTC drivers
#
#
-# USB Gadget Support
+# Platform RTC drivers
#
-# CONFIG_USB_GADGET is not set
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
#
-# MMC/SD Card support
+# on-CPU RTC drivers
#
-# CONFIG_MMC is not set
+CONFIG_RTC_DRV_AT91RM9200=y
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
#
# File systems
#
CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-# CONFIG_EXT2_FS_POSIX_ACL is not set
-# CONFIG_EXT2_FS_SECURITY is not set
+# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
# CONFIG_EXT3_FS_POSIX_ACL is not set
# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
#
-# XFS support
+# Caches
#
-# CONFIG_XFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -639,7 +1076,7 @@ CONFIG_AUTOFS4_FS=y
# DOS/FAT/NT Filesystems
#
CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
+# CONFIG_MSDOS_FS is not set
CONFIG_VFAT_FS=y
CONFIG_FAT_DEFAULT_CODEPAGE=437
CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
@@ -649,53 +1086,70 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# Pseudo filesystems
#
CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
-CONFIG_DEVPTS_FS_XATTR=y
-# CONFIG_DEVPTS_FS_SECURITY is not set
CONFIG_TMPFS=y
-# CONFIG_TMPFS_XATTR is not set
+# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_UBIFS_FS is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
-# CONFIG_NFSD is not set
+# CONFIG_NFS_V4 is not set
CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
#
# Partition Types
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_DEFAULT="iso8859-1"
CONFIG_NLS_CODEPAGE_437=y
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
@@ -719,7 +1173,7 @@ CONFIG_NLS_CODEPAGE_437=y
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-CONFIG_NLS_ASCII=y
+# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
@@ -733,47 +1187,119 @@ CONFIG_NLS_ASCII=y
# CONFIG_NLS_ISO8859_15 is not set
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
#
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
-# CONFIG_MAGIC_SYSRQ is not set
-CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
# CONFIG_SCHEDSTATS is not set
-# CONFIG_DEBUG_SLAB is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_FS is not set
-CONFIG_FRAME_POINTER=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_DEBUG_LL=y
-# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
#
# Security options
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
# CONFIG_CRYPTO is not set
-
-#
-# Hardware crypto devices
-#
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
new file mode 100644
index 000000000000..9bb45b932f04
--- /dev/null
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -0,0 +1,1316 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.30
+# Tue Jun 23 22:57:16 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Performance Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+CONFIG_ARCH_NOMADIK=y
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+
+#
+# Nomadik boards
+#
+CONFIG_MACH_NOMADIK_8815NHK=y
+CONFIG_NOMADIK_8815=y
+CONFIG_I2C_BITBANG_8815NHK=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_OUTER_CACHE=y
+CONFIG_CACHE_L2X0=y
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT=y
+CONFIG_HZ=100
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+CONFIG_NET_IPIP=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+# CONFIG_IP_PIMSM_V1 is not set
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+# CONFIG_BT_HCIUART_LL is not set
+CONFIG_BT_HCIVHCI=m
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+CONFIG_MAC80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_TESTS=m
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_VERIFY_WRITE=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+CONFIG_MTD_NAND_NOMADIK=y
+CONFIG_MTD_ONENAND=y
+CONFIG_MTD_ONENAND_VERIFY_WRITE=y
+CONFIG_MTD_ONENAND_GENERIC=y
+# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
+# CONFIG_MTD_ONENAND_SIM is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=y
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+CONFIG_SMC91X=y
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+CONFIG_NETDEV_1000=y
+CONFIG_NETDEV_10000=y
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+CONFIG_NETCONSOLE=m
+# CONFIG_NETCONSOLE_DYNAMIC is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_GPIO is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_DEBUG_GPIO=y
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_PL061 is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_WACOM is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+
+#
+# Enable Host or Gadget support to see Inventra options
+#
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_NEW_LEDS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_PL030 is not set
+# CONFIG_RTC_DRV_PL031 is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_WEAK_PW_HASH=y
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+CONFIG_NLS_ISO8859_15=y
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 2d58b8fe59be..b49810461e41 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -260,6 +260,7 @@ CONFIG_MACH_NEXCODER_2440=y
CONFIG_SMDK2440_CPU2440=y
CONFIG_MACH_AT2440EVB=y
CONFIG_CPU_S3C2442=y
+CONFIG_MACH_MINI2440=y
#
# S3C2442 Machines
@@ -2298,7 +2299,6 @@ CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_LL=y
# CONFIG_DEBUG_ICEDCC is not set
-CONFIG_DEBUG_S3C_PORT=y
CONFIG_DEBUG_S3C_UART=0
#
diff --git a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig
index 2e8fa50e9a09..32860609e057 100644
--- a/arch/arm/configs/s3c6400_defconfig
+++ b/arch/arm/configs/s3c6400_defconfig
@@ -816,7 +816,6 @@ CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_LL=y
# CONFIG_DEBUG_ICEDCC is not set
-CONFIG_DEBUG_S3C_PORT=y
CONFIG_DEBUG_S3C_UART=0
#
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index 07dfb98df4f0..9d32faef05f6 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -857,7 +857,6 @@ CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_LL=y
# CONFIG_DEBUG_ICEDCC is not set
-# CONFIG_DEBUG_S3C_PORT is not set
CONFIG_DEBUG_S3C_UART=0
#
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 2d827e121147..4762d9001298 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc6
-# Mon Jun 1 09:18:22 2009
+# Linux kernel version: 2.6.31-rc1
+# Thu Jul 2 00:16:59 2009
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -9,7 +9,7 @@ CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_MMU=y
-# CONFIG_NO_IOPORT is not set
+CONFIG_HAVE_TCM=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -18,13 +18,12 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -68,7 +67,6 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -81,8 +79,13 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
# CONFIG_AIO is not set
+
+#
+# Performance Counters
+#
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLUB_DEBUG=y
+# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
@@ -94,6 +97,10 @@ CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -106,7 +113,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -138,9 +145,9 @@ CONFIG_DEFAULT_IOSCHED="deadline"
# CONFIG_ARCH_EP93XX is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_IMX is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -216,8 +223,8 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
-# CONFIG_OUTER_CACHE is not set
CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
CONFIG_COMMON_CLKDEV=y
#
@@ -243,7 +250,6 @@ CONFIG_PREEMPT=y
CONFIG_HZ=100
CONFIG_AEABI=y
CONFIG_OABI_COMPAT=y
-CONFIG_ARCH_FLATMEM_HAS_HOLES=y
# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
# CONFIG_HIGHMEM is not set
@@ -258,17 +264,18 @@ CONFIG_SPLIT_PTLOCK_CPUS=4096
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
#
# Boot options
#
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/mtdblock2 rw rootfstype=yaffs2 console=ttyAMA0,115200n8 ab3100.force=0,0x48 mtdparts=u300nand:128k@0x0(bootrecords)ro,8064k@128k(free)ro,253952k@8192k(platform) lpj=515072"
+CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
@@ -359,6 +366,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -497,6 +505,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_EEPROM_AT24 is not set
# CONFIG_EEPROM_AT25 is not set
# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
# CONFIG_EEPROM_93CX6 is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -538,6 +547,7 @@ CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_LM8323 is not set
# CONFIG_KEYBOARD_GPIO is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
@@ -597,9 +607,11 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
+CONFIG_I2C_STU300=y
#
# External I2C/SMBus adapter drivers
@@ -620,7 +632,6 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_PCF8575 is not set
# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -635,6 +646,7 @@ CONFIG_SPI_MASTER=y
#
# CONFIG_SPI_BITBANG is not set
# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_PL022=y
#
# SPI Protocol Masters
@@ -647,6 +659,7 @@ CONFIG_POWER_SUPPLY=y
# CONFIG_PDA_POWER is not set
# CONFIG_BATTERY_DS2760 is not set
# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
@@ -657,6 +670,7 @@ CONFIG_WATCHDOG=y
# Watchdog Device Drivers
#
# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_COH901327_WATCHDOG=y
CONFIG_SSB_POSSIBLE=y
#
@@ -678,22 +692,9 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+CONFIG_AB3100_CORE=y
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -760,6 +761,11 @@ CONFIG_SND_JACK=y
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_ARM is not set
# CONFIG_SND_SPI is not set
@@ -770,7 +776,7 @@ CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
-# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_DEBUG=y
# CONFIG_MMC_UNSAFE_RESUME is not set
#
@@ -797,7 +803,7 @@ CONFIG_LEDS_CLASS=y
#
# CONFIG_LEDS_PCA9532 is not set
# CONFIG_LEDS_GPIO is not set
-# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP3944 is not set
# CONFIG_LEDS_PCA955X is not set
# CONFIG_LEDS_DAC124S085 is not set
# CONFIG_LEDS_BD2802 is not set
@@ -845,6 +851,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -887,7 +894,10 @@ CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_LP3971 is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
@@ -900,16 +910,20 @@ CONFIG_REGULATOR=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
#
# Caches
@@ -1033,6 +1047,7 @@ CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
@@ -1063,18 +1078,16 @@ CONFIG_DEBUG_INFO=y
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1109,6 +1122,7 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC32 is not set
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
+CONFIG_GENERIC_ALLOCATOR=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1a711ea8418b..fd03fb63a332 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
}
static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
}
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len, int write)
{
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index c61642b40603..9f390ce335cb 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -12,4 +12,7 @@ struct dev_archdata {
#endif
};
+struct pdev_archdata {
+};
+
#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 263fed05ea33..c3fe09f8fd77 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -101,14 +101,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#ifdef CONFIG_SMP
/* check for possible thread migration */
- if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+ if (!cpumask_empty(mm_cpumask(next)) &&
+ !cpumask_test_cpu(cpu, mm_cpumask(next)))
__flush_icache_all();
#endif
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
check_context(next);
cpu_switch_mm(next->pgd, next);
if (cache_is_vivt())
- cpu_clear(cpu, prev->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
}
#endif
}
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index be962c1349c4..9c746af1bf6e 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -12,7 +12,7 @@
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 0abf386ba3d3..226cddd2fb65 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -6,8 +6,6 @@
#include <mach/hardware.h> /* for PCIBIOS_MIN_* */
-#define pcibios_scan_all_fns(a, b) 0
-
#ifdef CONFIG_PCI_HOST_ITE8152
/* ITE bridge requires setting latency timer to avoid early bus access
termination by PCI bus master devices
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 1cd2d6416bda..c433c6c73112 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -285,15 +285,6 @@ extern struct page *empty_zero_page;
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_special(pte) (0)
-/*
- * The following only works if pte_present() is not true.
- */
-#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 2)
-#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)
-
-#define PTE_FILE_MAX_BITS 30
-
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -384,16 +375,50 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-/* Encode and decode a swap entry.
+/*
+ * Encode and decode a swap entry. Swap entries are stored in the Linux
+ * page tables as follows:
*
- * We support up to 32GB of swap on 4k machines
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <--------------- offset --------------------> <--- type --> 0 0
+ *
+ * This gives us up to 127 swap files and 32GB per swap file. Note that
+ * the offset field is always non-zero.
*/
-#define __swp_type(x) (((x).val >> 2) & 0x7f)
-#define __swp_offset(x) ((x).val >> 9)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
+#define __SWP_TYPE_SHIFT 2
+#define __SWP_TYPE_BITS 7
+#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+/*
+ * It is an error for the kernel to have more swap files than we can
+ * encode in the PTEs. This ensures that we know when MAX_SWAPFILES
+ * is increased beyond what we presently support.
+ */
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+/*
+ * Encode and decode a file entry. File entries are stored in the Linux
+ * page tables as follows:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <------------------------ offset -------------------------> 1 0
+ */
+#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
+#define pte_to_pgoff(x) (pte_val(x) >> 2)
+#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)
+
+#define PTE_FILE_MAX_BITS 30
+
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a06e735b262a..e0d763be1846 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -93,7 +93,6 @@ extern void platform_cpu_enable(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
/*
* show local interrupt info
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c964f3fc3bc5..a45ab5dd8255 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_WB))
dsb();
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
if (tlb_flag(TLB_V3_FULL))
asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
if (tlb_flag(TLB_WB))
dsb();
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
if (tlb_flag(TLB_V3_PAGE))
asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
if (tlb_flag(TLB_V4_U_PAGE))
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 096f600dc8d8..b7c3490eaa24 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -98,17 +98,6 @@ unlock:
return 0;
}
-/* Handle bad interrupts */
-static struct irq_desc bad_irq_desc = {
- .handle_irq = handle_bad_irq,
- .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
-};
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* We are not allocating bad_irq_desc.affinity or .pending_mask */
-#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
-#endif
-
/*
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
* come via this function. Instead, they should provide their
@@ -124,10 +113,13 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
* Some hardware gives randomly wrong interrupts. Rather
* than crashing, do something sensible.
*/
- if (irq >= NR_IRQS)
- handle_bad_irq(irq, &bad_irq_desc);
- else
+ if (unlikely(irq >= NR_IRQS)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "Bad IRQ%u\n", irq);
+ ack_bad_irq(irq);
+ } else {
generic_handle_irq(irq);
+ }
/* AT91 specific workaround */
irq_finish(irq);
@@ -165,10 +157,6 @@ void __init init_IRQ(void)
for (irq = 0; irq < NR_IRQS; irq++)
irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-#ifdef CONFIG_SMP
- cpumask_setall(bad_irq_desc.affinity);
- bad_irq_desc.node = smp_processor_id();
-#endif
init_arch_irq();
}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index de885fd256c5..e0d32770bb3d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
read_lock(&tasklist_lock);
for_each_process(p) {
if (p->mm)
- cpu_clear(cpu, p->mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
}
read_unlock(&tasklist_lock);
@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- cpu_set(cpu, mm->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu_switch_mm(mm->pgd, mm);
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
@@ -643,7 +643,7 @@ void flush_tlb_all(void)
void flush_tlb_mm(struct mm_struct *mm)
{
if (tlb_ops_need_broadcast())
- on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+ on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
else
local_flush_tlb_mm(mm);
}
@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = uaddr;
- on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+ on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_page(vma, uaddr);
}
@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
ta.ta_vma = vma;
ta.ta_start = start;
ta.ta_end = end;
- on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+ on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_range(vma, start, end);
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 4340bf3d2c84..5cc4812c9763 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
+#include <asm/page.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -63,7 +64,7 @@ SECTIONS
usr/built-in.o(.init.ramfs)
__initramfs_end = .;
#endif
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__per_cpu_load = .;
__per_cpu_start = .;
*(.data.percpu.page_aligned)
@@ -73,7 +74,7 @@ SECTIONS
#ifndef CONFIG_XIP_KERNEL
__init_begin = _stext;
INIT_DATA
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
#endif
}
@@ -82,6 +83,7 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
*(.ARM.exidx.exit.text)
*(.ARM.extab.exit.text)
#ifndef CONFIG_HOTPLUG_CPU
@@ -118,7 +120,7 @@ SECTIONS
*(.got) /* Global offset table */
}
- RODATA
+ RO_DATA(PAGE_SIZE)
_etext = .; /* End of text and rodata section */
@@ -158,17 +160,17 @@ SECTIONS
*(.data.init_task)
#ifdef CONFIG_XIP_KERNEL
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_DATA
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
#endif
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__nosave_begin = .;
*(.data.nosave)
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__nosave_end = .;
/*
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 323b47f2b52f..8f3e15abdb62 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -23,6 +23,12 @@ config ARCH_AT91SAM9261
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
+config ARCH_AT91SAM9G10
+ bool "AT91SAM9G10"
+ select CPU_ARM926T
+ select GENERIC_TIME
+ select GENERIC_CLOCKEVENTS
+
config ARCH_AT91SAM9263
bool "AT91SAM9263"
select CPU_ARM926T
@@ -41,6 +47,12 @@ config ARCH_AT91SAM9G20
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
+config ARCH_AT91SAM9G45
+ bool "AT91SAM9G45"
+ select CPU_ARM926T
+ select GENERIC_TIME
+ select GENERIC_CLOCKEVENTS
+
config ARCH_AT91CAP9
bool "AT91CAP9"
select CPU_ARM926T
@@ -224,6 +236,21 @@ endif
# ----------------------------------------------------------
+if ARCH_AT91SAM9G10
+
+comment "AT91SAM9G10 Board Type"
+
+config MACH_AT91SAM9G10EK
+ bool "Atmel AT91SAM9G10-EK Evaluation Kit"
+ depends on ARCH_AT91SAM9G10
+ help
+ Select this if you are using Atmel's AT91SAM9G10-EK Evaluation Kit.
+ <http://www.atmel.com/dyn/products/tools_card.asp?tool_id=4588>
+
+endif
+
+# ----------------------------------------------------------
+
if ARCH_AT91SAM9263
comment "AT91SAM9263 Board Type"
@@ -280,6 +307,22 @@ endif
# ----------------------------------------------------------
+if ARCH_AT91SAM9G45
+
+comment "AT91SAM9G45 Board Type"
+
+config MACH_AT91SAM9G45EKES
+ bool "Atmel AT91SAM9G45-EKES Evaluation Kit"
+ depends on ARCH_AT91SAM9G45
+ help
+ Select this if you are using Atmel's AT91SAM9G45-EKES Evaluation Kit.
+ "ES" at the end of the name means that this board is an
+ Engineering Sample.
+
+endif
+
+# ----------------------------------------------------------
+
if ARCH_AT91CAP9
comment "AT91CAP9 Board Type"
@@ -315,13 +358,13 @@ comment "AT91 Board Options"
config MTD_AT91_DATAFLASH_CARD
bool "Enable DataFlash Card support"
- depends on (ARCH_AT91RM9200DK || MACH_AT91RM9200EK || MACH_AT91SAM9260EK || MACH_AT91SAM9261EK || MACH_AT91SAM9263EK || MACH_AT91SAM9G20EK || MACH_ECBAT91 || MACH_SAM9_L9260 || MACH_AT91CAP9ADK || MACH_NEOCORE926)
+ depends on (ARCH_AT91RM9200DK || MACH_AT91RM9200EK || MACH_AT91SAM9260EK || MACH_AT91SAM9261EK || MACH_AT91SAM9G10EK || MACH_AT91SAM9263EK || MACH_AT91SAM9G20EK || MACH_ECBAT91 || MACH_SAM9_L9260 || MACH_AT91CAP9ADK || MACH_NEOCORE926)
help
Enable support for the DataFlash card.
config MTD_NAND_ATMEL_BUSWIDTH_16
bool "Enable 16-bit data bus interface to NAND flash"
- depends on (MACH_AT91SAM9260EK || MACH_AT91SAM9261EK || MACH_AT91SAM9263EK || MACH_AT91SAM9G20EK || MACH_AT91CAP9ADK)
+ depends on (MACH_AT91SAM9260EK || MACH_AT91SAM9261EK || MACH_AT91SAM9G10EK || MACH_AT91SAM9263EK || MACH_AT91SAM9G20EK || MACH_AT91SAM9G45EKES || MACH_AT91CAP9ADK)
help
On AT91SAM926x boards both types of NAND flash can be present
(8 and 16 bit data bus width).
@@ -383,7 +426,7 @@ config AT91_EARLY_USART2
config AT91_EARLY_USART3
bool "USART3"
- depends on (ARCH_AT91RM9200 || ARCH_AT91SAM9RL || ARCH_AT91SAM9260 || ARCH_AT91SAM9G20)
+ depends on (ARCH_AT91RM9200 || ARCH_AT91SAM9RL || ARCH_AT91SAM9260 || ARCH_AT91SAM9G20 || ARCH_AT91SAM9G45)
config AT91_EARLY_USART4
bool "USART4"
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index c69ff237fd14..4eaf80abe100 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -13,9 +13,11 @@ obj-$(CONFIG_AT91_PMC_UNIT) += clock.o
obj-$(CONFIG_ARCH_AT91RM9200) += at91rm9200.o at91rm9200_time.o at91rm9200_devices.o
obj-$(CONFIG_ARCH_AT91SAM9260) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
@@ -43,6 +45,7 @@ obj-$(CONFIG_MACH_AFEB9260) += board-afeb-9260v1.o
# AT91SAM9261 board-specific support
obj-$(CONFIG_MACH_AT91SAM9261EK) += board-sam9261ek.o
+obj-$(CONFIG_MACH_AT91SAM9G10EK) += board-sam9261ek.o
# AT91SAM9263 board-specific support
obj-$(CONFIG_MACH_AT91SAM9263EK) += board-sam9263ek.o
@@ -55,6 +58,9 @@ obj-$(CONFIG_MACH_AT91SAM9RLEK) += board-sam9rlek.o
# AT91SAM9G20 board-specific support
obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
+# AT91SAM9G45 board-specific support
+obj-$(CONFIG_MACH_AT91SAM9G45EKES) += board-sam9m10g45ek.o
+
# AT91CAP9 board-specific support
obj-$(CONFIG_MACH_AT91CAP9ADK) += board-cap9adk.o
diff --git a/arch/arm/mach-at91/Makefile.boot b/arch/arm/mach-at91/Makefile.boot
index 071a2506a69f..3462b815054a 100644
--- a/arch/arm/mach-at91/Makefile.boot
+++ b/arch/arm/mach-at91/Makefile.boot
@@ -7,6 +7,10 @@ ifeq ($(CONFIG_ARCH_AT91CAP9),y)
zreladdr-y := 0x70008000
params_phys-y := 0x70000100
initrd_phys-y := 0x70410000
+else ifeq ($(CONFIG_ARCH_AT91SAM9G45),y)
+ zreladdr-y := 0x70008000
+params_phys-y := 0x70000100
+initrd_phys-y := 0x70410000
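+# same addresses as the AT91CAP9 block above: AT91SAM9G45 RAM is likewise based at 0x70000000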
else
zreladdr-y := 0x20008000
params_phys-y := 0x20000100
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 3acd7d7e6a42..4ecf37996c77 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -16,6 +16,7 @@
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#include <mach/cpu.h>
#include <mach/at91sam9261.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
@@ -30,7 +31,11 @@ static struct map_desc at91sam9261_io_desc[] __initdata = {
.pfn = __phys_to_pfn(AT91_BASE_SYS),
.length = SZ_16K,
.type = MT_DEVICE,
- }, {
+ },
+};
+
+static struct map_desc at91sam9261_sram_desc[] __initdata = {
+ {
.virtual = AT91_IO_VIRT_BASE - AT91SAM9261_SRAM_SIZE,
.pfn = __phys_to_pfn(AT91SAM9261_SRAM_BASE),
.length = AT91SAM9261_SRAM_SIZE,
@@ -38,6 +43,15 @@ static struct map_desc at91sam9261_io_desc[] __initdata = {
},
};
+static struct map_desc at91sam9g10_sram_desc[] __initdata = {
+ {
+ .virtual = AT91_IO_VIRT_BASE - AT91SAM9G10_SRAM_SIZE,
+ .pfn = __phys_to_pfn(AT91SAM9G10_SRAM_BASE),
+ .length = AT91SAM9G10_SRAM_SIZE,
+ .type = MT_DEVICE,
+ },
+};
+
/* --------------------------------------------------------------------
* Clocks
* -------------------------------------------------------------------- */
@@ -263,6 +277,12 @@ void __init at91sam9261_initialize(unsigned long main_clock)
/* Map peripherals */
iotable_init(at91sam9261_io_desc, ARRAY_SIZE(at91sam9261_io_desc));
+ if (cpu_is_at91sam9g10())
+ iotable_init(at91sam9g10_sram_desc, ARRAY_SIZE(at91sam9g10_sram_desc));
+ else
+ iotable_init(at91sam9261_sram_desc, ARRAY_SIZE(at91sam9261_sram_desc));
+
at91_arch_reset = at91sam9261_reset;
pm_power_off = at91sam9261_poweroff;
at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
new file mode 100644
index 000000000000..85166b7e69a1
--- /dev/null
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -0,0 +1,360 @@
+/*
+ * Chip-specific setup code for the AT91SAM9G45 family
+ *
+ * Copyright (C) 2009 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pm.h>
+
+#include <asm/irq.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <mach/at91sam9g45.h>
+#include <mach/at91_pmc.h>
+#include <mach/at91_rstc.h>
+#include <mach/at91_shdwc.h>
+
+#include "generic.h"
+#include "clock.h"
+
+static struct map_desc at91sam9g45_io_desc[] __initdata = {
+ {
+ .virtual = AT91_VA_BASE_SYS,
+ .pfn = __phys_to_pfn(AT91_BASE_SYS),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_IO_VIRT_BASE - AT91SAM9G45_SRAM_SIZE,
+ .pfn = __phys_to_pfn(AT91SAM9G45_SRAM_BASE),
+ .length = AT91SAM9G45_SRAM_SIZE,
+ .type = MT_DEVICE,
+ }
+};
+
+/* --------------------------------------------------------------------
+ * Clocks
+ * -------------------------------------------------------------------- */
+
+/*
+ * The peripheral clocks.
+ */
+static struct clk pioA_clk = {
+ .name = "pioA_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_PIOA,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk pioB_clk = {
+ .name = "pioB_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_PIOB,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk pioC_clk = {
+ .name = "pioC_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_PIOC,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk pioDE_clk = {
+ .name = "pioDE_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_PIODE,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk usart0_clk = {
+ .name = "usart0_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_US0,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk usart1_clk = {
+ .name = "usart1_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_US1,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk usart2_clk = {
+ .name = "usart2_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_US2,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk usart3_clk = {
+ .name = "usart3_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_US3,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk mmc0_clk = {
+ .name = "mci0_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_MCI0,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk twi0_clk = {
+ .name = "twi0_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_TWI0,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk twi1_clk = {
+ .name = "twi1_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_TWI1,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk spi0_clk = {
+ .name = "spi0_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_SPI0,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk spi1_clk = {
+ .name = "spi1_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_SPI1,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk ssc0_clk = {
+ .name = "ssc0_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_SSC0,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk ssc1_clk = {
+ .name = "ssc1_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_SSC1,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk tcb_clk = {
+ .name = "tcb_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_TCB,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk pwm_clk = {
+ .name = "pwm_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_PWMC,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk tsc_clk = {
+ .name = "tsc_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_TSC,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk dma_clk = {
+ .name = "dma_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_DMA,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk uhphs_clk = {
+ .name = "uhphs_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_UHPHS,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk lcdc_clk = {
+ .name = "lcdc_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_LCDC,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk ac97_clk = {
+ .name = "ac97_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_AC97C,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk macb_clk = {
+ .name = "macb_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_EMAC,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk isi_clk = {
+ .name = "isi_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_ISI,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk udphs_clk = {
+ .name = "udphs_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_UDPHS,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+static struct clk mmc1_clk = {
+ .name = "mci1_clk",
+ .pmc_mask = 1 << AT91SAM9G45_ID_MCI1,
+ .type = CLK_TYPE_PERIPHERAL,
+};
+
+/* One additional fake clock for ohci */
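+/* (it has no PMC enable bit of its own, hence pmc_mask = 0; parenting it on
+ * uhphs_clk gives the OHCI driver a clock handle tied to the USB host clock) */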
+static struct clk ohci_clk = {
+ .name = "ohci_clk",
+ .pmc_mask = 0,
+ .type = CLK_TYPE_PERIPHERAL,
+ .parent = &uhphs_clk,
+};
+
+static struct clk *periph_clocks[] __initdata = {
+ &pioA_clk,
+ &pioB_clk,
+ &pioC_clk,
+ &pioDE_clk,
+ &usart0_clk,
+ &usart1_clk,
+ &usart2_clk,
+ &usart3_clk,
+ &mmc0_clk,
+ &twi0_clk,
+ &twi1_clk,
+ &spi0_clk,
+ &spi1_clk,
+ &ssc0_clk,
+ &ssc1_clk,
+ &tcb_clk,
+ &pwm_clk,
+ &tsc_clk,
+ &dma_clk,
+ &uhphs_clk,
+ &lcdc_clk,
+ &ac97_clk,
+ &macb_clk,
+ &isi_clk,
+ &udphs_clk,
+ &mmc1_clk,
+ // irq0
+ &ohci_clk,
+};
+
+/*
+ * The two programmable clocks.
+ * You must configure pin multiplexing to bring these signals out.
+ */
+static struct clk pck0 = {
+ .name = "pck0",
+ .pmc_mask = AT91_PMC_PCK0,
+ .type = CLK_TYPE_PROGRAMMABLE,
+ .id = 0,
+};
+static struct clk pck1 = {
+ .name = "pck1",
+ .pmc_mask = AT91_PMC_PCK1,
+ .type = CLK_TYPE_PROGRAMMABLE,
+ .id = 1,
+};
+
+static void __init at91sam9g45_register_clocks(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
+ clk_register(periph_clocks[i]);
+
+ clk_register(&pck0);
+ clk_register(&pck1);
+}
+
+/* --------------------------------------------------------------------
+ * GPIO
+ * -------------------------------------------------------------------- */
+
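+/* PIOD and PIOE share a single peripheral ID and clock, so two of the bank
+ * entries below both use AT91SAM9G45_ID_PIODE and pioDE_clk. */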
+static struct at91_gpio_bank at91sam9g45_gpio[] = {
+ {
+ .id = AT91SAM9G45_ID_PIOA,
+ .offset = AT91_PIOA,
+ .clock = &pioA_clk,
+ }, {
+ .id = AT91SAM9G45_ID_PIOB,
+ .offset = AT91_PIOB,
+ .clock = &pioB_clk,
+ }, {
+ .id = AT91SAM9G45_ID_PIOC,
+ .offset = AT91_PIOC,
+ .clock = &pioC_clk,
+ }, {
+ .id = AT91SAM9G45_ID_PIODE,
+ .offset = AT91_PIOD,
+ .clock = &pioDE_clk,
+ }, {
+ .id = AT91SAM9G45_ID_PIODE,
+ .offset = AT91_PIOE,
+ .clock = &pioDE_clk,
+ }
+};
+
+static void at91sam9g45_reset(void)
+{
+ at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
+}
+
+static void at91sam9g45_poweroff(void)
+{
+ at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
+}
+
+
+/* --------------------------------------------------------------------
+ * AT91SAM9G45 processor initialization
+ * -------------------------------------------------------------------- */
+
+void __init at91sam9g45_initialize(unsigned long main_clock)
+{
+ /* Map peripherals */
+ iotable_init(at91sam9g45_io_desc, ARRAY_SIZE(at91sam9g45_io_desc));
+
+ at91_arch_reset = at91sam9g45_reset;
+ pm_power_off = at91sam9g45_poweroff;
+ at91_extern_irq = (1 << AT91SAM9G45_ID_IRQ0);
+
+ /* Init clock subsystem */
+ at91_clock_init(main_clock);
+
+ /* Register the processor-specific clocks */
+ at91sam9g45_register_clocks();
+
+ /* Register GPIO subsystem */
+ at91_gpio_init(at91sam9g45_gpio, 5);
+}
+
+/* --------------------------------------------------------------------
+ * Interrupt initialization
+ * -------------------------------------------------------------------- */
+
+/*
+ * The default interrupt priority levels (0 = lowest, 7 = highest).
+ */
+static unsigned int at91sam9g45_default_irq_priority[NR_AIC_IRQS] __initdata = {
+ 7, /* Advanced Interrupt Controller (FIQ) */
+ 7, /* System Peripherals */
+ 1, /* Parallel IO Controller A */
+ 1, /* Parallel IO Controller B */
+ 1, /* Parallel IO Controller C */
+ 1, /* Parallel IO Controller D and E */
+ 0,
+ 5, /* USART 0 */
+ 5, /* USART 1 */
+ 5, /* USART 2 */
+ 5, /* USART 3 */
+ 0, /* Multimedia Card Interface 0 */
+ 6, /* Two-Wire Interface 0 */
+ 6, /* Two-Wire Interface 1 */
+ 5, /* Serial Peripheral Interface 0 */
+ 5, /* Serial Peripheral Interface 1 */
+ 4, /* Serial Synchronous Controller 0 */
+ 4, /* Serial Synchronous Controller 1 */
+ 0, /* Timer Counter 0, 1, 2, 3, 4 and 5 */
+ 0, /* Pulse Width Modulation Controller */
+ 0, /* Touch Screen Controller */
+ 0, /* DMA Controller */
+ 2, /* USB Host High Speed port */
+ 3, /* LCD Controller */
+ 5, /* AC97 Controller */
+ 3, /* Ethernet */
+ 0, /* Image Sensor Interface */
+ 2, /* USB Device High speed port */
+ 0,
+ 0, /* Multimedia Card Interface 1 */
+ 0,
+ 0, /* Advanced Interrupt Controller (IRQ0) */
+};
+
+void __init at91sam9g45_init_interrupts(unsigned int priority[NR_AIC_IRQS])
+{
+ if (!priority)
+ priority = at91sam9g45_default_irq_priority;
+
+ /* Initialize the AIC interrupt controller */
+ at91_aic_init(priority);
+
+ /* Enable GPIO interrupts */
+ at91_gpio_irq_setup();
+}
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
new file mode 100644
index 000000000000..d746e8621bc2
--- /dev/null
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -0,0 +1,1230 @@
+/*
+ * On-Chip devices setup code for the AT91SAM9G45 family
+ *
+ * Copyright (C) 2009 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/i2c-gpio.h>
+
+#include <linux/fb.h>
+#include <video/atmel_lcdc.h>
+
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/at91sam9g45.h>
+#include <mach/at91sam9g45_matrix.h>
+#include <mach/at91sam9_smc.h>
+
+#include "generic.h"
+
+
+/* --------------------------------------------------------------------
+ * USB Host (OHCI)
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+static u64 ohci_dmamask = DMA_BIT_MASK(32);
+static struct at91_usbh_data usbh_ohci_data;
+
+static struct resource usbh_ohci_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_OHCI_BASE,
+ .end = AT91SAM9G45_OHCI_BASE + SZ_1M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_UHPHS,
+ .end = AT91SAM9G45_ID_UHPHS,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91_usbh_ohci_device = {
+ .name = "at91_ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &ohci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &usbh_ohci_data,
+ },
+ .resource = usbh_ohci_resources,
+ .num_resources = ARRAY_SIZE(usbh_ohci_resources),
+};
+
+void __init at91_add_device_usbh_ohci(struct at91_usbh_data *data)
+{
+ int i;
+
+ if (!data)
+ return;
+
+ /* Enable VBus control for UHP ports */
+ for (i = 0; i < data->ports; i++) {
+ if (data->vbus_pin[i])
+ at91_set_gpio_output(data->vbus_pin[i], 0);
+ }
+
+ usbh_ohci_data = *data;
+ platform_device_register(&at91_usbh_ohci_device);
+}
+#else
+void __init at91_add_device_usbh_ohci(struct at91_usbh_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * USB HS Device (Gadget)
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE)
+static struct resource usba_udc_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_UDPHS_FIFO,
+ .end = AT91SAM9G45_UDPHS_FIFO + SZ_512K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_BASE_UDPHS,
+ .end = AT91SAM9G45_BASE_UDPHS + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = AT91SAM9G45_ID_UDPHS,
+ .end = AT91SAM9G45_ID_UDPHS,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define EP(nam, idx, maxpkt, maxbk, dma, isoc) \
+ [idx] = { \
+ .name = nam, \
+ .index = idx, \
+ .fifo_size = maxpkt, \
+ .nr_banks = maxbk, \
+ .can_dma = dma, \
+ .can_isoc = isoc, \
+ }
+
+static struct usba_ep_data usba_udc_ep[] __initdata = {
+ EP("ep0", 0, 64, 1, 0, 0),
+ EP("ep1", 1, 1024, 2, 1, 1),
+ EP("ep2", 2, 1024, 2, 1, 1),
+ EP("ep3", 3, 1024, 3, 1, 0),
+ EP("ep4", 4, 1024, 3, 1, 0),
+ EP("ep5", 5, 1024, 3, 1, 1),
+ EP("ep6", 6, 1024, 3, 1, 1),
+};
+
+#undef EP
+
+/*
+ * pdata doesn't have room for any endpoints, so we need to
+ * append room for the ones we need right after it.
+ */
+static struct {
+ struct usba_platform_data pdata;
+ struct usba_ep_data ep[7];
+} usba_udc_data;
+
+static struct platform_device at91_usba_udc_device = {
+ .name = "atmel_usba_udc",
+ .id = -1,
+ .dev = {
+ .platform_data = &usba_udc_data.pdata,
+ },
+ .resource = usba_udc_resources,
+ .num_resources = ARRAY_SIZE(usba_udc_resources),
+};
+
+void __init at91_add_device_usba(struct usba_platform_data *data)
+{
+ usba_udc_data.pdata.vbus_pin = -EINVAL;
+ usba_udc_data.pdata.num_ep = ARRAY_SIZE(usba_udc_ep);
+ memcpy(usba_udc_data.ep, usba_udc_ep, sizeof(usba_udc_ep));
+
+ if (data && data->vbus_pin > 0) {
+ at91_set_gpio_input(data->vbus_pin, 0);
+ at91_set_deglitch(data->vbus_pin, 1);
+ usba_udc_data.pdata.vbus_pin = data->vbus_pin;
+ }
+
+ /* Pullup pin is handled internally by USB device peripheral */
+
+ /* Clocks */
+ at91_clock_associate("utmi_clk", &at91_usba_udc_device.dev, "hclk");
+ at91_clock_associate("udphs_clk", &at91_usba_udc_device.dev, "pclk");
+
+ platform_device_register(&at91_usba_udc_device);
+}
+#else
+void __init at91_add_device_usba(struct usba_platform_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * Ethernet
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
+static u64 eth_dmamask = DMA_BIT_MASK(32);
+static struct at91_eth_data eth_data;
+
+static struct resource eth_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_EMAC,
+ .end = AT91SAM9G45_BASE_EMAC + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_EMAC,
+ .end = AT91SAM9G45_ID_EMAC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_eth_device = {
+ .name = "macb",
+ .id = -1,
+ .dev = {
+ .dma_mask = &eth_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &eth_data,
+ },
+ .resource = eth_resources,
+ .num_resources = ARRAY_SIZE(eth_resources),
+};
+
+void __init at91_add_device_eth(struct at91_eth_data *data)
+{
+ if (!data)
+ return;
+
+ if (data->phy_irq_pin) {
+ at91_set_gpio_input(data->phy_irq_pin, 0);
+ at91_set_deglitch(data->phy_irq_pin, 1);
+ }
+
+ /* Pins used for MII and RMII */
+ at91_set_A_periph(AT91_PIN_PA17, 0); /* ETXCK_EREFCK */
+ at91_set_A_periph(AT91_PIN_PA15, 0); /* ERXDV */
+ at91_set_A_periph(AT91_PIN_PA12, 0); /* ERX0 */
+ at91_set_A_periph(AT91_PIN_PA13, 0); /* ERX1 */
+ at91_set_A_periph(AT91_PIN_PA16, 0); /* ERXER */
+ at91_set_A_periph(AT91_PIN_PA14, 0); /* ETXEN */
+ at91_set_A_periph(AT91_PIN_PA10, 0); /* ETX0 */
+ at91_set_A_periph(AT91_PIN_PA11, 0); /* ETX1 */
+ at91_set_A_periph(AT91_PIN_PA19, 0); /* EMDIO */
+ at91_set_A_periph(AT91_PIN_PA18, 0); /* EMDC */
+
+ if (!data->is_rmii) {
+ at91_set_B_periph(AT91_PIN_PA29, 0); /* ECRS */
+ at91_set_B_periph(AT91_PIN_PA30, 0); /* ECOL */
+ at91_set_B_periph(AT91_PIN_PA8, 0); /* ERX2 */
+ at91_set_B_periph(AT91_PIN_PA9, 0); /* ERX3 */
+ at91_set_B_periph(AT91_PIN_PA28, 0); /* ERXCK */
+ at91_set_B_periph(AT91_PIN_PA6, 0); /* ETX2 */
+ at91_set_B_periph(AT91_PIN_PA7, 0); /* ETX3 */
+ at91_set_B_periph(AT91_PIN_PA27, 0); /* ETXER */
+ }
+
+ eth_data = *data;
+ platform_device_register(&at91sam9g45_eth_device);
+}
+#else
+void __init at91_add_device_eth(struct at91_eth_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * NAND / SmartMedia
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
+static struct atmel_nand_data nand_data;
+
+#define NAND_BASE AT91_CHIPSELECT_3
+
+static struct resource nand_resources[] = {
+ [0] = {
+ .start = NAND_BASE,
+ .end = NAND_BASE + SZ_256M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91_BASE_SYS + AT91_ECC,
+ .end = AT91_BASE_SYS + AT91_ECC + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device at91sam9g45_nand_device = {
+ .name = "atmel_nand",
+ .id = -1,
+ .dev = {
+ .platform_data = &nand_data,
+ },
+ .resource = nand_resources,
+ .num_resources = ARRAY_SIZE(nand_resources),
+};
+
+void __init at91_add_device_nand(struct atmel_nand_data *data)
+{
+ unsigned long csa;
+
+ if (!data)
+ return;
+
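+ /* assign EBI chip-select 3 to the SMC SmartMedia (NAND) logic */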
+ csa = at91_sys_read(AT91_MATRIX_EBICSA);
+ at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_CS3A_SMC_SMARTMEDIA);
+
+ /* enable pin */
+ if (data->enable_pin)
+ at91_set_gpio_output(data->enable_pin, 1);
+
+ /* ready/busy pin */
+ if (data->rdy_pin)
+ at91_set_gpio_input(data->rdy_pin, 1);
+
+ /* card detect pin */
+ if (data->det_pin)
+ at91_set_gpio_input(data->det_pin, 1);
+
+ nand_data = *data;
+ platform_device_register(&at91sam9g45_nand_device);
+}
+#else
+void __init at91_add_device_nand(struct atmel_nand_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * TWI (i2c)
+ * -------------------------------------------------------------------- */
+
+/*
+ * Prefer the GPIO code since the TWI controller isn't robust
+ * (gets overruns and underruns under load) and can only issue
+ * repeated STARTs in one scenario (the driver doesn't yet handle them).
+ */
+#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
+static struct i2c_gpio_platform_data pdata_i2c0 = {
+ .sda_pin = AT91_PIN_PA20,
+ .sda_is_open_drain = 1,
+ .scl_pin = AT91_PIN_PA21,
+ .scl_is_open_drain = 1,
+ .udelay = 2, /* ~100 kHz */
+};
+
+static struct platform_device at91sam9g45_twi0_device = {
+ .name = "i2c-gpio",
+ .id = 0,
+ .dev.platform_data = &pdata_i2c0,
+};
+
+static struct i2c_gpio_platform_data pdata_i2c1 = {
+ .sda_pin = AT91_PIN_PB10,
+ .sda_is_open_drain = 1,
+ .scl_pin = AT91_PIN_PB11,
+ .scl_is_open_drain = 1,
+ .udelay = 2, /* ~100 kHz */
+};
+
+static struct platform_device at91sam9g45_twi1_device = {
+ .name = "i2c-gpio",
+ .id = 1,
+ .dev.platform_data = &pdata_i2c1,
+};
+
+void __init at91_add_device_i2c(short i2c_id, struct i2c_board_info *devices, int nr_devices)
+{
+ i2c_register_board_info(i2c_id, devices, nr_devices);
+
+ if (i2c_id == 0) {
+ at91_set_GPIO_periph(AT91_PIN_PA20, 1); /* TWD (SDA) */
+ at91_set_multi_drive(AT91_PIN_PA20, 1);
+
+ at91_set_GPIO_periph(AT91_PIN_PA21, 1); /* TWCK (SCL) */
+ at91_set_multi_drive(AT91_PIN_PA21, 1);
+
+ platform_device_register(&at91sam9g45_twi0_device);
+ } else {
+ at91_set_GPIO_periph(AT91_PIN_PB10, 1); /* TWD (SDA) */
+ at91_set_multi_drive(AT91_PIN_PB10, 1);
+
+ at91_set_GPIO_periph(AT91_PIN_PB11, 1); /* TWCK (SCL) */
+ at91_set_multi_drive(AT91_PIN_PB11, 1);
+
+ platform_device_register(&at91sam9g45_twi1_device);
+ }
+}
+
+#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)
+static struct resource twi0_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_TWI0,
+ .end = AT91SAM9G45_BASE_TWI0 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_TWI0,
+ .end = AT91SAM9G45_ID_TWI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_twi0_device = {
+ .name = "at91_i2c",
+ .id = 0,
+ .resource = twi0_resources,
+ .num_resources = ARRAY_SIZE(twi0_resources),
+};
+
+static struct resource twi1_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_TWI1,
+ .end = AT91SAM9G45_BASE_TWI1 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_TWI1,
+ .end = AT91SAM9G45_ID_TWI1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_twi1_device = {
+ .name = "at91_i2c",
+ .id = 1,
+ .resource = twi1_resources,
+ .num_resources = ARRAY_SIZE(twi1_resources),
+};
+
+void __init at91_add_device_i2c(short i2c_id, struct i2c_board_info *devices, int nr_devices)
+{
+ i2c_register_board_info(i2c_id, devices, nr_devices);
+
+ /* pins used for TWI interface */
+ if (i2c_id == 0) {
+ at91_set_A_periph(AT91_PIN_PA20, 0); /* TWD */
+ at91_set_multi_drive(AT91_PIN_PA20, 1);
+
+ at91_set_A_periph(AT91_PIN_PA21, 0); /* TWCK */
+ at91_set_multi_drive(AT91_PIN_PA21, 1);
+
+ platform_device_register(&at91sam9g45_twi0_device);
+ } else {
+ at91_set_A_periph(AT91_PIN_PB10, 0); /* TWD */
+ at91_set_multi_drive(AT91_PIN_PB10, 1);
+
+ at91_set_A_periph(AT91_PIN_PB11, 0); /* TWCK */
+ at91_set_multi_drive(AT91_PIN_PB11, 1);
+
+ platform_device_register(&at91sam9g45_twi1_device);
+ }
+}
+#else
+void __init at91_add_device_i2c(short i2c_id, struct i2c_board_info *devices, int nr_devices) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * SPI
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
+static u64 spi_dmamask = DMA_BIT_MASK(32);
+
+static struct resource spi0_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_SPI0,
+ .end = AT91SAM9G45_BASE_SPI0 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_SPI0,
+ .end = AT91SAM9G45_ID_SPI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_spi0_device = {
+ .name = "atmel_spi",
+ .id = 0,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = spi0_resources,
+ .num_resources = ARRAY_SIZE(spi0_resources),
+};
+
+static const unsigned spi0_standard_cs[4] = { AT91_PIN_PB3, AT91_PIN_PB18, AT91_PIN_PB19, AT91_PIN_PD27 };
+
+static struct resource spi1_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_SPI1,
+ .end = AT91SAM9G45_BASE_SPI1 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_SPI1,
+ .end = AT91SAM9G45_ID_SPI1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_spi1_device = {
+ .name = "atmel_spi",
+ .id = 1,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = spi1_resources,
+ .num_resources = ARRAY_SIZE(spi1_resources),
+};
+
+static const unsigned spi1_standard_cs[4] = { AT91_PIN_PB17, AT91_PIN_PD28, AT91_PIN_PD18, AT91_PIN_PD19 };
+
+void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
+{
+ int i;
+ unsigned long cs_pin;
+ short enable_spi0 = 0;
+ short enable_spi1 = 0;
+
+ /* Choose SPI chip-selects */
+ for (i = 0; i < nr_devices; i++) {
+ if (devices[i].controller_data)
+ cs_pin = (unsigned long) devices[i].controller_data;
+ else if (devices[i].bus_num == 0)
+ cs_pin = spi0_standard_cs[devices[i].chip_select];
+ else
+ cs_pin = spi1_standard_cs[devices[i].chip_select];
+
+ if (devices[i].bus_num == 0)
+ enable_spi0 = 1;
+ else
+ enable_spi1 = 1;
+
+ /* enable chip-select pin */
+ at91_set_gpio_output(cs_pin, 1);
+
+ /* pass chip-select pin to driver */
+ devices[i].controller_data = (void *) cs_pin;
+ }
+
+ spi_register_board_info(devices, nr_devices);
+
+ /* Configure SPI bus(es) */
+ if (enable_spi0) {
+ at91_set_A_periph(AT91_PIN_PB0, 0); /* SPI0_MISO */
+ at91_set_A_periph(AT91_PIN_PB1, 0); /* SPI0_MOSI */
+ at91_set_A_periph(AT91_PIN_PB2, 0); /* SPI0_SPCK */
+
+ at91_clock_associate("spi0_clk", &at91sam9g45_spi0_device.dev, "spi_clk");
+ platform_device_register(&at91sam9g45_spi0_device);
+ }
+ if (enable_spi1) {
+ at91_set_A_periph(AT91_PIN_PB14, 0); /* SPI1_MISO */
+ at91_set_A_periph(AT91_PIN_PB15, 0); /* SPI1_MOSI */
+ at91_set_A_periph(AT91_PIN_PB16, 0); /* SPI1_SPCK */
+
+ at91_clock_associate("spi1_clk", &at91sam9g45_spi1_device.dev, "spi_clk");
+ platform_device_register(&at91sam9g45_spi1_device);
+ }
+}
+#else
+void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * LCD Controller
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
+static u64 lcdc_dmamask = DMA_BIT_MASK(32);
+static struct atmel_lcdfb_info lcdc_data;
+
+static struct resource lcdc_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_LCDC_BASE,
+ .end = AT91SAM9G45_LCDC_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_LCDC,
+ .end = AT91SAM9G45_ID_LCDC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91_lcdc_device = {
+ .name = "atmel_lcdfb",
+ .id = 0,
+ .dev = {
+ .dma_mask = &lcdc_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &lcdc_data,
+ },
+ .resource = lcdc_resources,
+ .num_resources = ARRAY_SIZE(lcdc_resources),
+};
+
+void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+{
+ if (!data)
+ return;
+
+ at91_set_A_periph(AT91_PIN_PE0, 0); /* LCDDPWR */
+
+ at91_set_A_periph(AT91_PIN_PE2, 0); /* LCDCC */
+ at91_set_A_periph(AT91_PIN_PE3, 0); /* LCDVSYNC */
+ at91_set_A_periph(AT91_PIN_PE4, 0); /* LCDHSYNC */
+ at91_set_A_periph(AT91_PIN_PE5, 0); /* LCDDOTCK */
+ at91_set_A_periph(AT91_PIN_PE6, 0); /* LCDDEN */
+ at91_set_A_periph(AT91_PIN_PE7, 0); /* LCDD0 */
+ at91_set_A_periph(AT91_PIN_PE8, 0); /* LCDD1 */
+ at91_set_A_periph(AT91_PIN_PE9, 0); /* LCDD2 */
+ at91_set_A_periph(AT91_PIN_PE10, 0); /* LCDD3 */
+ at91_set_A_periph(AT91_PIN_PE11, 0); /* LCDD4 */
+ at91_set_A_periph(AT91_PIN_PE12, 0); /* LCDD5 */
+ at91_set_A_periph(AT91_PIN_PE13, 0); /* LCDD6 */
+ at91_set_A_periph(AT91_PIN_PE14, 0); /* LCDD7 */
+ at91_set_A_periph(AT91_PIN_PE15, 0); /* LCDD8 */
+ at91_set_A_periph(AT91_PIN_PE16, 0); /* LCDD9 */
+ at91_set_A_periph(AT91_PIN_PE17, 0); /* LCDD10 */
+ at91_set_A_periph(AT91_PIN_PE18, 0); /* LCDD11 */
+ at91_set_A_periph(AT91_PIN_PE19, 0); /* LCDD12 */
+ at91_set_A_periph(AT91_PIN_PE20, 0); /* LCDD13 */
+ at91_set_A_periph(AT91_PIN_PE21, 0); /* LCDD14 */
+ at91_set_A_periph(AT91_PIN_PE22, 0); /* LCDD15 */
+ at91_set_A_periph(AT91_PIN_PE23, 0); /* LCDD16 */
+ at91_set_A_periph(AT91_PIN_PE24, 0); /* LCDD17 */
+ at91_set_A_periph(AT91_PIN_PE25, 0); /* LCDD18 */
+ at91_set_A_periph(AT91_PIN_PE26, 0); /* LCDD19 */
+ at91_set_A_periph(AT91_PIN_PE27, 0); /* LCDD20 */
+ at91_set_A_periph(AT91_PIN_PE28, 0); /* LCDD21 */
+ at91_set_A_periph(AT91_PIN_PE29, 0); /* LCDD22 */
+ at91_set_A_periph(AT91_PIN_PE30, 0); /* LCDD23 */
+
+ lcdc_data = *data;
+ platform_device_register(&at91_lcdc_device);
+}
+#else
+void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * Timer/Counter block
+ * -------------------------------------------------------------------- */
+
+#ifdef CONFIG_ATMEL_TCLIB
+static struct resource tcb0_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_TCB0,
+ .end = AT91SAM9G45_BASE_TCB0 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_TCB,
+ .end = AT91SAM9G45_ID_TCB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_tcb0_device = {
+ .name = "atmel_tcb",
+ .id = 0,
+ .resource = tcb0_resources,
+ .num_resources = ARRAY_SIZE(tcb0_resources),
+};
+
+/* TCB1 begins with TC3 */
+static struct resource tcb1_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_TCB1,
+ .end = AT91SAM9G45_BASE_TCB1 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_TCB,
+ .end = AT91SAM9G45_ID_TCB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_tcb1_device = {
+ .name = "atmel_tcb",
+ .id = 1,
+ .resource = tcb1_resources,
+ .num_resources = ARRAY_SIZE(tcb1_resources),
+};
+
+static void __init at91_add_device_tc(void)
+{
+ /* this chip has one clock and irq for all six TC channels */
+ at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
+ platform_device_register(&at91sam9g45_tcb0_device);
+ at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
+ platform_device_register(&at91sam9g45_tcb1_device);
+}
+#else
+static void __init at91_add_device_tc(void) { }
+#endif
+
+
+/* --------------------------------------------------------------------
+ * RTC
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_RTC_DRV_AT91RM9200) || defined(CONFIG_RTC_DRV_AT91RM9200_MODULE)
+static struct platform_device at91sam9g45_rtc_device = {
+ .name = "at91_rtc",
+ .id = -1,
+ .num_resources = 0,
+};
+
+static void __init at91_add_device_rtc(void)
+{
+ platform_device_register(&at91sam9g45_rtc_device);
+}
+#else
+static void __init at91_add_device_rtc(void) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * RTT
+ * -------------------------------------------------------------------- */
+
+static struct resource rtt_resources[] = {
+ {
+ .start = AT91_BASE_SYS + AT91_RTT,
+ .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device at91sam9g45_rtt_device = {
+ .name = "at91_rtt",
+ .id = 0,
+ .resource = rtt_resources,
+ .num_resources = ARRAY_SIZE(rtt_resources),
+};
+
+static void __init at91_add_device_rtt(void)
+{
+ platform_device_register(&at91sam9g45_rtt_device);
+}
+
+
+/* --------------------------------------------------------------------
+ * Watchdog
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_AT91SAM9_WATCHDOG) || defined(CONFIG_AT91SAM9_WATCHDOG_MODULE)
+static struct platform_device at91sam9g45_wdt_device = {
+ .name = "at91_wdt",
+ .id = -1,
+ .num_resources = 0,
+};
+
+static void __init at91_add_device_watchdog(void)
+{
+ platform_device_register(&at91sam9g45_wdt_device);
+}
+#else
+static void __init at91_add_device_watchdog(void) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * PWM
+ * --------------------------------------------------------------------*/
+
+#if defined(CONFIG_ATMEL_PWM) || defined(CONFIG_ATMEL_PWM_MODULE)
+static u32 pwm_mask;
+
+static struct resource pwm_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_PWMC,
+ .end = AT91SAM9G45_BASE_PWMC + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_PWMC,
+ .end = AT91SAM9G45_ID_PWMC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_pwm0_device = {
+ .name = "atmel_pwm",
+ .id = -1,
+ .dev = {
+ .platform_data = &pwm_mask,
+ },
+ .resource = pwm_resources,
+ .num_resources = ARRAY_SIZE(pwm_resources),
+};
+
+void __init at91_add_device_pwm(u32 mask)
+{
+ if (mask & (1 << AT91_PWM0))
+ at91_set_B_periph(AT91_PIN_PD24, 1); /* enable PWM0 */
+
+ if (mask & (1 << AT91_PWM1))
+ at91_set_B_periph(AT91_PIN_PD31, 1); /* enable PWM1 */
+
+ if (mask & (1 << AT91_PWM2))
+ at91_set_B_periph(AT91_PIN_PD26, 1); /* enable PWM2 */
+
+ if (mask & (1 << AT91_PWM3))
+ at91_set_B_periph(AT91_PIN_PD0, 1); /* enable PWM3 */
+
+ pwm_mask = mask;
+
+ platform_device_register(&at91sam9g45_pwm0_device);
+}
+#else
+void __init at91_add_device_pwm(u32 mask) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * SSC -- Synchronous Serial Controller
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE)
+static u64 ssc0_dmamask = DMA_BIT_MASK(32);
+
+static struct resource ssc0_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_SSC0,
+ .end = AT91SAM9G45_BASE_SSC0 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_SSC0,
+ .end = AT91SAM9G45_ID_SSC0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_ssc0_device = {
+ .name = "ssc",
+ .id = 0,
+ .dev = {
+ .dma_mask = &ssc0_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = ssc0_resources,
+ .num_resources = ARRAY_SIZE(ssc0_resources),
+};
+
+static inline void configure_ssc0_pins(unsigned pins)
+{
+ if (pins & ATMEL_SSC_TF)
+ at91_set_A_periph(AT91_PIN_PD1, 1);
+ if (pins & ATMEL_SSC_TK)
+ at91_set_A_periph(AT91_PIN_PD0, 1);
+ if (pins & ATMEL_SSC_TD)
+ at91_set_A_periph(AT91_PIN_PD2, 1);
+ if (pins & ATMEL_SSC_RD)
+ at91_set_A_periph(AT91_PIN_PD3, 1);
+ if (pins & ATMEL_SSC_RK)
+ at91_set_A_periph(AT91_PIN_PD4, 1);
+ if (pins & ATMEL_SSC_RF)
+ at91_set_A_periph(AT91_PIN_PD5, 1);
+}
+
+static u64 ssc1_dmamask = DMA_BIT_MASK(32);
+
+static struct resource ssc1_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_SSC1,
+ .end = AT91SAM9G45_BASE_SSC1 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_SSC1,
+ .end = AT91SAM9G45_ID_SSC1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_ssc1_device = {
+ .name = "ssc",
+ .id = 1,
+ .dev = {
+ .dma_mask = &ssc1_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = ssc1_resources,
+ .num_resources = ARRAY_SIZE(ssc1_resources),
+};
+
+static inline void configure_ssc1_pins(unsigned pins)
+{
+ if (pins & ATMEL_SSC_TF)
+ at91_set_A_periph(AT91_PIN_PD14, 1);
+ if (pins & ATMEL_SSC_TK)
+ at91_set_A_periph(AT91_PIN_PD12, 1);
+ if (pins & ATMEL_SSC_TD)
+ at91_set_A_periph(AT91_PIN_PD10, 1);
+ if (pins & ATMEL_SSC_RD)
+ at91_set_A_periph(AT91_PIN_PD11, 1);
+ if (pins & ATMEL_SSC_RK)
+ at91_set_A_periph(AT91_PIN_PD13, 1);
+ if (pins & ATMEL_SSC_RF)
+ at91_set_A_periph(AT91_PIN_PD15, 1);
+}
+
+/*
+ * SSC controllers are accessed through library code, instead of any
+ * kind of all-singing/all-dancing driver. For example one could be
+ * used by a particular I2S audio codec's driver, while another one
+ * on the same system might be used by a custom data capture driver.
+ */
+void __init at91_add_device_ssc(unsigned id, unsigned pins)
+{
+ struct platform_device *pdev;
+
+ /*
+ * NOTE: caller is responsible for passing information matching
+ * "pins" to whatever will be using each particular controller.
+ */
+ switch (id) {
+ case AT91SAM9G45_ID_SSC0:
+ pdev = &at91sam9g45_ssc0_device;
+ configure_ssc0_pins(pins);
+ at91_clock_associate("ssc0_clk", &pdev->dev, "pclk");
+ break;
+ case AT91SAM9G45_ID_SSC1:
+ pdev = &at91sam9g45_ssc1_device;
+ configure_ssc1_pins(pins);
+ at91_clock_associate("ssc1_clk", &pdev->dev, "pclk");
+ break;
+ default:
+ return;
+ }
+
+ platform_device_register(pdev);
+}
+
+#else
+void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * UART
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_SERIAL_ATMEL)
+static struct resource dbgu_resources[] = {
+ [0] = {
+ .start = AT91_VA_BASE_SYS + AT91_DBGU,
+ .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91_ID_SYS,
+ .end = AT91_ID_SYS,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct atmel_uart_data dbgu_data = {
+ .use_dma_tx = 0,
+ .use_dma_rx = 0,
+ .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
+};
+
+static u64 dbgu_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device at91sam9g45_dbgu_device = {
+ .name = "atmel_usart",
+ .id = 0,
+ .dev = {
+ .dma_mask = &dbgu_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &dbgu_data,
+ },
+ .resource = dbgu_resources,
+ .num_resources = ARRAY_SIZE(dbgu_resources),
+};
+
+static inline void configure_dbgu_pins(void)
+{
+ at91_set_A_periph(AT91_PIN_PB12, 0); /* DRXD */
+ at91_set_A_periph(AT91_PIN_PB13, 1); /* DTXD */
+}
+
+static struct resource uart0_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_US0,
+ .end = AT91SAM9G45_BASE_US0 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_US0,
+ .end = AT91SAM9G45_ID_US0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct atmel_uart_data uart0_data = {
+ .use_dma_tx = 1,
+ .use_dma_rx = 1,
+};
+
+static u64 uart0_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device at91sam9g45_uart0_device = {
+ .name = "atmel_usart",
+ .id = 1,
+ .dev = {
+ .dma_mask = &uart0_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &uart0_data,
+ },
+ .resource = uart0_resources,
+ .num_resources = ARRAY_SIZE(uart0_resources),
+};
+
+static inline void configure_usart0_pins(unsigned pins)
+{
+ at91_set_A_periph(AT91_PIN_PB19, 1); /* TXD0 */
+ at91_set_A_periph(AT91_PIN_PB18, 0); /* RXD0 */
+
+ if (pins & ATMEL_UART_RTS)
+ at91_set_B_periph(AT91_PIN_PB17, 0); /* RTS0 */
+ if (pins & ATMEL_UART_CTS)
+ at91_set_B_periph(AT91_PIN_PB15, 0); /* CTS0 */
+}
+
+static struct resource uart1_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_US1,
+ .end = AT91SAM9G45_BASE_US1 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_US1,
+ .end = AT91SAM9G45_ID_US1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct atmel_uart_data uart1_data = {
+ .use_dma_tx = 1,
+ .use_dma_rx = 1,
+};
+
+static u64 uart1_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device at91sam9g45_uart1_device = {
+ .name = "atmel_usart",
+ .id = 2,
+ .dev = {
+ .dma_mask = &uart1_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &uart1_data,
+ },
+ .resource = uart1_resources,
+ .num_resources = ARRAY_SIZE(uart1_resources),
+};
+
+static inline void configure_usart1_pins(unsigned pins)
+{
+ at91_set_A_periph(AT91_PIN_PB4, 1); /* TXD1 */
+ at91_set_A_periph(AT91_PIN_PB5, 0); /* RXD1 */
+
+ if (pins & ATMEL_UART_RTS)
+ at91_set_A_periph(AT91_PIN_PD16, 0); /* RTS1 */
+ if (pins & ATMEL_UART_CTS)
+ at91_set_A_periph(AT91_PIN_PD17, 0); /* CTS1 */
+}
+
+static struct resource uart2_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_US2,
+ .end = AT91SAM9G45_BASE_US2 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_US2,
+ .end = AT91SAM9G45_ID_US2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct atmel_uart_data uart2_data = {
+ .use_dma_tx = 1,
+ .use_dma_rx = 1,
+};
+
+static u64 uart2_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device at91sam9g45_uart2_device = {
+ .name = "atmel_usart",
+ .id = 3,
+ .dev = {
+ .dma_mask = &uart2_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &uart2_data,
+ },
+ .resource = uart2_resources,
+ .num_resources = ARRAY_SIZE(uart2_resources),
+};
+
+static inline void configure_usart2_pins(unsigned pins)
+{
+ at91_set_A_periph(AT91_PIN_PB6, 1); /* TXD2 */
+ at91_set_A_periph(AT91_PIN_PB7, 0); /* RXD2 */
+
+ if (pins & ATMEL_UART_RTS)
+ at91_set_B_periph(AT91_PIN_PC9, 0); /* RTS2 */
+ if (pins & ATMEL_UART_CTS)
+ at91_set_B_periph(AT91_PIN_PC11, 0); /* CTS2 */
+}
+
+static struct resource uart3_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_US3,
+ .end = AT91SAM9G45_BASE_US3 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_US3,
+ .end = AT91SAM9G45_ID_US3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct atmel_uart_data uart3_data = {
+ .use_dma_tx = 1,
+ .use_dma_rx = 1,
+};
+
+static u64 uart3_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device at91sam9g45_uart3_device = {
+ .name = "atmel_usart",
+ .id = 4,
+ .dev = {
+ .dma_mask = &uart3_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &uart3_data,
+ },
+ .resource = uart3_resources,
+ .num_resources = ARRAY_SIZE(uart3_resources),
+};
+
+static inline void configure_usart3_pins(unsigned pins)
+{
+ at91_set_A_periph(AT91_PIN_PB8, 1); /* TXD3 */
+ at91_set_A_periph(AT91_PIN_PB9, 0); /* RXD3 */
+
+ if (pins & ATMEL_UART_RTS)
+ at91_set_B_periph(AT91_PIN_PA23, 0); /* RTS3 */
+ if (pins & ATMEL_UART_CTS)
+ at91_set_B_periph(AT91_PIN_PA24, 0); /* CTS3 */
+}
+
+static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */
+struct platform_device *atmel_default_console_device; /* the serial console device */
+
+void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
+{
+ struct platform_device *pdev;
+
+ switch (id) {
+ case 0: /* DBGU */
+ pdev = &at91sam9g45_dbgu_device;
+ configure_dbgu_pins();
+ at91_clock_associate("mck", &pdev->dev, "usart");
+ break;
+ case AT91SAM9G45_ID_US0:
+ pdev = &at91sam9g45_uart0_device;
+ configure_usart0_pins(pins);
+ at91_clock_associate("usart0_clk", &pdev->dev, "usart");
+ break;
+ case AT91SAM9G45_ID_US1:
+ pdev = &at91sam9g45_uart1_device;
+ configure_usart1_pins(pins);
+ at91_clock_associate("usart1_clk", &pdev->dev, "usart");
+ break;
+ case AT91SAM9G45_ID_US2:
+ pdev = &at91sam9g45_uart2_device;
+ configure_usart2_pins(pins);
+ at91_clock_associate("usart2_clk", &pdev->dev, "usart");
+ break;
+ case AT91SAM9G45_ID_US3:
+ pdev = &at91sam9g45_uart3_device;
+ configure_usart3_pins(pins);
+ at91_clock_associate("usart3_clk", &pdev->dev, "usart");
+ break;
+ default:
+ return;
+ }
+ pdev->id = portnr; /* update to mapped ID */
+
+ if (portnr < ATMEL_MAX_UART)
+ at91_uarts[portnr] = pdev;
+}
+
+void __init at91_set_serial_console(unsigned portnr)
+{
+ if (portnr < ATMEL_MAX_UART)
+ atmel_default_console_device = at91_uarts[portnr];
+}
+
+void __init at91_add_device_serial(void)
+{
+ int i;
+
+ for (i = 0; i < ATMEL_MAX_UART; i++) {
+ if (at91_uarts[i])
+ platform_device_register(at91_uarts[i]);
+ }
+
+ if (!atmel_default_console_device)
+ printk(KERN_INFO "AT91: No default serial console defined.\n");
+}
+#else
+void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
+void __init at91_set_serial_console(unsigned portnr) {}
+void __init at91_add_device_serial(void) {}
+#endif
+
+
+/* -------------------------------------------------------------------- */
+/*
+ * These devices are always present and don't need any board-specific
+ * setup.
+ */
+static int __init at91_add_standard_devices(void)
+{
+ at91_add_device_rtc();
+ at91_add_device_rtt();
+ at91_add_device_watchdog();
+ at91_add_device_tc();
+ return 0;
+}
+
+arch_initcall(at91_add_standard_devices);
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index d5266da55311..f9b19993a7a9 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -287,7 +287,11 @@ static void __init ek_add_device_ts(void) {}
*/
static struct at73c213_board_info at73c213_data = {
.ssc_id = 1,
+#if defined(CONFIG_MACH_AT91SAM9261EK)
.shortname = "AT91SAM9261-EK external DAC",
+#else
+ .shortname = "AT91SAM9G10-EK external DAC",
+#endif
};
#if defined(CONFIG_SND_AT73C213) || defined(CONFIG_SND_AT73C213_MODULE)
@@ -414,6 +418,9 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
.default_monspecs = &at91fb_default_stn_monspecs,
.atmel_lcdfb_power_control = at91_lcdc_stn_power_control,
.guard_time = 1,
+#if defined(CONFIG_MACH_AT91SAM9G10EK)
+ .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB,
+#endif
};
#else
@@ -467,6 +474,9 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
.default_monspecs = &at91fb_default_tft_monspecs,
.atmel_lcdfb_power_control = at91_lcdc_tft_power_control,
.guard_time = 1,
+#if defined(CONFIG_MACH_AT91SAM9G10EK)
+ .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB,
+#endif
};
#endif
@@ -600,7 +610,11 @@ static void __init ek_board_init(void)
at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
}
+#if defined(CONFIG_MACH_AT91SAM9261EK)
MACHINE_START(AT91SAM9261EK, "Atmel AT91SAM9261-EK")
+#else
+MACHINE_START(AT91SAM9G10EK, "Atmel AT91SAM9G10-EK")
+#endif
/* Maintainer: Atmel */
.phys_io = AT91_BASE_SYS,
.io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index cc270beadd5d..a55398ed1211 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -24,6 +24,8 @@
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/at73c213.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
#include <linux/clk.h>
#include <mach/hardware.h>
@@ -218,6 +220,56 @@ static struct gpio_led ek_leds[] = {
}
};
+
+/*
+ * GPIO Buttons
+ */
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+static struct gpio_keys_button ek_buttons[] = {
+ {
+ .gpio = AT91_PIN_PA30,
+ .code = BTN_3,
+ .desc = "Button 3",
+ .active_low = 1,
+ .wakeup = 1,
+ },
+ {
+ .gpio = AT91_PIN_PA31,
+ .code = BTN_4,
+ .desc = "Button 4",
+ .active_low = 1,
+ .wakeup = 1,
+ }
+};
+
+static struct gpio_keys_platform_data ek_button_data = {
+ .buttons = ek_buttons,
+ .nbuttons = ARRAY_SIZE(ek_buttons),
+};
+
+static struct platform_device ek_button_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .num_resources = 0,
+ .dev = {
+ .platform_data = &ek_button_data,
+ }
+};
+
+static void __init ek_add_device_buttons(void)
+{
+ at91_set_gpio_input(AT91_PIN_PA30, 1); /* btn3 */
+ at91_set_deglitch(AT91_PIN_PA30, 1);
+ at91_set_gpio_input(AT91_PIN_PA31, 1); /* btn4 */
+ at91_set_deglitch(AT91_PIN_PA31, 1);
+
+ platform_device_register(&ek_button_device);
+}
+#else
+static void __init ek_add_device_buttons(void) {}
+#endif
+
+
static struct i2c_board_info __initdata ek_i2c_devices[] = {
{
I2C_BOARD_INFO("24c512", 0x50),
@@ -245,6 +297,8 @@ static void __init ek_board_init(void)
at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices));
/* LEDs */
at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
+ /* Push Buttons */
+ ek_add_device_buttons();
/* PCK0 provides MCLK to the WM8731 */
at91_set_B_periph(AT91_PIN_PC1, 0);
/* SSC (for WM8731) */
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
new file mode 100644
index 000000000000..b8558eae5229
--- /dev/null
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -0,0 +1,389 @@
+/*
+ * Board-specific setup code for the AT91SAM9M10G45 Evaluation Kit family
+ *
+ * Covers: * AT91SAM9G45-EKES board
+ * * AT91SAM9M10G45-EK board
+ *
+ * Copyright (C) 2009 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/fb.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/clk.h>
+
+#include <mach/hardware.h>
+#include <video/atmel_lcdc.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/hardware.h>
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/at91sam9_smc.h>
+#include <mach/at91_shdwc.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+
+static void __init ek_map_io(void)
+{
+ /* Initialize processor: 12.000 MHz crystal */
+ at91sam9g45_initialize(12000000);
+
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 not connected on the -EK board */
+ /* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */
+ at91_register_uart(AT91SAM9G45_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
+}
+
+static void __init ek_init_irq(void)
+{
+ at91sam9g45_init_interrupts(NULL);
+}
+
+
+/*
+ * USB HS Host port (common to OHCI & EHCI)
+ */
+static struct at91_usbh_data __initdata ek_usbh_hs_data = {
+ .ports = 2,
+ .vbus_pin = {AT91_PIN_PD1, AT91_PIN_PD3},
+};
+
+
+/*
+ * USB HS Device port
+ */
+static struct usba_platform_data __initdata ek_usba_udc_data = {
+ .vbus_pin = AT91_PIN_PB19,
+};
+
+
+/*
+ * SPI devices.
+ */
+static struct spi_board_info ek_spi_devices[] = {
+ { /* DataFlash chip */
+ .modalias = "mtd_dataflash",
+ .chip_select = 0,
+ .max_speed_hz = 15 * 1000 * 1000,
+ .bus_num = 0,
+ },
+};
+
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata ek_macb_data = {
+ .phy_irq_pin = AT91_PIN_PD5,
+ .is_rmii = 1,
+};
+
+
+/*
+ * NAND flash
+ */
+static struct mtd_partition __initdata ek_nand_partition[] = {
+ {
+ .name = "Partition 1",
+ .offset = 0,
+ .size = SZ_64M,
+ },
+ {
+ .name = "Partition 2",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
+{
+ *num_partitions = ARRAY_SIZE(ek_nand_partition);
+ return ek_nand_partition;
+}
+
+/* det_pin is not connected */
+static struct atmel_nand_data __initdata ek_nand_data = {
+ .ale = 21,
+ .cle = 22,
+ .rdy_pin = AT91_PIN_PC8,
+ .enable_pin = AT91_PIN_PC14,
+ .partition_info = nand_partitions,
+#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
+ .bus_width_16 = 1,
+#else
+ .bus_width_16 = 0,
+#endif
+};
+
+static struct sam9_smc_config __initdata ek_nand_smc_config = {
+ .ncs_read_setup = 0,
+ .nrd_setup = 2,
+ .ncs_write_setup = 0,
+ .nwe_setup = 2,
+
+ .ncs_read_pulse = 4,
+ .nrd_pulse = 4,
+ .ncs_write_pulse = 4,
+ .nwe_pulse = 4,
+
+ .read_cycle = 7,
+ .write_cycle = 7,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE,
+ .tdf_cycles = 3,
+};
+
+static void __init ek_add_device_nand(void)
+{
+ /* setup bus-width (8 or 16) */
+ if (ek_nand_data.bus_width_16)
+ ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
+ else
+ ek_nand_smc_config.mode |= AT91_SMC_DBW_8;
+
+ /* configure chip-select 3 (NAND) */
+ sam9_smc_configure(3, &ek_nand_smc_config);
+
+ at91_add_device_nand(&ek_nand_data);
+}
+
+
+/*
+ * LCD Controller
+ */
+#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
+static struct fb_videomode at91_tft_vga_modes[] = {
+ {
+ .name = "LG",
+ .refresh = 60,
+ .xres = 480, .yres = 272,
+ .pixclock = KHZ2PICOS(9000),
+
+ .left_margin = 1, .right_margin = 1,
+ .upper_margin = 40, .lower_margin = 1,
+ .hsync_len = 45, .vsync_len = 1,
+
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+};
+
+static struct fb_monspecs at91fb_default_monspecs = {
+ .manufacturer = "LG",
+ .monitor = "LB043WQ1",
+
+ .modedb = at91_tft_vga_modes,
+ .modedb_len = ARRAY_SIZE(at91_tft_vga_modes),
+ .hfmin = 15000,
+ .hfmax = 17640,
+ .vfmin = 57,
+ .vfmax = 67,
+};
+
+#define AT91SAM9G45_DEFAULT_LCDCON2 (ATMEL_LCDC_MEMOR_LITTLE \
+ | ATMEL_LCDC_DISTYPE_TFT \
+ | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
+
+/* Driver datas */
+static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+ .lcdcon_is_backlight = true,
+ .default_bpp = 32,
+ .default_dmacon = ATMEL_LCDC_DMAEN,
+ .default_lcdcon2 = AT91SAM9G45_DEFAULT_LCDCON2,
+ .default_monspecs = &at91fb_default_monspecs,
+ .guard_time = 9,
+ .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB,
+};
+
+#else
+static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+#endif
+
+
+/*
+ * GPIO Buttons
+ */
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+static struct gpio_keys_button ek_buttons[] = {
+ { /* BP1, "leftclic" */
+ .code = BTN_LEFT,
+ .gpio = AT91_PIN_PB6,
+ .active_low = 1,
+ .desc = "left_click",
+ .wakeup = 1,
+ },
+ { /* BP2, "rightclic" */
+ .code = BTN_RIGHT,
+ .gpio = AT91_PIN_PB7,
+ .active_low = 1,
+ .desc = "right_click",
+ .wakeup = 1,
+ },
+ /* BP3, "joystick" */
+ {
+ .code = KEY_LEFT,
+ .gpio = AT91_PIN_PB14,
+ .active_low = 1,
+ .desc = "Joystick Left",
+ },
+ {
+ .code = KEY_RIGHT,
+ .gpio = AT91_PIN_PB15,
+ .active_low = 1,
+ .desc = "Joystick Right",
+ },
+ {
+ .code = KEY_UP,
+ .gpio = AT91_PIN_PB16,
+ .active_low = 1,
+ .desc = "Joystick Up",
+ },
+ {
+ .code = KEY_DOWN,
+ .gpio = AT91_PIN_PB17,
+ .active_low = 1,
+ .desc = "Joystick Down",
+ },
+ {
+ .code = KEY_ENTER,
+ .gpio = AT91_PIN_PB18,
+ .active_low = 1,
+ .desc = "Joystick Press",
+ },
+};
+
+static struct gpio_keys_platform_data ek_button_data = {
+ .buttons = ek_buttons,
+ .nbuttons = ARRAY_SIZE(ek_buttons),
+};
+
+static struct platform_device ek_button_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .num_resources = 0,
+ .dev = {
+ .platform_data = &ek_button_data,
+ }
+};
+
+static void __init ek_add_device_buttons(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ek_buttons); i++) {
+ at91_set_GPIO_periph(ek_buttons[i].gpio, 1);
+ at91_set_deglitch(ek_buttons[i].gpio, 1);
+ }
+
+ platform_device_register(&ek_button_device);
+}
+#else
+static void __init ek_add_device_buttons(void) {}
+#endif
+
+
+/*
+ * LEDs ... these could all be PWM-driven, for variable brightness
+ */
+static struct gpio_led ek_leds[] = {
+ { /* "top" led, red, powerled */
+ .name = "d8",
+ .gpio = AT91_PIN_PD30,
+ .default_trigger = "heartbeat",
+ },
+ { /* "left" led, green, userled2, pwm3 */
+ .name = "d6",
+ .gpio = AT91_PIN_PD0,
+ .active_low = 1,
+ .default_trigger = "nand-disk",
+ },
+#if !(defined(CONFIG_LEDS_ATMEL_PWM) || defined(CONFIG_LEDS_ATMEL_PWM_MODULE))
+ { /* "right" led, green, userled1, pwm1 */
+ .name = "d7",
+ .gpio = AT91_PIN_PD31,
+ .active_low = 1,
+ .default_trigger = "mmc0",
+ },
+#endif
+};
+
+
+/*
+ * PWM Leds
+ */
+static struct gpio_led ek_pwm_led[] = {
+#if defined(CONFIG_LEDS_ATMEL_PWM) || defined(CONFIG_LEDS_ATMEL_PWM_MODULE)
+ { /* "right" led, green, userled1, pwm1 */
+ .name = "d7",
+ .gpio = 1, /* is PWM channel number */
+ .active_low = 1,
+ .default_trigger = "none",
+ },
+#endif
+};
+
+
+
+static void __init ek_board_init(void)
+{
+ /* Serial */
+ at91_add_device_serial();
+ /* USB HS Host */
+ at91_add_device_usbh_ohci(&ek_usbh_hs_data);
+ /* USB HS Device */
+ at91_add_device_usba(&ek_usba_udc_data);
+ /* SPI */
+ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
+ /* Ethernet */
+ at91_add_device_eth(&ek_macb_data);
+ /* NAND */
+ ek_add_device_nand();
+ /* I2C */
+ at91_add_device_i2c(0, NULL, 0);
+ /* LCD Controller */
+ at91_add_device_lcdc(&ek_lcdc_data);
+ /* Push Buttons */
+ ek_add_device_buttons();
+ /* LEDs */
+ at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
+ at91_pwm_leds(ek_pwm_led, ARRAY_SIZE(ek_pwm_led));
+}
+
+MACHINE_START(AT91SAM9G45EKES, "Atmel AT91SAM9G45-EKES")
+ /* Maintainer: Atmel */
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91sam926x_timer,
+ .map_io = ek_map_io,
+ .init_irq = ek_init_irq,
+ .init_machine = ek_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 35e12a49d1a6..9d07679efce7 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -15,6 +15,8 @@
#include <linux/spi/spi.h>
#include <linux/fb.h>
#include <linux/clk.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
#include <video/atmel_lcdc.h>
@@ -186,19 +188,21 @@ static struct fb_monspecs at91fb_default_monspecs = {
static void at91_lcdc_power_control(int on)
{
if (on)
- at91_set_gpio_value(AT91_PIN_PA30, 0); /* power up */
+ at91_set_gpio_value(AT91_PIN_PC1, 0); /* power up */
else
- at91_set_gpio_value(AT91_PIN_PA30, 1); /* power down */
+ at91_set_gpio_value(AT91_PIN_PC1, 1); /* power down */
}
/* Driver datas */
static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+ .lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
.default_lcdcon2 = AT91SAM9RL_DEFAULT_LCDCON2,
.default_monspecs = &at91fb_default_monspecs,
.atmel_lcdfb_power_control = at91_lcdc_power_control,
.guard_time = 1,
+ .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB,
};
#else
@@ -206,6 +210,79 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data;
#endif
+/*
+ * LEDs
+ */
+static struct gpio_led ek_leds[] = {
+ { /* "bottom" led, green, userled1 to be defined */
+ .name = "ds1",
+ .gpio = AT91_PIN_PD15,
+ .active_low = 1,
+ .default_trigger = "none",
+ },
+ { /* "bottom" led, green, userled2 to be defined */
+ .name = "ds2",
+ .gpio = AT91_PIN_PD16,
+ .active_low = 1,
+ .default_trigger = "none",
+ },
+ { /* "power" led, yellow */
+ .name = "ds3",
+ .gpio = AT91_PIN_PD14,
+ .default_trigger = "heartbeat",
+ }
+};
+
+
+/*
+ * GPIO Buttons
+ */
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+static struct gpio_keys_button ek_buttons[] = {
+ {
+ .gpio = AT91_PIN_PB0,
+ .code = BTN_2,
+ .desc = "Right Click",
+ .active_low = 1,
+ .wakeup = 1,
+ },
+ {
+ .gpio = AT91_PIN_PB1,
+ .code = BTN_1,
+ .desc = "Left Click",
+ .active_low = 1,
+ .wakeup = 1,
+ }
+};
+
+static struct gpio_keys_platform_data ek_button_data = {
+ .buttons = ek_buttons,
+ .nbuttons = ARRAY_SIZE(ek_buttons),
+};
+
+static struct platform_device ek_button_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .num_resources = 0,
+ .dev = {
+ .platform_data = &ek_button_data,
+ }
+};
+
+static void __init ek_add_device_buttons(void)
+{
+ at91_set_gpio_input(AT91_PIN_PB1, 1); /* btn1 */
+ at91_set_deglitch(AT91_PIN_PB1, 1);
+ at91_set_gpio_input(AT91_PIN_PB0, 1); /* btn2 */
+ at91_set_deglitch(AT91_PIN_PB0, 1);
+
+ platform_device_register(&ek_button_device);
+}
+#else
+static void __init ek_add_device_buttons(void) {}
+#endif
+
+
static void __init ek_board_init(void)
{
/* Serial */
@@ -224,6 +301,10 @@ static void __init ek_board_init(void)
at91_add_device_lcdc(&ek_lcdc_data);
/* Touch Screen Controller */
at91_add_device_tsadcc();
+ /* LEDs */
+ at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
+ /* Push Buttons */
+ ek_add_device_buttons();
}
MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK")
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index bac578fe0d3d..c042dcf4725f 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -47,20 +47,25 @@
* Chips have some kind of clocks : group them by functionality
*/
#define cpu_has_utmi() ( cpu_is_at91cap9() \
- || cpu_is_at91sam9rl())
+ || cpu_is_at91sam9rl() \
+ || cpu_is_at91sam9g45())
-#define cpu_has_800M_plla() (cpu_is_at91sam9g20())
+#define cpu_has_800M_plla() ( cpu_is_at91sam9g20() \
+ || cpu_is_at91sam9g45())
-#define cpu_has_pllb() (!cpu_is_at91sam9rl())
+#define cpu_has_300M_plla() (cpu_is_at91sam9g10())
-#define cpu_has_upll() (0)
+#define cpu_has_pllb() (!(cpu_is_at91sam9rl() \
+ || cpu_is_at91sam9g45()))
+
+#define cpu_has_upll() (cpu_is_at91sam9g45())
/* USB host HS & FS */
#define cpu_has_uhp() (!cpu_is_at91sam9rl())
/* USB device FS only */
-#define cpu_has_udpfs() (!cpu_is_at91sam9rl())
-
+#define cpu_has_udpfs() (!(cpu_is_at91sam9rl() \
+ || cpu_is_at91sam9g45()))
static LIST_HEAD(clocks);
static DEFINE_SPINLOCK(clk_lock);
@@ -133,6 +138,13 @@ static void pmc_uckr_mode(struct clk *clk, int is_on)
{
unsigned int uckr = at91_sys_read(AT91_CKGR_UCKR);
+ if (cpu_is_at91sam9g45()) {
+ if (is_on)
+ uckr |= AT91_PMC_BIASEN;
+ else
+ uckr &= ~AT91_PMC_BIASEN;
+ }
+
if (is_on) {
is_on = AT91_PMC_LOCKU;
at91_sys_write(AT91_CKGR_UCKR, uckr | clk->pmc_mask);
@@ -310,6 +322,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
unsigned long flags;
unsigned prescale;
unsigned long actual;
+ unsigned long prev = ULONG_MAX;
if (!clk_is_programmable(clk))
return -EINVAL;
@@ -317,8 +330,16 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
actual = clk->parent->rate_hz;
for (prescale = 0; prescale < 7; prescale++) {
- if (actual && actual <= rate)
+ if (actual > rate)
+ prev = actual;
+
+ if (actual && actual <= rate) {
+ if ((prev - rate) < (rate - actual)) {
+ actual = prev;
+ prescale--;
+ }
break;
+ }
actual >>= 1;
}
@@ -373,6 +394,10 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
return -EBUSY;
if (!clk_is_primary(parent) || !clk_is_programmable(clk))
return -EINVAL;
+
+ if (cpu_is_at91sam9rl() && parent->id == AT91_PMC_CSS_PLLB)
+ return -EINVAL;
+
spin_lock_irqsave(&clk_lock, flags);
clk->rate_hz = parent->rate_hz;
@@ -601,7 +626,9 @@ static void __init at91_pllb_usbfs_clock_init(unsigned long main_clock)
uhpck.pmc_mask = AT91RM9200_PMC_UHP;
udpck.pmc_mask = AT91RM9200_PMC_UDP;
at91_sys_write(AT91_PMC_SCER, AT91RM9200_PMC_MCKUDP);
- } else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() || cpu_is_at91sam9263() || cpu_is_at91sam9g20()) {
+ } else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() ||
+ cpu_is_at91sam9263() || cpu_is_at91sam9g20() ||
+ cpu_is_at91sam9g10()) {
uhpck.pmc_mask = AT91SAM926x_PMC_UHP;
udpck.pmc_mask = AT91SAM926x_PMC_UDP;
} else if (cpu_is_at91cap9()) {
@@ -637,6 +664,7 @@ int __init at91_clock_init(unsigned long main_clock)
{
unsigned tmp, freq, mckr;
int i;
+ int pll_overclock = false;
/*
* When the bootloader initialized the main oscillator correctly,
@@ -654,12 +682,25 @@ int __init at91_clock_init(unsigned long main_clock)
/* report if PLLA is more than mildly overclocked */
plla.rate_hz = at91_pll_rate(&plla, main_clock, at91_sys_read(AT91_CKGR_PLLAR));
- if ((!cpu_has_800M_plla() && plla.rate_hz > 209000000)
- || (cpu_has_800M_plla() && plla.rate_hz > 800000000))
+ if (cpu_has_300M_plla()) {
+ if (plla.rate_hz > 300000000)
+ pll_overclock = true;
+ } else if (cpu_has_800M_plla()) {
+ if (plla.rate_hz > 800000000)
+ pll_overclock = true;
+ } else {
+ if (plla.rate_hz > 209000000)
+ pll_overclock = true;
+ }
+ if (pll_overclock)
pr_info("Clocks: PLLA overclocked, %ld MHz\n", plla.rate_hz / 1000000);
+ if (cpu_is_at91sam9g45()) {
+ mckr = at91_sys_read(AT91_PMC_MCKR);
+ plla.rate_hz /= (1 << ((mckr & AT91_PMC_PLLADIV2) >> 12)); /* plla divisor by 2 */
+ }
- if (cpu_has_upll() && !cpu_has_pllb()) {
+ if (!cpu_has_pllb() && cpu_has_upll()) {
/* setup UTMI clock as the fourth primary clock
* (instead of pllb) */
utmi_clk.type |= CLK_TYPE_PRIMARY;
@@ -701,6 +742,9 @@ int __init at91_clock_init(unsigned long main_clock)
freq / ((mckr & AT91_PMC_MDIV) >> 7) : freq; /* mdiv ; (x >> 7) = ((x >> 8) * 2) */
if (mckr & AT91_PMC_PDIV)
freq /= 2; /* processor clock division */
+ } else if (cpu_is_at91sam9g45()) {
+ mck.rate_hz = (mckr & AT91_PMC_MDIV) == AT91SAM9_PMC_MDIV_3 ?
+ freq / 3 : freq / (1 << ((mckr & AT91_PMC_MDIV) >> 8)); /* mdiv */
} else {
mck.rate_hz = freq / (1 << ((mckr & AT91_PMC_MDIV) >> 8)); /* mdiv */
}
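
Note: the reworked clk_round_rate() loop above no longer simply rounds down; it remembers the last prescaled rate above the request and returns whichever neighbour is closer. The following standalone sketch reproduces just that selection with made-up numbers (the kernel version also steps the prescaler back when it picks the higher rate, which is omitted here since only the rate is returned):

	/* Illustration of the nearest-rate selection added to clk_round_rate();
	 * the parent and request values are hypothetical. */
	#include <stdio.h>
	#include <limits.h>

	static unsigned long example_round_rate(unsigned long parent, unsigned long rate)
	{
		unsigned long actual = parent;
		unsigned long prev = ULONG_MAX;
		int prescale;

		for (prescale = 0; prescale < 7; prescale++) {
			if (actual > rate)
				prev = actual;		/* rate just above the request */
			if (actual && actual <= rate) {
				if ((prev - rate) < (rate - actual))
					actual = prev;	/* higher neighbour is closer */
				break;
			}
			actual >>= 1;
		}
		return actual;
	}

	int main(void)
	{
		/* 200 MHz parent: a 60 MHz request returns 50 MHz, a 90 MHz request 100 MHz */
		printf("%lu\n", example_round_rate(200000000UL, 60000000UL));
		printf("%lu\n", example_round_rate(200000000UL, 90000000UL));
		return 0;
	}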
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index b5daf7f5e011..88e413b38480 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -14,6 +14,7 @@ extern void __init at91sam9260_initialize(unsigned long main_clock);
extern void __init at91sam9261_initialize(unsigned long main_clock);
extern void __init at91sam9263_initialize(unsigned long main_clock);
extern void __init at91sam9rl_initialize(unsigned long main_clock);
+extern void __init at91sam9g45_initialize(unsigned long main_clock);
extern void __init at91x40_initialize(unsigned long main_clock);
extern void __init at91cap9_initialize(unsigned long main_clock);
@@ -23,6 +24,7 @@ extern void __init at91sam9260_init_interrupts(unsigned int priority[]);
extern void __init at91sam9261_init_interrupts(unsigned int priority[]);
extern void __init at91sam9263_init_interrupts(unsigned int priority[]);
extern void __init at91sam9rl_init_interrupts(unsigned int priority[]);
+extern void __init at91sam9g45_init_interrupts(unsigned int priority[]);
extern void __init at91x40_init_interrupts(unsigned int priority[]);
extern void __init at91cap9_init_interrupts(unsigned int priority[]);
extern void __init at91_aic_init(unsigned int priority[]);
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c
index f2236f0e101f..ae4772e744ac 100644
--- a/arch/arm/mach-at91/gpio.c
+++ b/arch/arm/mach-at91/gpio.c
@@ -44,13 +44,11 @@ static int at91_gpiolib_direction_output(struct gpio_chip *chip,
unsigned offset, int val);
static int at91_gpiolib_direction_input(struct gpio_chip *chip,
unsigned offset);
-static int at91_gpiolib_request(struct gpio_chip *chip, unsigned offset);
#define AT91_GPIO_CHIP(name, base_gpio, nr_gpio) \
{ \
.chip = { \
.label = name, \
- .request = at91_gpiolib_request, \
.direction_input = at91_gpiolib_direction_input, \
.direction_output = at91_gpiolib_direction_output, \
.get = at91_gpiolib_get, \
@@ -588,19 +586,6 @@ static void at91_gpiolib_set(struct gpio_chip *chip, unsigned offset, int val)
__raw_writel(mask, pio + (val ? PIO_SODR : PIO_CODR));
}
-static int at91_gpiolib_request(struct gpio_chip *chip, unsigned offset)
-{
- unsigned pin = chip->base + offset;
- void __iomem *pio = pin_to_controller(pin);
- unsigned mask = pin_to_mask(pin);
-
- /* Cannot request GPIOs that are in alternate function mode */
- if (!(__raw_readl(pio + PIO_PSR) & mask))
- return -EPERM;
-
- return 0;
-}
-
static void at91_gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
int i;
diff --git a/arch/arm/mach-at91/include/mach/at91sam9261.h b/arch/arm/mach-at91/include/mach/at91sam9261.h
index 3a348ca20773..87de8be17484 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9261.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9261.h
@@ -95,6 +95,9 @@
#define AT91SAM9261_SRAM_BASE 0x00300000 /* Internal SRAM base address */
#define AT91SAM9261_SRAM_SIZE 0x00028000 /* Internal SRAM size (160Kb) */
+#define AT91SAM9G10_SRAM_BASE AT91SAM9261_SRAM_BASE /* Internal SRAM base address */
+#define AT91SAM9G10_SRAM_SIZE 0x00004000 /* Internal SRAM size (16Kb) */
+
#define AT91SAM9261_ROM_BASE 0x00400000 /* Internal ROM base address */
#define AT91SAM9261_ROM_SIZE SZ_32K /* Internal ROM size (32Kb) */
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45.h b/arch/arm/mach-at91/include/mach/at91sam9g45.h
new file mode 100644
index 000000000000..2c42cf5f0c54
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/at91sam9g45.h
@@ -0,0 +1,138 @@
+/*
+ * Chip-specific header file for the AT91SAM9G45 family
+ *
+ * Copyright (C) 2008-2009 Atmel Corporation.
+ *
+ * Common definitions.
+ * Based on AT91SAM9G45 preliminary datasheet.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91SAM9G45_H
+#define AT91SAM9G45_H
+
+/*
+ * Peripheral identifiers/interrupts.
+ */
+#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
+#define AT91_ID_SYS 1 /* System Controller Interrupt */
+#define AT91SAM9G45_ID_PIOA 2 /* Parallel I/O Controller A */
+#define AT91SAM9G45_ID_PIOB 3 /* Parallel I/O Controller B */
+#define AT91SAM9G45_ID_PIOC 4 /* Parallel I/O Controller C */
+#define AT91SAM9G45_ID_PIODE 5 /* Parallel I/O Controller D and E */
+#define AT91SAM9G45_ID_TRNG 6 /* True Random Number Generator */
+#define AT91SAM9G45_ID_US0 7 /* USART 0 */
+#define AT91SAM9G45_ID_US1 8 /* USART 1 */
+#define AT91SAM9G45_ID_US2 9 /* USART 2 */
+#define AT91SAM9G45_ID_US3 10 /* USART 3 */
+#define AT91SAM9G45_ID_MCI0 11 /* High Speed Multimedia Card Interface 0 */
+#define AT91SAM9G45_ID_TWI0 12 /* Two-Wire Interface 0 */
+#define AT91SAM9G45_ID_TWI1 13 /* Two-Wire Interface 1 */
+#define AT91SAM9G45_ID_SPI0 14 /* Serial Peripheral Interface 0 */
+#define AT91SAM9G45_ID_SPI1 15 /* Serial Peripheral Interface 1 */
+#define AT91SAM9G45_ID_SSC0 16 /* Synchronous Serial Controller 0 */
+#define AT91SAM9G45_ID_SSC1 17 /* Synchronous Serial Controller 1 */
+#define AT91SAM9G45_ID_TCB 18 /* Timer Counter 0, 1, 2, 3, 4 and 5 */
+#define AT91SAM9G45_ID_PWMC 19 /* Pulse Width Modulation Controller */
+#define AT91SAM9G45_ID_TSC 20 /* Touch Screen ADC Controller */
+#define AT91SAM9G45_ID_DMA 21 /* DMA Controller */
+#define AT91SAM9G45_ID_UHPHS 22 /* USB Host High Speed */
+#define AT91SAM9G45_ID_LCDC 23 /* LCD Controller */
+#define AT91SAM9G45_ID_AC97C 24 /* AC97 Controller */
+#define AT91SAM9G45_ID_EMAC 25 /* Ethernet MAC */
+#define AT91SAM9G45_ID_ISI 26 /* Image Sensor Interface */
+#define AT91SAM9G45_ID_UDPHS 27 /* USB Device High Speed */
+#define AT91SAM9G45_ID_AESTDESSHA 28 /* AES + T-DES + SHA */
+#define AT91SAM9G45_ID_MCI1 29 /* High Speed Multimedia Card Interface 1 */
+#define AT91SAM9G45_ID_VDEC 30 /* Video Decoder */
+#define AT91SAM9G45_ID_IRQ0 31 /* Advanced Interrupt Controller */
+
+/*
+ * User Peripheral physical base addresses.
+ */
+#define AT91SAM9G45_BASE_UDPHS 0xfff78000
+#define AT91SAM9G45_BASE_TCB0 0xfff7c000
+#define AT91SAM9G45_BASE_TC0 0xfff7c000
+#define AT91SAM9G45_BASE_TC1 0xfff7c040
+#define AT91SAM9G45_BASE_TC2 0xfff7c080
+#define AT91SAM9G45_BASE_MCI0 0xfff80000
+#define AT91SAM9G45_BASE_TWI0 0xfff84000
+#define AT91SAM9G45_BASE_TWI1 0xfff88000
+#define AT91SAM9G45_BASE_US0 0xfff8c000
+#define AT91SAM9G45_BASE_US1 0xfff90000
+#define AT91SAM9G45_BASE_US2 0xfff94000
+#define AT91SAM9G45_BASE_US3 0xfff98000
+#define AT91SAM9G45_BASE_SSC0 0xfff9c000
+#define AT91SAM9G45_BASE_SSC1 0xfffa0000
+#define AT91SAM9G45_BASE_SPI0 0xfffa4000
+#define AT91SAM9G45_BASE_SPI1 0xfffa8000
+#define AT91SAM9G45_BASE_AC97C 0xfffac000
+#define AT91SAM9G45_BASE_TSC 0xfffb0000
+#define AT91SAM9G45_BASE_ISI 0xfffb4000
+#define AT91SAM9G45_BASE_PWMC 0xfffb8000
+#define AT91SAM9G45_BASE_EMAC 0xfffbc000
+#define AT91SAM9G45_BASE_AES 0xfffc0000
+#define AT91SAM9G45_BASE_TDES 0xfffc4000
+#define AT91SAM9G45_BASE_SHA 0xfffc8000
+#define AT91SAM9G45_BASE_TRNG 0xfffcc000
+#define AT91SAM9G45_BASE_MCI1 0xfffd0000
+#define AT91SAM9G45_BASE_TCB1 0xfffd4000
+#define AT91SAM9G45_BASE_TC3 0xfffd4000
+#define AT91SAM9G45_BASE_TC4 0xfffd4040
+#define AT91SAM9G45_BASE_TC5 0xfffd4080
+#define AT91_BASE_SYS 0xffffe200
+
+/*
+ * System Peripherals (offset from AT91_BASE_SYS)
+ */
+#define AT91_ECC (0xffffe200 - AT91_BASE_SYS)
+#define AT91_DDRSDRC1 (0xffffe400 - AT91_BASE_SYS)
+#define AT91_DDRSDRC0 (0xffffe600 - AT91_BASE_SYS)
+#define AT91_SMC (0xffffe800 - AT91_BASE_SYS)
+#define AT91_MATRIX (0xffffea00 - AT91_BASE_SYS)
+#define AT91_DMA (0xffffec00 - AT91_BASE_SYS)
+#define AT91_DBGU (0xffffee00 - AT91_BASE_SYS)
+#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
+#define AT91_PIOA (0xfffff200 - AT91_BASE_SYS)
+#define AT91_PIOB (0xfffff400 - AT91_BASE_SYS)
+#define AT91_PIOC (0xfffff600 - AT91_BASE_SYS)
+#define AT91_PIOD (0xfffff800 - AT91_BASE_SYS)
+#define AT91_PIOE (0xfffffa00 - AT91_BASE_SYS)
+#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
+#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
+#define AT91_SHDWC (0xfffffd10 - AT91_BASE_SYS)
+#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS)
+#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
+#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
+#define AT91_GPBR (0xfffffd60 - AT91_BASE_SYS)
+#define AT91_RTC (0xfffffdb0 - AT91_BASE_SYS)
+
+#define AT91_USART0 AT91SAM9G45_BASE_US0
+#define AT91_USART1 AT91SAM9G45_BASE_US1
+#define AT91_USART2 AT91SAM9G45_BASE_US2
+#define AT91_USART3 AT91SAM9G45_BASE_US3
+
+/*
+ * Internal Memory.
+ */
+#define AT91SAM9G45_SRAM_BASE 0x00300000 /* Internal SRAM base address */
+#define AT91SAM9G45_SRAM_SIZE SZ_64K /* Internal SRAM size (64Kb) */
+
+#define AT91SAM9G45_ROM_BASE 0x00400000 /* Internal ROM base address */
+#define AT91SAM9G45_ROM_SIZE SZ_64K /* Internal ROM size (64Kb) */
+
+#define AT91SAM9G45_LCDC_BASE 0x00500000 /* LCD Controller */
+#define AT91SAM9G45_UDPHS_FIFO 0x00600000 /* USB Device HS controller */
+#define AT91SAM9G45_OHCI_BASE 0x00700000 /* USB Host controller (OHCI) */
+#define AT91SAM9G45_EHCI_BASE 0x00800000 /* USB Host controller (EHCI) */
+#define AT91SAM9G45_VDEC_BASE 0x00900000 /* Video Decoder Controller */
+
+#define CONFIG_DRAM_BASE AT91_CHIPSELECT_6
+
+#define CONSISTENT_DMA_SIZE SZ_4M
+
+#endif
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45_matrix.h b/arch/arm/mach-at91/include/mach/at91sam9g45_matrix.h
new file mode 100644
index 000000000000..c972d60e0aeb
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/at91sam9g45_matrix.h
@@ -0,0 +1,153 @@
+/*
+ * Matrix-centric header file for the AT91SAM9G45 family
+ *
+ * Copyright (C) 2008-2009 Atmel Corporation.
+ *
+ * Memory Controllers (MATRIX, EBI) - System peripherals registers.
+ * Based on AT91SAM9G45 preliminary datasheet.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91SAM9G45_MATRIX_H
+#define AT91SAM9G45_MATRIX_H
+
+#define AT91_MATRIX_MCFG0 (AT91_MATRIX + 0x00) /* Master Configuration Register 0 */
+#define AT91_MATRIX_MCFG1 (AT91_MATRIX + 0x04) /* Master Configuration Register 1 */
+#define AT91_MATRIX_MCFG2 (AT91_MATRIX + 0x08) /* Master Configuration Register 2 */
+#define AT91_MATRIX_MCFG3 (AT91_MATRIX + 0x0C) /* Master Configuration Register 3 */
+#define AT91_MATRIX_MCFG4 (AT91_MATRIX + 0x10) /* Master Configuration Register 4 */
+#define AT91_MATRIX_MCFG5 (AT91_MATRIX + 0x14) /* Master Configuration Register 5 */
+#define AT91_MATRIX_MCFG6 (AT91_MATRIX + 0x18) /* Master Configuration Register 6 */
+#define AT91_MATRIX_MCFG7 (AT91_MATRIX + 0x1C) /* Master Configuration Register 7 */
+#define AT91_MATRIX_MCFG8 (AT91_MATRIX + 0x20) /* Master Configuration Register 8 */
+#define AT91_MATRIX_MCFG9 (AT91_MATRIX + 0x24) /* Master Configuration Register 9 */
+#define AT91_MATRIX_MCFG10 (AT91_MATRIX + 0x28) /* Master Configuration Register 10 */
+#define AT91_MATRIX_MCFG11 (AT91_MATRIX + 0x2C) /* Master Configuration Register 11 */
+#define AT91_MATRIX_ULBT (7 << 0) /* Undefined Length Burst Type */
+#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
+#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
+#define AT91_MATRIX_ULBT_FOUR (2 << 0)
+#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
+#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
+#define AT91_MATRIX_ULBT_THIRTYTWO (5 << 0)
+#define AT91_MATRIX_ULBT_SIXTYFOUR (6 << 0)
+#define AT91_MATRIX_ULBT_128 (7 << 0)
+
+#define AT91_MATRIX_SCFG0 (AT91_MATRIX + 0x40) /* Slave Configuration Register 0 */
+#define AT91_MATRIX_SCFG1 (AT91_MATRIX + 0x44) /* Slave Configuration Register 1 */
+#define AT91_MATRIX_SCFG2 (AT91_MATRIX + 0x48) /* Slave Configuration Register 2 */
+#define AT91_MATRIX_SCFG3 (AT91_MATRIX + 0x4C) /* Slave Configuration Register 3 */
+#define AT91_MATRIX_SCFG4 (AT91_MATRIX + 0x50) /* Slave Configuration Register 4 */
+#define AT91_MATRIX_SCFG5 (AT91_MATRIX + 0x54) /* Slave Configuration Register 5 */
+#define AT91_MATRIX_SCFG6 (AT91_MATRIX + 0x58) /* Slave Configuration Register 6 */
+#define AT91_MATRIX_SCFG7 (AT91_MATRIX + 0x5C) /* Slave Configuration Register 7 */
+#define AT91_MATRIX_SLOT_CYCLE (0x1ff << 0) /* Maximum Number of Allowed Cycles for a Burst */
+#define AT91_MATRIX_DEFMSTR_TYPE (3 << 16) /* Default Master Type */
+#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
+#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
+#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
+#define AT91_MATRIX_FIXED_DEFMSTR (0xf << 18) /* Fixed Index of Default Master */
+
+#define AT91_MATRIX_PRAS0 (AT91_MATRIX + 0x80) /* Priority Register A for Slave 0 */
+#define AT91_MATRIX_PRBS0 (AT91_MATRIX + 0x84) /* Priority Register B for Slave 0 */
+#define AT91_MATRIX_PRAS1 (AT91_MATRIX + 0x88) /* Priority Register A for Slave 1 */
+#define AT91_MATRIX_PRBS1 (AT91_MATRIX + 0x8C) /* Priority Register B for Slave 1 */
+#define AT91_MATRIX_PRAS2 (AT91_MATRIX + 0x90) /* Priority Register A for Slave 2 */
+#define AT91_MATRIX_PRBS2 (AT91_MATRIX + 0x94) /* Priority Register B for Slave 2 */
+#define AT91_MATRIX_PRAS3 (AT91_MATRIX + 0x98) /* Priority Register A for Slave 3 */
+#define AT91_MATRIX_PRBS3 (AT91_MATRIX + 0x9C) /* Priority Register B for Slave 3 */
+#define AT91_MATRIX_PRAS4 (AT91_MATRIX + 0xA0) /* Priority Register A for Slave 4 */
+#define AT91_MATRIX_PRBS4 (AT91_MATRIX + 0xA4) /* Priority Register B for Slave 4 */
+#define AT91_MATRIX_PRAS5 (AT91_MATRIX + 0xA8) /* Priority Register A for Slave 5 */
+#define AT91_MATRIX_PRBS5 (AT91_MATRIX + 0xAC) /* Priority Register B for Slave 5 */
+#define AT91_MATRIX_PRAS6 (AT91_MATRIX + 0xB0) /* Priority Register A for Slave 6 */
+#define AT91_MATRIX_PRBS6 (AT91_MATRIX + 0xB4) /* Priority Register B for Slave 6 */
+#define AT91_MATRIX_PRAS7 (AT91_MATRIX + 0xB8) /* Priority Register A for Slave 7 */
+#define AT91_MATRIX_PRBS7 (AT91_MATRIX + 0xBC) /* Priority Register B for Slave 7 */
+#define AT91_MATRIX_M0PR (3 << 0) /* Master 0 Priority */
+#define AT91_MATRIX_M1PR (3 << 4) /* Master 1 Priority */
+#define AT91_MATRIX_M2PR (3 << 8) /* Master 2 Priority */
+#define AT91_MATRIX_M3PR (3 << 12) /* Master 3 Priority */
+#define AT91_MATRIX_M4PR (3 << 16) /* Master 4 Priority */
+#define AT91_MATRIX_M5PR (3 << 20) /* Master 5 Priority */
+#define AT91_MATRIX_M6PR (3 << 24) /* Master 6 Priority */
+#define AT91_MATRIX_M7PR (3 << 28) /* Master 7 Priority */
+#define AT91_MATRIX_M8PR (3 << 0) /* Master 8 Priority (in Register B) */
+#define AT91_MATRIX_M9PR (3 << 4) /* Master 9 Priority (in Register B) */
+#define AT91_MATRIX_M10PR (3 << 8) /* Master 10 Priority (in Register B) */
+#define AT91_MATRIX_M11PR (3 << 12) /* Master 11 Priority (in Register B) */
+
+#define AT91_MATRIX_MRCR (AT91_MATRIX + 0x100) /* Master Remap Control Register */
+#define AT91_MATRIX_RCB0 (1 << 0) /* Remap Command for AHB Master 0 (ARM926EJ-S Instruction Master) */
+#define AT91_MATRIX_RCB1 (1 << 1) /* Remap Command for AHB Master 1 (ARM926EJ-S Data Master) */
+#define AT91_MATRIX_RCB2 (1 << 2)
+#define AT91_MATRIX_RCB3 (1 << 3)
+#define AT91_MATRIX_RCB4 (1 << 4)
+#define AT91_MATRIX_RCB5 (1 << 5)
+#define AT91_MATRIX_RCB6 (1 << 6)
+#define AT91_MATRIX_RCB7 (1 << 7)
+#define AT91_MATRIX_RCB8 (1 << 8)
+#define AT91_MATRIX_RCB9 (1 << 9)
+#define AT91_MATRIX_RCB10 (1 << 10)
+#define AT91_MATRIX_RCB11 (1 << 11)
+
+#define AT91_MATRIX_TCMR (AT91_MATRIX + 0x110) /* TCM Configuration Register */
+#define AT91_MATRIX_ITCM_SIZE (0xf << 0) /* Size of ITCM enabled memory block */
+#define AT91_MATRIX_ITCM_0 (0 << 0)
+#define AT91_MATRIX_ITCM_32 (6 << 0)
+#define AT91_MATRIX_DTCM_SIZE (0xf << 4) /* Size of DTCM enabled memory block */
+#define AT91_MATRIX_DTCM_0 (0 << 4)
+#define AT91_MATRIX_DTCM_32 (6 << 4)
+#define AT91_MATRIX_DTCM_64 (7 << 4)
+#define AT91_MATRIX_TCM_NWS (0x1 << 11) /* Wait state TCM register */
+#define AT91_MATRIX_TCM_NO_WS (0x0 << 11)
+#define AT91_MATRIX_TCM_ONE_WS (0x1 << 11)
+
+#define AT91_MATRIX_VIDEO (AT91_MATRIX + 0x118) /* Video Mode Configuration Register */
+#define AT91C_VDEC_SEL (0x1 << 0) /* Video Mode Selection */
+#define AT91C_VDEC_SEL_OFF (0 << 0)
+#define AT91C_VDEC_SEL_ON (1 << 0)
+
+#define AT91_MATRIX_EBICSA (AT91_MATRIX + 0x128) /* EBI Chip Select Assignment Register */
+#define AT91_MATRIX_EBI_CS1A (1 << 1) /* Chip Select 1 Assignment */
+#define AT91_MATRIX_EBI_CS1A_SMC (0 << 1)
+#define AT91_MATRIX_EBI_CS1A_SDRAMC (1 << 1)
+#define AT91_MATRIX_EBI_CS3A (1 << 3) /* Chip Select 3 Assignment */
+#define AT91_MATRIX_EBI_CS3A_SMC (0 << 3)
+#define AT91_MATRIX_EBI_CS3A_SMC_SMARTMEDIA (1 << 3)
+#define AT91_MATRIX_EBI_CS4A (1 << 4) /* Chip Select 4 Assignment */
+#define AT91_MATRIX_EBI_CS4A_SMC (0 << 4)
+#define AT91_MATRIX_EBI_CS4A_SMC_CF0 (1 << 4)
+#define AT91_MATRIX_EBI_CS5A (1 << 5) /* Chip Select 5 Assignment */
+#define AT91_MATRIX_EBI_CS5A_SMC (0 << 5)
+#define AT91_MATRIX_EBI_CS5A_SMC_CF1 (1 << 5)
+#define AT91_MATRIX_EBI_DBPUC (1 << 8) /* Data Bus Pull-up Configuration */
+#define AT91_MATRIX_EBI_DBPU_ON (0 << 8)
+#define AT91_MATRIX_EBI_DBPU_OFF (1 << 8)
+#define AT91_MATRIX_EBI_VDDIOMSEL (1 << 16) /* Memory voltage selection */
+#define AT91_MATRIX_EBI_VDDIOMSEL_1_8V (0 << 16)
+#define AT91_MATRIX_EBI_VDDIOMSEL_3_3V (1 << 16)
+#define AT91_MATRIX_EBI_EBI_IOSR (1 << 17) /* EBI I/O slew rate selection */
+#define AT91_MATRIX_EBI_EBI_IOSR_REDUCED (0 << 17)
+#define AT91_MATRIX_EBI_EBI_IOSR_NORMAL (1 << 17)
+#define AT91_MATRIX_EBI_DDR_IOSR (1 << 18) /* DDR2 dedicated port I/O slew rate selection */
+#define AT91_MATRIX_EBI_DDR_IOSR_REDUCED (0 << 18)
+#define AT91_MATRIX_EBI_DDR_IOSR_NORMAL (1 << 18)
+
+#define AT91_MATRIX_WPMR (AT91_MATRIX + 0x1E4) /* Write Protect Mode Register */
+#define AT91_MATRIX_WPMR_WPEN (1 << 0) /* Write Protect ENable */
+#define AT91_MATRIX_WPMR_WP_WPDIS (0 << 0)
+#define AT91_MATRIX_WPMR_WP_WPEN (1 << 0)
+#define AT91_MATRIX_WPMR_WPKEY (0xFFFFFF << 8) /* Write Protect KEY */
+
+#define AT91_MATRIX_WPSR (AT91_MATRIX + 0x1E8) /* Write Protect Status Register */
+#define AT91_MATRIX_WPSR_WPVS (1 << 0) /* Write Protect Violation Status */
+#define AT91_MATRIX_WPSR_NO_WPV (0 << 0)
+#define AT91_MATRIX_WPSR_WPV (1 << 0)
+#define AT91_MATRIX_WPSR_WPVSRC (0xFFFF << 8) /* Write Protect Violation Source */
+
+#endif
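
Note: EBICSA decides which controller owns each EBI chip select, so board setup code is expected to read-modify-write it before registering a NAND device on CS3. A minimal sketch of that pattern using the at91_sys_read()/at91_sys_write() accessors already used elsewhere in mach-at91 (illustrative only, not part of this patch):

	/* Sketch: route EBI chip select 3 to the SmartMedia (NAND) logic */
	static void __init example_nand_csa_setup(void)
	{
		unsigned long csa;

		csa = at91_sys_read(AT91_MATRIX_EBICSA);
		at91_sys_write(AT91_MATRIX_EBICSA,
			       csa | AT91_MATRIX_EBI_CS3A_SMC_SMARTMEDIA);
	}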
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index e6afff849b85..74801d275cdc 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -80,7 +80,8 @@ struct at91_eth_data {
};
extern void __init at91_add_device_eth(struct at91_eth_data *data);
-#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91SAM9G20) || defined(CONFIG_ARCH_AT91CAP9)
+#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91SAM9G20) || defined(CONFIG_ARCH_AT91CAP9) \
+ || defined(CONFIG_ARCH_AT91SAM9G45)
#define eth_platform_data at91_eth_data
#endif
@@ -90,6 +91,7 @@ struct at91_usbh_data {
u8 vbus_pin[2]; /* port power-control pin */
};
extern void __init at91_add_device_usbh(struct at91_usbh_data *data);
+extern void __init at91_add_device_usbh_ohci(struct at91_usbh_data *data);
/* NAND / SmartMedia */
struct atmel_nand_data {
@@ -105,7 +107,11 @@ struct atmel_nand_data {
extern void __init at91_add_device_nand(struct atmel_nand_data *data);
/* I2C*/
+#if defined(CONFIG_ARCH_AT91SAM9G45)
+extern void __init at91_add_device_i2c(short i2c_id, struct i2c_board_info *devices, int nr_devices);
+#else
extern void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices);
+#endif
/* SPI */
extern void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices);
diff --git a/arch/arm/mach-at91/include/mach/cpu.h b/arch/arm/mach-at91/include/mach/cpu.h
index c554c3e4d553..34a9502c48bc 100644
--- a/arch/arm/mach-at91/include/mach/cpu.h
+++ b/arch/arm/mach-at91/include/mach/cpu.h
@@ -21,8 +21,10 @@
#define ARCH_ID_AT91SAM9260 0x019803a0
#define ARCH_ID_AT91SAM9261 0x019703a0
#define ARCH_ID_AT91SAM9263 0x019607a0
+#define ARCH_ID_AT91SAM9G10 0x819903a0
#define ARCH_ID_AT91SAM9G20 0x019905a0
#define ARCH_ID_AT91SAM9RL64 0x019b03a0
+#define ARCH_ID_AT91SAM9G45 0x819b05a0
#define ARCH_ID_AT91CAP9 0x039A03A0
#define ARCH_ID_AT91SAM9XE128 0x329973a0
@@ -39,6 +41,15 @@ static inline unsigned long at91_cpu_identify(void)
return (at91_sys_read(AT91_DBGU_CIDR) & ~AT91_CIDR_VERSION);
}
+#define ARCH_EXID_AT91SAM9M11 0x00000001
+#define ARCH_EXID_AT91SAM9M10 0x00000002
+#define ARCH_EXID_AT91SAM9G45 0x00000004
+
+static inline unsigned long at91_exid_identify(void)
+{
+ return at91_sys_read(AT91_DBGU_EXID);
+}
+
#define ARCH_FAMILY_AT91X92 0x09200000
#define ARCH_FAMILY_AT91SAM9 0x01900000
@@ -87,6 +98,12 @@ static inline unsigned long at91cap9_rev_identify(void)
#define cpu_is_at91sam9261() (0)
#endif
+#ifdef CONFIG_ARCH_AT91SAM9G10
+#define cpu_is_at91sam9g10() (at91_cpu_identify() == ARCH_ID_AT91SAM9G10)
+#else
+#define cpu_is_at91sam9g10() (0)
+#endif
+
#ifdef CONFIG_ARCH_AT91SAM9263
#define cpu_is_at91sam9263() (at91_cpu_identify() == ARCH_ID_AT91SAM9263)
#else
@@ -99,6 +116,12 @@ static inline unsigned long at91cap9_rev_identify(void)
#define cpu_is_at91sam9rl() (0)
#endif
+#ifdef CONFIG_ARCH_AT91SAM9G45
+#define cpu_is_at91sam9g45() (at91_cpu_identify() == ARCH_ID_AT91SAM9G45)
+#else
+#define cpu_is_at91sam9g45() (0)
+#endif
+
#ifdef CONFIG_ARCH_AT91CAP9
#define cpu_is_at91cap9() (at91_cpu_identify() == ARCH_ID_AT91CAP9)
#define cpu_is_at91cap9_revB() (at91cap9_rev_identify() == ARCH_REVISION_CAP9_B)
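
Note: all members of the AT91SAM9G45 family report the same CIDR value, so EXID is the only way to tell the variants apart. A hedged example built from the macros above; cpu_is_at91sam9m10() is an illustrative name, not something this patch defines:

	/* Sketch: distinguish the SAM9M10 variant once CIDR has matched the family */
	#define cpu_is_at91sam9m10() \
		(cpu_is_at91sam9g45() && \
		 at91_exid_identify() == ARCH_EXID_AT91SAM9M10)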
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h
index da0b681c652c..a0df8b022df2 100644
--- a/arch/arm/mach-at91/include/mach/hardware.h
+++ b/arch/arm/mach-at91/include/mach/hardware.h
@@ -20,12 +20,14 @@
#include <mach/at91rm9200.h>
#elif defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9G20)
#include <mach/at91sam9260.h>
-#elif defined(CONFIG_ARCH_AT91SAM9261)
+#elif defined(CONFIG_ARCH_AT91SAM9261) || defined(CONFIG_ARCH_AT91SAM9G10)
#include <mach/at91sam9261.h>
#elif defined(CONFIG_ARCH_AT91SAM9263)
#include <mach/at91sam9263.h>
#elif defined(CONFIG_ARCH_AT91SAM9RL)
#include <mach/at91sam9rl.h>
+#elif defined(CONFIG_ARCH_AT91SAM9G45)
+#include <mach/at91sam9g45.h>
#elif defined(CONFIG_ARCH_AT91CAP9)
#include <mach/at91cap9.h>
#elif defined(CONFIG_ARCH_AT91X40)
diff --git a/arch/arm/mach-at91/include/mach/timex.h b/arch/arm/mach-at91/include/mach/timex.h
index d84c9948becf..31ac2d97f14c 100644
--- a/arch/arm/mach-at91/include/mach/timex.h
+++ b/arch/arm/mach-at91/include/mach/timex.h
@@ -42,6 +42,11 @@
#define AT91SAM9_MASTER_CLOCK 99300000
#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
+#elif defined(CONFIG_ARCH_AT91SAM9G10)
+
+#define AT91SAM9_MASTER_CLOCK 133000000
+#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
+
#elif defined(CONFIG_ARCH_AT91SAM9263)
#if defined(CONFIG_MACH_USB_A9263)
@@ -62,6 +67,11 @@
#define AT91SAM9_MASTER_CLOCK 132096000
#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
+#elif defined(CONFIG_ARCH_AT91SAM9G45)
+
+#define AT91SAM9_MASTER_CLOCK 133333333
+#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
+
#elif defined(CONFIG_ARCH_AT91CAP9)
#define AT91CAP9_MASTER_CLOCK 100000000
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index e26c4fe61fae..4028724d490d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -201,7 +201,8 @@ static int at91_pm_verify_clocks(void)
pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
return 0;
}
- } else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() || cpu_is_at91sam9263() || cpu_is_at91sam9g20()) {
+ } else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() || cpu_is_at91sam9263()
+ || cpu_is_at91sam9g20() || cpu_is_at91sam9g10()) {
if ((scsr & (AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP)) != 0) {
pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
return 0;
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 6c4c1633ed12..9b7db8f7b4e9 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -22,18 +22,6 @@
#include <mach/hardware.h>
-/*
- * The EP93xx has two external crystal oscillators. To generate the
- * required high-frequency clocks, the processor uses two phase-locked-
- * loops (PLLs) to multiply the incoming external clock signal to much
- * higher frequencies that are then divided down by programmable dividers
- * to produce the needed clocks. The PLLs operate independently of one
- * another.
- */
-#define EP93XX_EXT_CLK_RATE 14745600
-#define EP93XX_EXT_RTC_RATE 32768
-
-
struct clk {
unsigned long rate;
int users;
@@ -42,11 +30,14 @@ struct clk {
u32 enable_mask;
unsigned long (*get_rate)(struct clk *clk);
+ int (*set_rate)(struct clk *clk, unsigned long rate);
};
static unsigned long get_uart_rate(struct clk *clk);
+static int set_keytchclk_rate(struct clk *clk, unsigned long rate);
+
static struct clk clk_uart1 = {
.sw_locked = 1,
@@ -75,6 +66,12 @@ static struct clk clk_usb_host = {
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_USH_EN,
};
+static struct clk clk_keypad = {
+ .sw_locked = 1,
+ .enable_reg = EP93XX_SYSCON_KEYTCHCLKDIV,
+ .enable_mask = EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
+ .set_rate = set_keytchclk_rate,
+};
/* DMA Clocks */
static struct clk clk_m2p0 = {
@@ -130,27 +127,28 @@ static struct clk clk_m2m1 = {
{ .dev_id = dev, .con_id = con, .clk = ck }
static struct clk_lookup clocks[] = {
- INIT_CK("apb:uart1", NULL, &clk_uart1),
- INIT_CK("apb:uart2", NULL, &clk_uart2),
- INIT_CK("apb:uart3", NULL, &clk_uart3),
- INIT_CK(NULL, "pll1", &clk_pll1),
- INIT_CK(NULL, "fclk", &clk_f),
- INIT_CK(NULL, "hclk", &clk_h),
- INIT_CK(NULL, "pclk", &clk_p),
- INIT_CK(NULL, "pll2", &clk_pll2),
- INIT_CK("ep93xx-ohci", NULL, &clk_usb_host),
- INIT_CK(NULL, "m2p0", &clk_m2p0),
- INIT_CK(NULL, "m2p1", &clk_m2p1),
- INIT_CK(NULL, "m2p2", &clk_m2p2),
- INIT_CK(NULL, "m2p3", &clk_m2p3),
- INIT_CK(NULL, "m2p4", &clk_m2p4),
- INIT_CK(NULL, "m2p5", &clk_m2p5),
- INIT_CK(NULL, "m2p6", &clk_m2p6),
- INIT_CK(NULL, "m2p7", &clk_m2p7),
- INIT_CK(NULL, "m2p8", &clk_m2p8),
- INIT_CK(NULL, "m2p9", &clk_m2p9),
- INIT_CK(NULL, "m2m0", &clk_m2m0),
- INIT_CK(NULL, "m2m1", &clk_m2m1),
+ INIT_CK("apb:uart1", NULL, &clk_uart1),
+ INIT_CK("apb:uart2", NULL, &clk_uart2),
+ INIT_CK("apb:uart3", NULL, &clk_uart3),
+ INIT_CK(NULL, "pll1", &clk_pll1),
+ INIT_CK(NULL, "fclk", &clk_f),
+ INIT_CK(NULL, "hclk", &clk_h),
+ INIT_CK(NULL, "pclk", &clk_p),
+ INIT_CK(NULL, "pll2", &clk_pll2),
+ INIT_CK("ep93xx-ohci", NULL, &clk_usb_host),
+ INIT_CK("ep93xx-keypad", NULL, &clk_keypad),
+ INIT_CK(NULL, "m2p0", &clk_m2p0),
+ INIT_CK(NULL, "m2p1", &clk_m2p1),
+ INIT_CK(NULL, "m2p2", &clk_m2p2),
+ INIT_CK(NULL, "m2p3", &clk_m2p3),
+ INIT_CK(NULL, "m2p4", &clk_m2p4),
+ INIT_CK(NULL, "m2p5", &clk_m2p5),
+ INIT_CK(NULL, "m2p6", &clk_m2p6),
+ INIT_CK(NULL, "m2p7", &clk_m2p7),
+ INIT_CK(NULL, "m2p8", &clk_m2p8),
+ INIT_CK(NULL, "m2p9", &clk_m2p9),
+ INIT_CK(NULL, "m2m0", &clk_m2m0),
+ INIT_CK(NULL, "m2m1", &clk_m2m1),
};
@@ -202,6 +200,43 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_rate);
+static int set_keytchclk_rate(struct clk *clk, unsigned long rate)
+{
+ u32 val;
+ u32 div_bit;
+
+ val = __raw_readl(clk->enable_reg);
+
+	/*
+	 * The Key Matrix and ADC clocks are configured through the same
+	 * System Controller register. The resulting clock is either
+	 * 1/4 or 1/16 of the external clock rate, depending on whether the
+	 * EP93XX_SYSCON_KEYTCHCLKDIV_KDIV/EP93XX_SYSCON_KEYTCHCLKDIV_ADIV
+	 * bit is set or cleared.
+	 */
+ div_bit = clk->enable_mask >> 15;
+
+ if (rate == EP93XX_KEYTCHCLK_DIV4)
+ val |= div_bit;
+ else if (rate == EP93XX_KEYTCHCLK_DIV16)
+ val &= ~div_bit;
+ else
+ return -EINVAL;
+
+ ep93xx_syscon_swlocked_write(val, clk->enable_reg);
+ clk->rate = rate;
+ return 0;
+}
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ if (clk->set_rate)
+ return clk->set_rate(clk, rate);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
static char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
static char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
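
Note: because set_keytchclk_rate() is reached through clk_set_rate(), a keypad or touchscreen driver selects the divider through the ordinary clk API. A minimal usage sketch, assuming dev is the "ep93xx-keypad" platform device matched by the clk_keypad lookup above (error handling trimmed):

	/* Sketch: select the /4 keypad clock from a driver's probe path */
	struct clk *clk = clk_get(dev, NULL);

	if (!IS_ERR(clk)) {
		clk_set_rate(clk, EP93XX_KEYTCHCLK_DIV4);	/* 14745600 / 4 Hz */
		clk_enable(clk);
	}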
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
index 482cf3d2fbcd..2d83d69e2eed 100644
--- a/arch/arm/mach-ep93xx/gpio.c
+++ b/arch/arm/mach-ep93xx/gpio.c
@@ -17,15 +17,15 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/io.h>
+#include <linux/gpio.h>
-#include <mach/ep93xx-regs.h>
-#include <asm/gpio.h>
+#include <mach/hardware.h>
struct ep93xx_gpio_chip {
struct gpio_chip chip;
- unsigned int data_reg;
- unsigned int data_dir_reg;
+ void __iomem *data_reg;
+ void __iomem *data_dir_reg;
};
#define to_ep93xx_gpio_chip(c) container_of(c, struct ep93xx_gpio_chip, chip)
@@ -111,15 +111,61 @@ static void ep93xx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
u8 data_reg, data_dir_reg;
- int i;
+ int gpio, i;
data_reg = __raw_readb(ep93xx_chip->data_reg);
data_dir_reg = __raw_readb(ep93xx_chip->data_dir_reg);
- for (i = 0; i < chip->ngpio; i++)
- seq_printf(s, "GPIO %s%d: %s %s\n", chip->label, i,
- (data_reg & (1 << i)) ? "set" : "clear",
- (data_dir_reg & (1 << i)) ? "out" : "in");
+ gpio = ep93xx_chip->chip.base;
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ int is_out = data_dir_reg & (1 << i);
+
+ seq_printf(s, " %s%d gpio-%-3d (%-12s) %s %s",
+ chip->label, i, gpio,
+ gpiochip_is_requested(chip, i) ? : "",
+ is_out ? "out" : "in ",
+ (data_reg & (1 << i)) ? "hi" : "lo");
+
+ if (!is_out) {
+ int irq = gpio_to_irq(gpio);
+ struct irq_desc *desc = irq_desc + irq;
+
+ if (irq >= 0 && desc->action) {
+ char *trigger;
+
+ switch (desc->status & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_NONE:
+ trigger = "(default)";
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ trigger = "edge-falling";
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ trigger = "edge-rising";
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ trigger = "edge-both";
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ trigger = "level-high";
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ trigger = "level-low";
+ break;
+ default:
+ trigger = "?trigger?";
+ break;
+ }
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger,
+ (desc->status & IRQ_WAKEUP)
+ ? " wakeup" : "");
+ }
+ }
+
+ seq_printf(s, "\n");
+ }
}
#define EP93XX_GPIO_BANK(name, dr, ddr, base_gpio) \
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index 967c079180db..49b256b3ddfc 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -52,40 +52,43 @@
#define EP93XX_AHB_VIRT_BASE 0xfef00000
#define EP93XX_AHB_SIZE 0x00100000
+#define EP93XX_AHB_IOMEM(x) IOMEM(EP93XX_AHB_VIRT_BASE + (x))
+
#define EP93XX_APB_PHYS_BASE 0x80800000
#define EP93XX_APB_VIRT_BASE 0xfed00000
#define EP93XX_APB_SIZE 0x00200000
+#define EP93XX_APB_IOMEM(x) IOMEM(EP93XX_APB_VIRT_BASE + (x))
+
/* AHB peripherals */
-#define EP93XX_DMA_BASE ((void __iomem *) \
- (EP93XX_AHB_VIRT_BASE + 0x00000000))
+#define EP93XX_DMA_BASE EP93XX_AHB_IOMEM(0x00000000)
-#define EP93XX_ETHERNET_BASE (EP93XX_AHB_VIRT_BASE + 0x00010000)
#define EP93XX_ETHERNET_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00010000)
+#define EP93XX_ETHERNET_BASE EP93XX_AHB_IOMEM(0x00010000)
-#define EP93XX_USB_BASE (EP93XX_AHB_VIRT_BASE + 0x00020000)
#define EP93XX_USB_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00020000)
+#define EP93XX_USB_BASE EP93XX_AHB_IOMEM(0x00020000)
-#define EP93XX_RASTER_BASE (EP93XX_AHB_VIRT_BASE + 0x00030000)
+#define EP93XX_RASTER_BASE EP93XX_AHB_IOMEM(0x00030000)
-#define EP93XX_GRAPHICS_ACCEL_BASE (EP93XX_AHB_VIRT_BASE + 0x00040000)
+#define EP93XX_GRAPHICS_ACCEL_BASE EP93XX_AHB_IOMEM(0x00040000)
-#define EP93XX_SDRAM_CONTROLLER_BASE (EP93XX_AHB_VIRT_BASE + 0x00060000)
+#define EP93XX_SDRAM_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00060000)
-#define EP93XX_PCMCIA_CONTROLLER_BASE (EP93XX_AHB_VIRT_BASE + 0x00080000)
+#define EP93XX_PCMCIA_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00080000)
-#define EP93XX_BOOT_ROM_BASE (EP93XX_AHB_VIRT_BASE + 0x00090000)
+#define EP93XX_BOOT_ROM_BASE EP93XX_AHB_IOMEM(0x00090000)
-#define EP93XX_IDE_BASE (EP93XX_AHB_VIRT_BASE + 0x000a0000)
+#define EP93XX_IDE_BASE EP93XX_AHB_IOMEM(0x000a0000)
-#define EP93XX_VIC1_BASE (EP93XX_AHB_VIRT_BASE + 0x000b0000)
+#define EP93XX_VIC1_BASE EP93XX_AHB_IOMEM(0x000b0000)
-#define EP93XX_VIC2_BASE (EP93XX_AHB_VIRT_BASE + 0x000c0000)
+#define EP93XX_VIC2_BASE EP93XX_AHB_IOMEM(0x000c0000)
/* APB peripherals */
-#define EP93XX_TIMER_BASE (EP93XX_APB_VIRT_BASE + 0x00010000)
+#define EP93XX_TIMER_BASE EP93XX_APB_IOMEM(0x00010000)
#define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x))
#define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00)
#define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04)
@@ -102,11 +105,11 @@
#define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88)
#define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c)
-#define EP93XX_I2S_BASE (EP93XX_APB_VIRT_BASE + 0x00020000)
+#define EP93XX_I2S_BASE EP93XX_APB_IOMEM(0x00020000)
-#define EP93XX_SECURITY_BASE (EP93XX_APB_VIRT_BASE + 0x00030000)
+#define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000)
-#define EP93XX_GPIO_BASE (EP93XX_APB_VIRT_BASE + 0x00040000)
+#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000)
#define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x))
#define EP93XX_GPIO_F_INT_TYPE1 EP93XX_GPIO_REG(0x4c)
#define EP93XX_GPIO_F_INT_TYPE2 EP93XX_GPIO_REG(0x50)
@@ -124,32 +127,32 @@
#define EP93XX_GPIO_B_INT_ENABLE EP93XX_GPIO_REG(0xb8)
#define EP93XX_GPIO_B_INT_STATUS EP93XX_GPIO_REG(0xbc)
-#define EP93XX_AAC_BASE (EP93XX_APB_VIRT_BASE + 0x00080000)
+#define EP93XX_AAC_BASE EP93XX_APB_IOMEM(0x00080000)
-#define EP93XX_SPI_BASE (EP93XX_APB_VIRT_BASE + 0x000a0000)
+#define EP93XX_SPI_BASE EP93XX_APB_IOMEM(0x000a0000)
-#define EP93XX_IRDA_BASE (EP93XX_APB_VIRT_BASE + 0x000b0000)
+#define EP93XX_IRDA_BASE EP93XX_APB_IOMEM(0x000b0000)
-#define EP93XX_UART1_BASE (EP93XX_APB_VIRT_BASE + 0x000c0000)
#define EP93XX_UART1_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000c0000)
+#define EP93XX_UART1_BASE EP93XX_APB_IOMEM(0x000c0000)
-#define EP93XX_UART2_BASE (EP93XX_APB_VIRT_BASE + 0x000d0000)
#define EP93XX_UART2_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000d0000)
+#define EP93XX_UART2_BASE EP93XX_APB_IOMEM(0x000d0000)
-#define EP93XX_UART3_BASE (EP93XX_APB_VIRT_BASE + 0x000e0000)
#define EP93XX_UART3_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000e0000)
+#define EP93XX_UART3_BASE EP93XX_APB_IOMEM(0x000e0000)
-#define EP93XX_KEY_MATRIX_BASE (EP93XX_APB_VIRT_BASE + 0x000f0000)
+#define EP93XX_KEY_MATRIX_BASE EP93XX_APB_IOMEM(0x000f0000)
-#define EP93XX_ADC_BASE (EP93XX_APB_VIRT_BASE + 0x00100000)
-#define EP93XX_TOUCHSCREEN_BASE (EP93XX_APB_VIRT_BASE + 0x00100000)
+#define EP93XX_ADC_BASE EP93XX_APB_IOMEM(0x00100000)
+#define EP93XX_TOUCHSCREEN_BASE EP93XX_APB_IOMEM(0x00100000)
-#define EP93XX_PWM_BASE (EP93XX_APB_VIRT_BASE + 0x00110000)
+#define EP93XX_PWM_BASE EP93XX_APB_IOMEM(0x00110000)
-#define EP93XX_RTC_BASE (EP93XX_APB_VIRT_BASE + 0x00120000)
#define EP93XX_RTC_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x00120000)
+#define EP93XX_RTC_BASE EP93XX_APB_IOMEM(0x00120000)
-#define EP93XX_SYSCON_BASE (EP93XX_APB_VIRT_BASE + 0x00130000)
+#define EP93XX_SYSCON_BASE EP93XX_APB_IOMEM(0x00130000)
#define EP93XX_SYSCON_REG(x) (EP93XX_SYSCON_BASE + (x))
#define EP93XX_SYSCON_POWER_STATE EP93XX_SYSCON_REG(0x00)
#define EP93XX_SYSCON_PWRCNT EP93XX_SYSCON_REG(0x04)
@@ -179,7 +182,7 @@
#define EP93XX_SYSCON_DEVICE_CONFIG_U1EN (1<<18)
#define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0)
-#define EP93XX_WATCHDOG_BASE (EP93XX_APB_VIRT_BASE + 0x00140000)
+#define EP93XX_WATCHDOG_BASE EP93XX_APB_IOMEM(0x00140000)
#endif
diff --git a/arch/arm/mach-ep93xx/include/mach/hardware.h b/arch/arm/mach-ep93xx/include/mach/hardware.h
index 2866297310b7..587c07bdec5b 100644
--- a/arch/arm/mach-ep93xx/include/mach/hardware.h
+++ b/arch/arm/mach-ep93xx/include/mach/hardware.h
@@ -12,4 +12,18 @@
#include "ts72xx.h"
+/*
+ * The EP93xx has two external crystal oscillators. To generate the
+ * required high-frequency clocks, the processor uses two phase-locked-
+ * loops (PLLs) to multiply the incoming external clock signal to much
+ * higher frequencies that are then divided down by programmable dividers
+ * to produce the needed clocks. The PLLs operate independently of one
+ * another.
+ */
+#define EP93XX_EXT_CLK_RATE 14745600
+#define EP93XX_EXT_RTC_RATE 32768
+
+#define EP93XX_KEYTCHCLK_DIV4 (EP93XX_EXT_CLK_RATE / 4)
+#define EP93XX_KEYTCHCLK_DIV16 (EP93XX_EXT_CLK_RATE / 16)
+
#endif
diff --git a/arch/arm/mach-ep93xx/include/mach/io.h b/arch/arm/mach-ep93xx/include/mach/io.h
index fd5f081cc8b7..cebcc1c53d63 100644
--- a/arch/arm/mach-ep93xx/include/mach/io.h
+++ b/arch/arm/mach-ep93xx/include/mach/io.h
@@ -1,8 +1,21 @@
/*
* arch/arm/mach-ep93xx/include/mach/io.h
*/
+#ifndef __ASM_MACH_IO_H
+#define __ASM_MACH_IO_H
#define IO_SPACE_LIMIT 0xffffffff
-#define __io(p) __typesafe_io(p)
-#define __mem_pci(p) (p)
+#define __io(p) __typesafe_io(p)
+#define __mem_pci(p) (p)
+
+/*
+ * A typesafe __io() variation for variable initialisers
+ */
+#ifdef __ASSEMBLER__
+#define IOMEM(p) p
+#else
+#define IOMEM(p) ((void __iomem __force *)(p))
+#endif
+
+#endif /* __ASM_MACH_IO_H */
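
Note: the IOMEM() wrapper makes every EP93XX_*_BASE definition a proper void __iomem * in C (and a bare constant in assembly), so the MMIO accessors now take typed pointers that sparse can check. A small hedged example using registers defined earlier in this series:

	/* Sketch: typed MMIO access through the IOMEM()-based register macros */
	static void example_enable_usb_host_power(void)
	{
		u32 pwrcnt = __raw_readl(EP93XX_SYSCON_PWRCNT);

		__raw_writel(pwrcnt | EP93XX_SYSCON_PWRCNT_USH_EN,
			     EP93XX_SYSCON_PWRCNT);
	}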
diff --git a/arch/arm/mach-nomadik/Kconfig b/arch/arm/mach-nomadik/Kconfig
new file mode 100644
index 000000000000..2a02b49c40f0
--- /dev/null
+++ b/arch/arm/mach-nomadik/Kconfig
@@ -0,0 +1,21 @@
+if ARCH_NOMADIK
+
+menu "Nomadik boards"
+
+config MACH_NOMADIK_8815NHK
+ bool "ST 8815 Nomadik Hardware Kit (evaluation board)"
+ select NOMADIK_8815
+
+endmenu
+
+config NOMADIK_8815
+ bool
+
+
+config I2C_BITBANG_8815NHK
+ tristate "Driver for bit-bang busses found on the 8815 NHK"
+ depends on I2C && MACH_NOMADIK_8815NHK
+ select I2C_ALGOBIT
+ default y
+
+endif
diff --git a/arch/arm/mach-nomadik/Makefile b/arch/arm/mach-nomadik/Makefile
new file mode 100644
index 000000000000..412040982a40
--- /dev/null
+++ b/arch/arm/mach-nomadik/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+
+# Object file lists.
+
+obj-y += clock.o timer.o gpio.o
+
+# Cpu revision
+obj-$(CONFIG_NOMADIK_8815) += cpu-8815.o
+
+# Specific board support
+obj-$(CONFIG_MACH_NOMADIK_8815NHK) += board-nhk8815.o
+
+# Nomadik extra devices
+obj-$(CONFIG_I2C_BITBANG_8815NHK) += i2c-8815nhk.o
diff --git a/arch/arm/mach-nomadik/Makefile.boot b/arch/arm/mach-nomadik/Makefile.boot
new file mode 100644
index 000000000000..c7e75acfe6c9
--- /dev/null
+++ b/arch/arm/mach-nomadik/Makefile.boot
@@ -0,0 +1,4 @@
+ zreladdr-y := 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x00800000
+
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
new file mode 100644
index 000000000000..79bdea943eb4
--- /dev/null
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -0,0 +1,111 @@
+/*
+ * linux/arch/arm/mach-nomadik/board-8815nhk.c
+ *
+ * Copyright (C) STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * NHK15 board specific driver definition
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <mach/setup.h>
+#include "clock.h"
+
+#define __MEM_4K_RESOURCE(x) \
+ .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
+
+static struct amba_device uart0_device = {
+ .dev = { .init_name = "uart0" },
+ __MEM_4K_RESOURCE(NOMADIK_UART0_BASE),
+ .irq = {IRQ_UART0, NO_IRQ},
+};
+
+static struct amba_device uart1_device = {
+ .dev = { .init_name = "uart1" },
+ __MEM_4K_RESOURCE(NOMADIK_UART1_BASE),
+ .irq = {IRQ_UART1, NO_IRQ},
+};
+
+static struct amba_device *amba_devs[] __initdata = {
+ &uart0_device,
+ &uart1_device,
+};
+
+/* For now we have only a single fixed-rate clock */
+static struct clk nhk8815_clk_48 = {
+ .rate = 48*1000*1000,
+};
+
+static struct resource nhk8815_eth_resources[] = {
+ {
+ .name = "smc91x-regs",
+ .start = 0x34000000 + 0x300,
+ .end = 0x34000000 + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = NOMADIK_GPIO_TO_IRQ(115),
+ .end = NOMADIK_GPIO_TO_IRQ(115),
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING,
+ }
+};
+
+static struct platform_device nhk8815_eth_device = {
+ .name = "smc91x",
+ .resource = nhk8815_eth_resources,
+ .num_resources = ARRAY_SIZE(nhk8815_eth_resources),
+};
+
+static int __init nhk8815_eth_init(void)
+{
+ int gpio_nr = 115; /* hardwired in the board */
+ int err;
+
+ err = gpio_request(gpio_nr, "eth_irq");
+ if (!err) err = nmk_gpio_set_mode(gpio_nr, NMK_GPIO_ALT_GPIO);
+ if (!err) err = gpio_direction_input(gpio_nr);
+ if (err)
+ pr_err("Error %i in %s\n", err, __func__);
+ return err;
+}
+device_initcall(nhk8815_eth_init);
+
+static struct platform_device *nhk8815_platform_devices[] __initdata = {
+ &nhk8815_eth_device,
+ /* will add more devices */
+};
+
+static void __init nhk8815_platform_init(void)
+{
+ int i;
+
+ cpu8815_platform_init();
+ platform_add_devices(nhk8815_platform_devices,
+ ARRAY_SIZE(nhk8815_platform_devices));
+
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
+ nmdk_clk_create(&nhk8815_clk_48, amba_devs[i]->dev.init_name);
+ amba_device_register(amba_devs[i], &iomem_resource);
+ }
+}
+
+MACHINE_START(NOMADIK, "NHK8815")
+ /* Maintainer: ST MicroElectronics */
+ .phys_io = NOMADIK_UART0_BASE,
+ .io_pg_offst = (IO_ADDRESS(NOMADIK_UART0_BASE) >> 18) & 0xfffc,
+ .boot_params = 0x100,
+ .map_io = cpu8815_map_io,
+ .init_irq = cpu8815_init_irq,
+ .timer = &nomadik_timer,
+ .init_machine = nhk8815_platform_init,
+MACHINE_END
diff --git a/arch/arm/mach-nomadik/clock.c b/arch/arm/mach-nomadik/clock.c
new file mode 100644
index 000000000000..9f92502a0083
--- /dev/null
+++ b/arch/arm/mach-nomadik/clock.c
@@ -0,0 +1,45 @@
+/*
+ * linux/arch/arm/mach-nomadik/clock.c
+ *
+ * Copyright (C) 2009 Alessandro Rubini
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <asm/clkdev.h>
+#include "clock.h"
+
+/*
+ * The nomadik board uses generic clocks, but the serial pl011 file
+ * calls clk_enable(), clk_disable(), clk_get_rate(), so we provide them
+ */
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/* enable and disable do nothing */
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_disable);
+
+/* Create a clock structure with the given name */
+int nmdk_clk_create(struct clk *clk, const char *dev_id)
+{
+ struct clk_lookup *clkdev;
+
+ clkdev = clkdev_alloc(clk, NULL, dev_id);
+ if (!clkdev)
+ return -ENOMEM;
+ clkdev_add(clkdev);
+ return 0;
+}
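
Note: nmdk_clk_create() only publishes a clk_lookup keyed by device name; consumers then resolve it with clk_get(), exactly as the PL011 driver does. A hedged sketch from the consumer side, assuming adev is an AMBA device whose init_name matched the dev_id passed at registration:

	/* Sketch: resolving the clock registered via nmdk_clk_create() */
	static int example_probe(struct amba_device *adev)
	{
		struct clk *clk = clk_get(&adev->dev, NULL);

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		clk_enable(clk);	/* a no-op in this minimal clock layer */
		pr_info("bus clock runs at %lu Hz\n", clk_get_rate(clk));
		return 0;
	}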
diff --git a/arch/arm/mach-nomadik/clock.h b/arch/arm/mach-nomadik/clock.h
new file mode 100644
index 000000000000..235faec7f627
--- /dev/null
+++ b/arch/arm/mach-nomadik/clock.h
@@ -0,0 +1,14 @@
+
+/*
+ * linux/arch/arm/mach-nomadik/clock.h
+ *
+ * Copyright (C) 2009 Alessandro Rubini
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+struct clk {
+ unsigned long rate;
+};
+extern int nmdk_clk_create(struct clk *clk, const char *dev_id);
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
new file mode 100644
index 000000000000..f93c59634191
--- /dev/null
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright STMicroelectronics, 2007.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/gpio.h>
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <asm/mach/map.h>
+#include <asm/hardware/vic.h>
+
+#include <asm/cacheflush.h>
+#include <asm/hardware/cache-l2x0.h>
+
+/* The 8815 has 4 GPIO blocks, let's register them immediately */
+static struct nmk_gpio_platform_data cpu8815_gpio[] = {
+ {
+ .name = "GPIO-0-31",
+ .first_gpio = 0,
+ .first_irq = NOMADIK_GPIO_TO_IRQ(0),
+ .parent_irq = IRQ_GPIO0,
+ }, {
+ .name = "GPIO-32-63",
+ .first_gpio = 32,
+ .first_irq = NOMADIK_GPIO_TO_IRQ(32),
+ .parent_irq = IRQ_GPIO1,
+ }, {
+ .name = "GPIO-64-95",
+ .first_gpio = 64,
+ .first_irq = NOMADIK_GPIO_TO_IRQ(64),
+ .parent_irq = IRQ_GPIO2,
+ }, {
+ .name = "GPIO-96-127", /* 124..127 not routed to pin */
+ .first_gpio = 96,
+ .first_irq = NOMADIK_GPIO_TO_IRQ(96),
+ .parent_irq = IRQ_GPIO3,
+ }
+};
+
+#define __MEM_4K_RESOURCE(x) \
+ .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
+
+static struct amba_device cpu8815_amba_gpio[] = {
+ {
+ .dev = {
+ .init_name = "gpio0",
+ .platform_data = cpu8815_gpio + 0,
+ },
+ __MEM_4K_RESOURCE(NOMADIK_GPIO0_BASE),
+ }, {
+ .dev = {
+ .init_name = "gpio1",
+ .platform_data = cpu8815_gpio + 1,
+ },
+ __MEM_4K_RESOURCE(NOMADIK_GPIO1_BASE),
+ }, {
+ .dev = {
+ .init_name = "gpio2",
+ .platform_data = cpu8815_gpio + 2,
+ },
+ __MEM_4K_RESOURCE(NOMADIK_GPIO2_BASE),
+ }, {
+ .dev = {
+ .init_name = "gpio3",
+ .platform_data = cpu8815_gpio + 3,
+ },
+ __MEM_4K_RESOURCE(NOMADIK_GPIO3_BASE),
+ },
+};
+
+static struct amba_device *amba_devs[] __initdata = {
+ cpu8815_amba_gpio + 0,
+ cpu8815_amba_gpio + 1,
+ cpu8815_amba_gpio + 2,
+ cpu8815_amba_gpio + 3,
+};
+
+static int __init cpu8815_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
+ amba_device_register(amba_devs[i], &iomem_resource);
+ return 0;
+}
+arch_initcall(cpu8815_init);
+
+/* All SoC devices live in the same area (see hardware.h) */
+static struct map_desc nomadik_io_desc[] __initdata = {
+ {
+ .virtual = NOMADIK_IO_VIRTUAL,
+ .pfn = __phys_to_pfn(NOMADIK_IO_PHYSICAL),
+ .length = NOMADIK_IO_SIZE,
+ .type = MT_DEVICE,
+ }
+ /* static ram and secured ram may be added later */
+};
+
+void __init cpu8815_map_io(void)
+{
+ iotable_init(nomadik_io_desc, ARRAY_SIZE(nomadik_io_desc));
+}
+
+void __init cpu8815_init_irq(void)
+{
+ /* This modified VIC cell has two register blocks, at 0 and 0x20 */
+ vic_init(io_p2v(NOMADIK_IC_BASE + 0x00), IRQ_VIC_START + 0, ~0, 0);
+ vic_init(io_p2v(NOMADIK_IC_BASE + 0x20), IRQ_VIC_START + 32, ~0, 0);
+}
+
+/*
+ * This function is called from the board init ("init_machine").
+ */
+void __init cpu8815_platform_init(void)
+{
+#ifdef CONFIG_CACHE_L2X0
+ /* At full speed latency must be >=2, so 0x249 in low bits */
+ l2x0_init(io_p2v(NOMADIK_L2CC_BASE), 0x00730249, 0xfe000fff);
+#endif
+ return;
+}
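For illustration only, the __MEM_4K_RESOURCE() helper above just fills in a one-page memory resource for each AMBA cell; with the base addresses from mach/hardware.h and SZ_4K = 0x1000, the GPIO0 entry expands to roughly:

	.res = {
		.start	= 0x101E4000,	/* NOMADIK_GPIO0_BASE */
		.end	= 0x101E4FFF,	/* start + SZ_4K - 1 */
		.flags	= IORESOURCE_MEM,
	},

This is the region later claimed by amba_request_regions() in the GPIO driver's probe routine.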
diff --git a/arch/arm/mach-nomadik/gpio.c b/arch/arm/mach-nomadik/gpio.c
new file mode 100644
index 000000000000..9a09b2791e03
--- /dev/null
+++ b/arch/arm/mach-nomadik/gpio.c
@@ -0,0 +1,396 @@
+/*
+ * Generic GPIO driver for logic cells found in the Nomadik SoC
+ *
+ * Copyright (C) 2008,2009 STMicroelectronics
+ * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
+ * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+
+/*
+ * The GPIO module in the Nomadik family of Systems-on-Chip is an
+ * AMBA device, managing 32 pins and alternate functions. The logic block
+ * is currently only used in the Nomadik.
+ *
+ * Symbols in this file are called "nmk_gpio" for "nomadik gpio"
+ */
+
+#define NMK_GPIO_PER_CHIP 32
+struct nmk_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *addr;
+ unsigned int parent_irq;
+ spinlock_t lock;
+ /* Keep track of configured edges */
+ u32 edge_rising;
+ u32 edge_falling;
+};
+
+/* Mode functions */
+int nmk_gpio_set_mode(int gpio, int gpio_mode)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 afunc, bfunc, bit;
+
+ nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ bit = 1 << (gpio - nmk_chip->chip.base);
+
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~bit;
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~bit;
+ if (gpio_mode & NMK_GPIO_ALT_A)
+ afunc |= bit;
+ if (gpio_mode & NMK_GPIO_ALT_B)
+ bfunc |= bit;
+ writel(afunc, nmk_chip->addr + NMK_GPIO_AFSLA);
+ writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB);
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(nmk_gpio_set_mode);
+
+int nmk_gpio_get_mode(int gpio)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ u32 afunc, bfunc, bit;
+
+ nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ bit = 1 << (gpio - nmk_chip->chip.base);
+
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & bit;
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & bit;
+
+ return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
+}
+EXPORT_SYMBOL(nmk_gpio_get_mode);
+
+
+/* IRQ functions */
+static inline int nmk_gpio_get_bitmask(int gpio)
+{
+ return 1 << (gpio % 32);
+}
+
+static void nmk_gpio_irq_ack(unsigned int irq)
+{
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(irq);
+ nmk_chip = get_irq_chip_data(irq);
+ if (!nmk_chip)
+ return;
+ writel(nmk_gpio_get_bitmask(gpio), nmk_chip->addr + NMK_GPIO_IC);
+}
+
+static void nmk_gpio_irq_mask(unsigned int irq)
+{
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 bitmask, reg;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(irq);
+ nmk_chip = get_irq_chip_data(irq);
+ bitmask = nmk_gpio_get_bitmask(gpio);
+ if (!nmk_chip)
+ return;
+
+ /* we must individually clear the two edges */
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+ if (nmk_chip->edge_rising & bitmask) {
+ reg = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ }
+ if (nmk_chip->edge_falling & bitmask) {
+ reg = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + NMK_GPIO_FWIMSC);
+ }
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+}
+
+static void nmk_gpio_irq_unmask(unsigned int irq)
+{
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 bitmask, reg;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(irq);
+ nmk_chip = get_irq_chip_data(irq);
+ bitmask = nmk_gpio_get_bitmask(gpio);
+ if (!nmk_chip)
+ return;
+
+ /* we must individually set the two edges */
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+ if (nmk_chip->edge_rising & bitmask) {
+ reg = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
+ reg |= bitmask;
+ writel(reg, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ }
+ if (nmk_chip->edge_falling & bitmask) {
+ reg = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
+ reg |= bitmask;
+ writel(reg, nmk_chip->addr + NMK_GPIO_FWIMSC);
+ }
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+}
+
+static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type)
+{
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 bitmask;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(irq);
+ nmk_chip = get_irq_chip_data(irq);
+ bitmask = nmk_gpio_get_bitmask(gpio);
+ if (!nmk_chip)
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+
+ nmk_chip->edge_rising &= ~bitmask;
+ if (type & IRQ_TYPE_EDGE_RISING)
+ nmk_chip->edge_rising |= bitmask;
+ writel(nmk_chip->edge_rising, nmk_chip->addr + NMK_GPIO_RIMSC);
+
+ nmk_chip->edge_falling &= ~bitmask;
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ nmk_chip->edge_falling |= bitmask;
+ writel(nmk_chip->edge_falling, nmk_chip->addr + NMK_GPIO_FIMSC);
+
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ nmk_gpio_irq_unmask(irq);
+
+ return 0;
+}
+
+static struct irq_chip nmk_gpio_irq_chip = {
+ .name = "Nomadik-GPIO",
+ .ack = nmk_gpio_irq_ack,
+ .mask = nmk_gpio_irq_mask,
+ .unmask = nmk_gpio_irq_unmask,
+ .set_type = nmk_gpio_irq_set_type,
+};
+
+static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ struct irq_chip *host_chip;
+ unsigned int gpio_irq;
+ u32 pending;
+ unsigned int first_irq;
+
+ nmk_chip = get_irq_data(irq);
+ first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
+ while ((pending = readl(nmk_chip->addr + NMK_GPIO_IS))) {
+ gpio_irq = first_irq + __ffs(pending);
+ generic_handle_irq(gpio_irq);
+ }
+ if (0) {/* don't ack parent irq, as ack == disable */
+ host_chip = get_irq_chip(irq);
+ host_chip->ack(irq);
+ }
+}
+
+static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
+{
+ unsigned int first_irq;
+ int i;
+
+ first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
+ for (i = first_irq; i < first_irq + NMK_GPIO_PER_CHIP; i++) {
+ set_irq_chip(i, &nmk_gpio_irq_chip);
+ set_irq_handler(i, handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ set_irq_chip_data(i, nmk_chip);
+ }
+ set_irq_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
+ set_irq_data(nmk_chip->parent_irq, nmk_chip);
+ return 0;
+}
+
+/* I/O Functions */
+static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+ return 0;
+}
+
+static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS);
+ return 0;
+}
+
+static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+ u32 bit = 1 << offset;
+
+ return (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0;
+}
+
+static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+ u32 bit = 1 << offset;
+
+ if (val)
+ writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
+ else
+ writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
+}
+
+/* This structure is replicated for each GPIO block allocated at probe time */
+static struct gpio_chip nmk_gpio_template = {
+ .direction_input = nmk_gpio_make_input,
+ .get = nmk_gpio_get_input,
+ .direction_output = nmk_gpio_make_output,
+ .set = nmk_gpio_set_output,
+ .ngpio = NMK_GPIO_PER_CHIP,
+ .can_sleep = 0,
+};
+
+static int __init nmk_gpio_probe(struct amba_device *dev, struct amba_id *id)
+{
+ struct nmk_gpio_platform_data *pdata;
+ struct nmk_gpio_chip *nmk_chip;
+ struct gpio_chip *chip;
+ int ret;
+
+ pdata = dev->dev.platform_data;
+ ret = amba_request_regions(dev, pdata->name);
+ if (ret)
+ return ret;
+
+ nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
+ if (!nmk_chip) {
+ ret = -ENOMEM;
+ goto out_amba;
+ }
+ /*
+ * The virt address in nmk_chip->addr is in the nomadik register space,
+ * so we can simply convert the resource address, without remapping
+ */
+ nmk_chip->addr = io_p2v(dev->res.start);
+ nmk_chip->chip = nmk_gpio_template;
+ nmk_chip->parent_irq = pdata->parent_irq;
+ spin_lock_init(&nmk_chip->lock);
+
+ chip = &nmk_chip->chip;
+ chip->base = pdata->first_gpio;
+ chip->label = pdata->name;
+ chip->dev = &dev->dev;
+ chip->owner = THIS_MODULE;
+
+ ret = gpiochip_add(&nmk_chip->chip);
+ if (ret)
+ goto out_free;
+
+ amba_set_drvdata(dev, nmk_chip);
+
+ nmk_gpio_init_irq(nmk_chip);
+
+ dev_info(&dev->dev, "Bits %i-%i at address %p\n",
+ nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
+ return 0;
+
+ out_free:
+ kfree(nmk_chip);
+ out_amba:
+ amba_release_regions(dev);
+ dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
+ pdata->first_gpio, pdata->first_gpio+31);
+ return ret;
+}
+
+static int nmk_gpio_remove(struct amba_device *dev)
+{
+ struct nmk_gpio_chip *nmk_chip;
+
+ nmk_chip = amba_get_drvdata(dev);
+ gpiochip_remove(&nmk_chip->chip);
+ kfree(nmk_chip);
+ amba_release_regions(dev);
+ return 0;
+}
+
+
+/* We have 0x1f080060 and 0x1f180060, accept both using the mask */
+static struct amba_id nmk_gpio_ids[] = {
+ {
+ .id = 0x1f080060,
+ .mask = 0xffefffff,
+ },
+ {0, 0},
+};
+
+static struct amba_driver nmk_gpio_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "gpio",
+ },
+ .probe = nmk_gpio_probe,
+ .remove = nmk_gpio_remove,
+ .suspend = NULL, /* to be done */
+ .resume = NULL,
+ .id_table = nmk_gpio_ids,
+};
+
+static int __init nmk_gpio_init(void)
+{
+ return amba_driver_register(&nmk_gpio_driver);
+}
+
+arch_initcall(nmk_gpio_init);
+
+MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
+MODULE_DESCRIPTION("Nomadik GPIO Driver");
+MODULE_LICENSE("GPL");
+
+
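As a rough usage sketch (illustration only; the pin number and the init hook are hypothetical, the calls are the standard gpiolib API plus the nmk_gpio_set_mode() helper exported above), a board file would first take a pin away from its alternate function and then use it through gpiolib:

	#include <linux/gpio.h>
	#include <mach/gpio.h>

	static int __init example_pin_init(void)	/* hypothetical board hook */
	{
		int err;

		/* detach pin 14 from alternate functions A/B/C */
		nmk_gpio_set_mode(14, NMK_GPIO_ALT_GPIO);

		err = gpio_request(14, "example-pin");
		if (err)
			return err;

		/* drive it as an output, initially high */
		return gpio_direction_output(14, 1);
	}

The i2c-8815nhk.c board file below follows the same pattern for its bit-banged SCL/SDA pins.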
diff --git a/arch/arm/mach-nomadik/i2c-8815nhk.c b/arch/arm/mach-nomadik/i2c-8815nhk.c
new file mode 100644
index 000000000000..abfe25a08d6b
--- /dev/null
+++ b/arch/arm/mach-nomadik/i2c-8815nhk.c
@@ -0,0 +1,65 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c-gpio.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+/*
+ * There are two I2C buses on the 8815NHK.
+ * They could, in theory, be driven by the hardware I2C controllers, but
+ * for now we bit-bang them through GPIO to keep things simple
+ */
+
+static struct i2c_gpio_platform_data nhk8815_i2c_data0 = {
+ /* keep defaults for timeouts; pins are push-pull bidirectional */
+ .scl_pin = 62,
+ .sda_pin = 63,
+};
+
+static struct i2c_gpio_platform_data nhk8815_i2c_data1 = {
+ /* keep defaults for timeouts; pins are push-pull bidirectional */
+ .scl_pin = 53,
+ .sda_pin = 54,
+};
+
+/* first bus: GPIO 62 (SCL) and 63 (SDA) */
+static struct platform_device nhk8815_i2c_dev0 = {
+ .name = "i2c-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &nhk8815_i2c_data0,
+ },
+};
+/* second bus: GPIO 53 (SCL) and 54 (SDA) */
+static struct platform_device nhk8815_i2c_dev1 = {
+ .name = "i2c-gpio",
+ .id = 1,
+ .dev = {
+ .platform_data = &nhk8815_i2c_data1,
+ },
+};
+
+static int __init nhk8815_i2c_init(void)
+{
+ nmk_gpio_set_mode(nhk8815_i2c_data0.scl_pin, NMK_GPIO_ALT_GPIO);
+ nmk_gpio_set_mode(nhk8815_i2c_data0.sda_pin, NMK_GPIO_ALT_GPIO);
+ platform_device_register(&nhk8815_i2c_dev0);
+
+ nmk_gpio_set_mode(nhk8815_i2c_data1.scl_pin, NMK_GPIO_ALT_GPIO);
+ nmk_gpio_set_mode(nhk8815_i2c_data1.sda_pin, NMK_GPIO_ALT_GPIO);
+ platform_device_register(&nhk8815_i2c_dev1);
+
+ return 0;
+}
+
+static void __exit nhk8815_i2c_exit(void)
+{
+ platform_device_unregister(&nhk8815_i2c_dev0);
+ platform_device_unregister(&nhk8815_i2c_dev1);
+ return;
+}
+
+module_init(nhk8815_i2c_init);
+module_exit(nhk8815_i2c_exit);
diff --git a/arch/arm/mach-nomadik/include/mach/clkdev.h b/arch/arm/mach-nomadik/include/mach/clkdev.h
new file mode 100644
index 000000000000..04b37a89801c
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_MACH_CLKDEV_H
+#define __ASM_MACH_CLKDEV_H
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif
diff --git a/arch/arm/mach-nomadik/include/mach/debug-macro.S b/arch/arm/mach-nomadik/include/mach/debug-macro.S
new file mode 100644
index 000000000000..e876990e1569
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/debug-macro.S
@@ -0,0 +1,22 @@
+/*
+ * Debugging macro include header
+ *
+ * Copyright (C) 1994-1999 Russell King
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0x10000000 @ physical base address
+ movne \rx, #0xf0000000 @ virtual base
+ add \rx, \rx, #0x00100000
+ add \rx, \rx, #0x000fb000
+ .endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-nomadik/include/mach/entry-macro.S b/arch/arm/mach-nomadik/include/mach/entry-macro.S
new file mode 100644
index 000000000000..49f1aa3bb420
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/entry-macro.S
@@ -0,0 +1,43 @@
+/*
+ * Low-level IRQ helper macros for Nomadik platforms
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =IO_ADDRESS(NOMADIK_IC_BASE)
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+
+ /* This stanza gets the irq mask from one of two status registers */
+ mov \irqnr, #0
+ ldr \irqstat, [\base, #VIC_REG_IRQSR0] @ get masked status
+ cmp \irqstat, #0
+ bne 1001f
+ add \irqnr, \irqnr, #32
+ ldr \irqstat, [\base, #VIC_REG_IRQSR1] @ get masked status
+
+1001: tst \irqstat, #15
+ bne 1002f
+ add \irqnr, \irqnr, #4
+ movs \irqstat, \irqstat, lsr #4
+ bne 1001b
+1002: tst \irqstat, #1
+ bne 1003f
+ add \irqnr, \irqnr, #1
+ movs \irqstat, \irqstat, lsr #1
+ bne 1002b
+1003: /* EQ will be set if no irqs pending */
+ .endm
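To make the scanning loop concrete, here is an illustrative trace for the case where only interrupt 12 is pending, i.e. IRQSR0 reads 0x1000:

	1001: low nibble 0000 -> irqnr += 4 (4),  irqstat = 0x0100
	1001: low nibble 0000 -> irqnr += 4 (8),  irqstat = 0x0010
	1001: low nibble 0000 -> irqnr += 4 (12), irqstat = 0x0001
	1002: bit 0 set       -> exit with \irqnr = 12 and NE set

With both status registers zero, the shifts drain irqstat to zero and execution falls through to 1003 with EQ set, meaning no interrupt is pending.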
diff --git a/arch/arm/mach-nomadik/include/mach/gpio.h b/arch/arm/mach-nomadik/include/mach/gpio.h
new file mode 100644
index 000000000000..61577c9f9a7d
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/gpio.h
@@ -0,0 +1,71 @@
+/*
+ * Structures and registers for GPIO access in the Nomadik SoC
+ *
+ * Copyright (C) 2008 STMicroelectronics
+ * Author: Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARCH_GPIO_H
+#define __ASM_ARCH_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+/*
+ * These currently cause a function call to happen; they may be optimized
+ * if needed by adding cpu-specific defines to identify blocks
+ * (see mach-pxa/include/mach/gpio.h as an example using GPLR etc)
+ */
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+/*
+ * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving
+ * the "gpio" namespace for generic and cross-machine functions
+ */
+
+/* Register in the logic block */
+#define NMK_GPIO_DAT 0x00
+#define NMK_GPIO_DATS 0x04
+#define NMK_GPIO_DATC 0x08
+#define NMK_GPIO_PDIS 0x0c
+#define NMK_GPIO_DIR 0x10
+#define NMK_GPIO_DIRS 0x14
+#define NMK_GPIO_DIRC 0x18
+#define NMK_GPIO_SLPC 0x1c
+#define NMK_GPIO_AFSLA 0x20
+#define NMK_GPIO_AFSLB 0x24
+
+#define NMK_GPIO_RIMSC 0x40
+#define NMK_GPIO_FIMSC 0x44
+#define NMK_GPIO_IS 0x48
+#define NMK_GPIO_IC 0x4c
+#define NMK_GPIO_RWIMSC 0x50
+#define NMK_GPIO_FWIMSC 0x54
+#define NMK_GPIO_WKS 0x58
+
+/* Alternate functions: function C is set in hw by setting both A and B */
+#define NMK_GPIO_ALT_GPIO 0
+#define NMK_GPIO_ALT_A 1
+#define NMK_GPIO_ALT_B 2
+#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
+
+extern int nmk_gpio_set_mode(int gpio, int gpio_mode);
+extern int nmk_gpio_get_mode(int gpio);
+
+/*
+ * Platform data to register a block: only the initial gpio/irq numbers are needed.
+ */
+struct nmk_gpio_platform_data {
+ char *name;
+ int first_gpio;
+ int first_irq;
+ int parent_irq;
+};
+
+#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-nomadik/include/mach/hardware.h b/arch/arm/mach-nomadik/include/mach/hardware.h
new file mode 100644
index 000000000000..6316dba3bfc8
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/hardware.h
@@ -0,0 +1,90 @@
+/*
+ * This file contains the hardware definitions of the Nomadik.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARCH_HARDWARE_H
+#define __ASM_ARCH_HARDWARE_H
+
+/* Nomadik registers live from 0x1000.0000 to 0x1023.0000 -- currently */
+#define NOMADIK_IO_VIRTUAL 0xF0000000 /* VA of IO */
+#define NOMADIK_IO_PHYSICAL 0x10000000 /* PA of IO */
+#define NOMADIK_IO_SIZE 0x00300000 /* 3MB for all regs */
+
+/* used in C code, so cast to proper type */
+#define io_p2v(x) ((void __iomem *)(x) \
+ - NOMADIK_IO_PHYSICAL + NOMADIK_IO_VIRTUAL)
+#define io_v2p(x) ((unsigned long)(x) \
+ - NOMADIK_IO_VIRTUAL + NOMADIK_IO_PHYSICAL)
+
+/* used in asm code, so no casts */
+#define IO_ADDRESS(x) ((x) - NOMADIK_IO_PHYSICAL + NOMADIK_IO_VIRTUAL)
+
+/*
+ * Base address definitions for the Nomadik on-chip logic blocks
+ */
+#define NOMADIK_FSMC_BASE 0x10100000 /* FSMC registers */
+#define NOMADIK_SDRAMC_BASE 0x10110000 /* SDRAM Controller */
+#define NOMADIK_CLCDC_BASE 0x10120000 /* CLCD Controller */
+#define NOMADIK_MDIF_BASE 0x10120000 /* MDIF */
+#define NOMADIK_DMA0_BASE 0x10130000 /* DMA0 Controller */
+#define NOMADIK_IC_BASE 0x10140000 /* Vectored Irq Controller */
+#define NOMADIK_DMA1_BASE 0x10150000 /* DMA1 Controller */
+#define NOMADIK_USB_BASE 0x10170000 /* USB-OTG conf reg base */
+#define NOMADIK_CRYP_BASE 0x10180000 /* Crypto processor */
+#define NOMADIK_SHA1_BASE 0x10190000 /* SHA-1 Processor */
+#define NOMADIK_XTI_BASE 0x101A0000 /* XTI */
+#define NOMADIK_RNG_BASE 0x101B0000 /* Random number generator */
+#define NOMADIK_SRC_BASE 0x101E0000 /* SRC base */
+#define NOMADIK_WDOG_BASE 0x101E1000 /* Watchdog */
+#define NOMADIK_MTU0_BASE 0x101E2000 /* Multiple Timer 0 */
+#define NOMADIK_MTU1_BASE 0x101E3000 /* Multiple Timer 1 */
+#define NOMADIK_GPIO0_BASE 0x101E4000 /* GPIO0 */
+#define NOMADIK_GPIO1_BASE 0x101E5000 /* GPIO1 */
+#define NOMADIK_GPIO2_BASE 0x101E6000 /* GPIO2 */
+#define NOMADIK_GPIO3_BASE 0x101E7000 /* GPIO3 */
+#define NOMADIK_RTC_BASE 0x101E8000 /* Real Time Clock base */
+#define NOMADIK_PMU_BASE 0x101E9000 /* Power Management Unit */
+#define NOMADIK_OWM_BASE 0x101EA000 /* One wire master */
+#define NOMADIK_SCR_BASE 0x101EF000 /* Secure Control registers */
+#define NOMADIK_MSP2_BASE 0x101F0000 /* MSP 2 interface */
+#define NOMADIK_MSP1_BASE 0x101F1000 /* MSP 1 interface */
+#define NOMADIK_UART2_BASE 0x101F2000 /* UART 2 interface */
+#define NOMADIK_SSIRx_BASE 0x101F3000 /* SSI 8-ch rx interface */
+#define NOMADIK_SSITx_BASE 0x101F4000 /* SSI 8-ch tx interface */
+#define NOMADIK_MSHC_BASE 0x101F5000 /* Memory Stick(Pro) Host */
+#define NOMADIK_SDI_BASE 0x101F6000 /* SD-card/MM-Card */
+#define NOMADIK_I2C1_BASE 0x101F7000 /* I2C1 interface */
+#define NOMADIK_I2C0_BASE 0x101F8000 /* I2C0 interface */
+#define NOMADIK_MSP0_BASE 0x101F9000 /* MSP 0 interface */
+#define NOMADIK_FIRDA_BASE 0x101FA000 /* FIrDA interface */
+#define NOMADIK_UART1_BASE 0x101FB000 /* UART 1 interface */
+#define NOMADIK_SSP_BASE 0x101FC000 /* SSP interface */
+#define NOMADIK_UART0_BASE 0x101FD000 /* UART 0 interface */
+#define NOMADIK_SGA_BASE 0x101FE000 /* SGA interface */
+#define NOMADIK_L2CC_BASE 0x10210000 /* L2 Cache controller */
+
+/* Other ranges, not for p2v/v2p */
+#define NOMADIK_BACKUP_RAM 0x80010000
+#define NOMADIK_EBROM 0x80000000 /* Embedded boot ROM */
+#define NOMADIK_HAMACV_DMEM_BASE 0xA0100000 /* HAMACV Data Memory Start */
+#define NOMADIK_HAMACV_DMEM_END 0xA01FFFFF /* HAMACV Data Memory End */
+#define NOMADIK_HAMACA_DMEM 0xA0200000 /* HAMACA Data Memory Space */
+
+#define NOMADIK_FSMC_VA IO_ADDRESS(NOMADIK_FSMC_BASE)
+#define NOMADIK_MTU0_VA IO_ADDRESS(NOMADIK_MTU0_BASE)
+#define NOMADIK_MTU1_VA IO_ADDRESS(NOMADIK_MTU1_BASE)
+
+#endif /* __ASM_ARCH_HARDWARE_H */
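As a worked example of the fixed mapping (illustration only, using the constants above):

	/*
	 * io_p2v()/io_v2p() just shift an address between the two windows:
	 *
	 *   io_p2v(NOMADIK_GPIO1_BASE)
	 *	== (void __iomem *)(0x101E5000 - 0x10000000 + 0xF0000000)
	 *	== (void __iomem *)0xF01E5000
	 *
	 *   io_v2p(0xF01E5000) == 0x101E5000
	 */

This is why gpio.c can use io_p2v(dev->res.start) instead of ioremap(): the whole 3MB register window is statically mapped by nomadik_io_desc in cpu-8815.c.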
diff --git a/arch/arm/mach-nomadik/include/mach/io.h b/arch/arm/mach-nomadik/include/mach/io.h
new file mode 100644
index 000000000000..2e1eca1b8243
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/io.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-nomadik/include/mach/io.h (copied from mach-sa1100)
+ *
+ * Copyright (C) 1997-1999 Russell King
+ *
+ * Modifications:
+ * 06-12-1997 RMK Created.
+ * 07-04-1999 RMK Major cleanup
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+/*
+ * We don't actually have real ISA or PCI buses, but there are so many
+ * drivers out there that might just work if we fake them...
+ */
+#define __io(a) __typesafe_io(a)
+#define __mem_pci(a) (a)
+
+#endif
diff --git a/arch/arm/mach-nomadik/include/mach/irqs.h b/arch/arm/mach-nomadik/include/mach/irqs.h
new file mode 100644
index 000000000000..8faabc560398
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/irqs.h
@@ -0,0 +1,82 @@
+/*
+ * mach-nomadik/include/mach/irqs.h
+ *
+ * Copyright (C) ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARCH_IRQS_H
+#define __ASM_ARCH_IRQS_H
+
+#include <mach/hardware.h>
+
+#define IRQ_VIC_START 0 /* first VIC interrupt is 0 */
+
+/*
+ * Interrupt numbers generic for all Nomadik Chip cuts
+ */
+#define IRQ_WATCHDOG 0
+#define IRQ_SOFTINT 1
+#define IRQ_CRYPTO 2
+#define IRQ_OWM 3
+#define IRQ_MTU0 4
+#define IRQ_MTU1 5
+#define IRQ_GPIO0 6
+#define IRQ_GPIO1 7
+#define IRQ_GPIO2 8
+#define IRQ_GPIO3 9
+#define IRQ_RTC_RTT 10
+#define IRQ_SSP 11
+#define IRQ_UART0 12
+#define IRQ_DMA1 13
+#define IRQ_CLCD_MDIF 14
+#define IRQ_DMA0 15
+#define IRQ_PWRFAIL 16
+#define IRQ_UART1 17
+#define IRQ_FIRDA 18
+#define IRQ_MSP0 19
+#define IRQ_I2C0 20
+#define IRQ_I2C1 21
+#define IRQ_SDMMC 22
+#define IRQ_USBOTG 23
+#define IRQ_SVA_IT0 24
+#define IRQ_SVA_IT1 25
+#define IRQ_SAA_IT0 26
+#define IRQ_SAA_IT1 27
+#define IRQ_UART2 28
+#define IRQ_MSP2 31
+#define IRQ_L2CC 48
+#define IRQ_HPI 49
+#define IRQ_SKE 50
+#define IRQ_KP 51
+#define IRQ_MEMST 54
+#define IRQ_SGA_IT 58
+#define IRQ_USBM 60
+#define IRQ_MSP1 62
+
+#define NOMADIK_SOC_NR_IRQS 64
+
+/* After chip-specific IRQ numbers we have the GPIO ones */
+#define NOMADIK_NR_GPIO 128 /* last 4 not wired to pins */
+#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + NOMADIK_SOC_NR_IRQS)
+#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - NOMADIK_SOC_NR_IRQS)
+#define NR_IRQS NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
+
+/* Following two are used by entry_macro.S, to access our dual-vic */
+#define VIC_REG_IRQSR0 0
+#define VIC_REG_IRQSR1 0x20
+
+#endif /* __ASM_ARCH_IRQS_H */
+
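To make the numbering concrete, the translation macros above give, for example:

	/*
	 *   NOMADIK_GPIO_TO_IRQ(0)   ==  64   (first pin of "GPIO-0-31")
	 *   NOMADIK_GPIO_TO_IRQ(96)  == 160   (first pin of "GPIO-96-127")
	 *   NOMADIK_IRQ_TO_GPIO(70)  ==   6
	 *   NR_IRQS                  == 192   (64 SoC IRQs + 128 GPIO IRQs)
	 */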
diff --git a/arch/arm/mach-nomadik/include/mach/memory.h b/arch/arm/mach-nomadik/include/mach/memory.h
new file mode 100644
index 000000000000..1e5689d98ecd
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/memory.h
@@ -0,0 +1,28 @@
+/*
+ * mach-nomadik/include/mach/memory.h
+ *
+ * Copyright (C) 1999 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARCH_MEMORY_H
+#define __ASM_ARCH_MEMORY_H
+
+/*
+ * Physical DRAM offset.
+ */
+#define PHYS_OFFSET UL(0x00000000)
+
+#endif
diff --git a/arch/arm/mach-nomadik/include/mach/mtu.h b/arch/arm/mach-nomadik/include/mach/mtu.h
new file mode 100644
index 000000000000..76da7f085330
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/mtu.h
@@ -0,0 +1,45 @@
+#ifndef __ASM_ARCH_MTU_H
+#define __ASM_ARCH_MTU_H
+
+/*
+ * The MTU device hosts four different counters, with four sets of
+ * registers. These are the register names.
+ */
+
+#define MTU_IMSC 0x00 /* Interrupt mask set/clear */
+#define MTU_RIS 0x04 /* Raw interrupt status */
+#define MTU_MIS 0x08 /* Masked interrupt status */
+#define MTU_ICR 0x0C /* Interrupt clear register */
+
+/* per-timer registers take 0..3 as argument */
+#define MTU_LR(x) (0x10 + 0x10 * (x) + 0x00) /* Load value */
+#define MTU_VAL(x) (0x10 + 0x10 * (x) + 0x04) /* Current value */
+#define MTU_CR(x) (0x10 + 0x10 * (x) + 0x08) /* Control reg */
+#define MTU_BGLR(x) (0x10 + 0x10 * (x) + 0x0c) /* At next overflow */
+
+/* bits for the control register */
+#define MTU_CRn_ENA 0x80
+#define MTU_CRn_PERIODIC 0x40 /* if 0 = free-running */
+#define MTU_CRn_PRESCALE_MASK 0x0c
+#define MTU_CRn_PRESCALE_1 0x00
+#define MTU_CRn_PRESCALE_16 0x04
+#define MTU_CRn_PRESCALE_256 0x08
+#define MTU_CRn_32BITS 0x02
+#define MTU_CRn_ONESHOT 0x01 /* if 0 = wraps reloading from BGLR*/
+
+/* The other registers are the usual AMBA/PrimeCell registers, currently not used */
+#define MTU_ITCR 0xff0
+#define MTU_ITOP 0xff4
+
+#define MTU_PERIPH_ID0 0xfe0
+#define MTU_PERIPH_ID1 0xfe4
+#define MTU_PERIPH_ID2 0xfe8
+#define MTU_PERIPH_ID3 0xfeC
+
+#define MTU_PCELL0 0xff0
+#define MTU_PCELL1 0xff4
+#define MTU_PCELL2 0xff8
+#define MTU_PCELL3 0xffC
+
+#endif /* __ASM_ARCH_MTU_H */
+
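As a quick check of the per-timer layout, the macros above expand, for example, to:

	/*
	 *   MTU_LR(0)  == 0x10      MTU_VAL(0)  == 0x14
	 *   MTU_CR(2)  == 0x38      MTU_BGLR(3) == 0x4c
	 */

and a typical 32-bit periodic setup, matching what nmdk_timer_reset() in timer.c below does, is:

	u32 cr = MTU_CRn_PERIODIC | MTU_CRn_PRESCALE_1 | MTU_CRn_32BITS;
	writel(cr, mtu_base + MTU_CR(0));
	writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));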
diff --git a/arch/arm/mach-nomadik/include/mach/setup.h b/arch/arm/mach-nomadik/include/mach/setup.h
new file mode 100644
index 000000000000..a4e468cf63da
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/setup.h
@@ -0,0 +1,22 @@
+
+/*
+ * These symbols are needed for board-specific files to call into
+ * their cpu-specific setup code
+ */
+
+#ifndef __ASM_ARCH_SETUP_H
+#define __ASM_ARCH_SETUP_H
+
+#include <asm/mach/time.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_NOMADIK_8815
+
+extern void cpu8815_map_io(void);
+extern void cpu8815_platform_init(void);
+extern void cpu8815_init_irq(void);
+extern struct sys_timer nomadik_timer;
+
+#endif /* NOMADIK_8815 */
+
+#endif /* __ASM_ARCH_SETUP_H */
diff --git a/arch/arm/mach-nomadik/include/mach/system.h b/arch/arm/mach-nomadik/include/mach/system.h
new file mode 100644
index 000000000000..7119f688116e
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/system.h
@@ -0,0 +1,45 @@
+/*
+ * mach-nomadik/include/mach/system.h
+ *
+ * Copyright (C) 2008 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARCH_SYSTEM_H
+#define __ASM_ARCH_SYSTEM_H
+
+#include <linux/io.h>
+#include <mach/hardware.h>
+
+static inline void arch_idle(void)
+{
+ /*
+ * This should do all the clock switching
+ * and wait for interrupt tricks
+ */
+ cpu_do_idle();
+}
+
+static inline void arch_reset(char mode, const char *cmd)
+{
+ void __iomem *src_rstsr = io_p2v(NOMADIK_SRC_BASE + 0x18);
+
+ /* FIXME: use egpio when implemented */
+
+ /* Write anything to Reset status register */
+ writel(1, src_rstsr);
+}
+
+#endif
diff --git a/arch/arm/mach-nomadik/include/mach/timex.h b/arch/arm/mach-nomadik/include/mach/timex.h
new file mode 100644
index 000000000000..318b8896ce96
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/timex.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_ARCH_TIMEX_H
+#define __ASM_ARCH_TIMEX_H
+
+#define CLOCK_TICK_RATE 2400000
+
+#endif
diff --git a/arch/arm/mach-nomadik/include/mach/uncompress.h b/arch/arm/mach-nomadik/include/mach/uncompress.h
new file mode 100644
index 000000000000..071003bc8456
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/uncompress.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARCH_UNCOMPRESS_H
+#define __ASM_ARCH_UNCOMPRESS_H
+
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <mach/hardware.h>
+
+/* we need the constants in amba/serial.h, but it refers to amba_device */
+struct amba_device;
+#include <linux/amba/serial.h>
+
+#define NOMADIK_UART_DR 0x101FB000
+#define NOMADIK_UART_LCRH 0x101FB02c
+#define NOMADIK_UART_CR 0x101FB030
+#define NOMADIK_UART_FR 0x101FB018
+
+static void putc(const char c)
+{
+ /* Do nothing if the UART is not enabled. */
+ if (!(readb(NOMADIK_UART_CR) & UART01x_CR_UARTEN))
+ return;
+
+ if (c == '\n')
+ putc('\r');
+
+ while (readb(NOMADIK_UART_FR) & UART01x_FR_TXFF)
+ barrier();
+ writeb(c, NOMADIK_UART_DR);
+}
+
+static void flush(void)
+{
+ if (!(readb(NOMADIK_UART_CR) & UART01x_CR_UARTEN))
+ return;
+ while (readb(NOMADIK_UART_FR) & UART01x_FR_BUSY)
+ barrier();
+}
+
+static inline void arch_decomp_setup(void)
+{
+}
+
+#define arch_decomp_wdog() /* nothing to do here */
+
+#endif /* __ASM_ARCH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-nomadik/include/mach/vmalloc.h b/arch/arm/mach-nomadik/include/mach/vmalloc.h
new file mode 100644
index 000000000000..be12e31ea528
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/vmalloc.h
@@ -0,0 +1,2 @@
+
+#define VMALLOC_END 0xe8000000
diff --git a/arch/arm/mach-nomadik/timer.c b/arch/arm/mach-nomadik/timer.c
new file mode 100644
index 000000000000..d1738e7061d4
--- /dev/null
+++ b/arch/arm/mach-nomadik/timer.c
@@ -0,0 +1,164 @@
+/*
+ * linux/arch/arm/mach-nomadik/timer.c
+ *
+ * Copyright (C) 2008 STMicroelectronics
+ * Copyright (C) 2009 Alessandro Rubini, somewhat based on at91sam926x
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/jiffies.h>
+#include <asm/mach/time.h>
+#include <mach/mtu.h>
+
+#define TIMER_CTRL 0x80 /* No divisor */
+#define TIMER_PERIODIC 0x40
+#define TIMER_SZ32BIT 0x02
+
+/* Initial value for SRC control register: all timers use MXTAL/8 source */
+#define SRC_CR_INIT_MASK 0x00007fff
+#define SRC_CR_INIT_VAL 0x2aaa8000
+
+static u32 nmdk_count; /* accumulated count */
+static u32 nmdk_cycle; /* write-once */
+static __iomem void *mtu_base;
+
+/*
+ * clocksource: the MTU device is a decrementing counter, so we subtract
+ * the current value from the cycle length, adding the accumulated count.
+ */
+static cycle_t nmdk_read_timer(struct clocksource *cs)
+{
+ u32 count = readl(mtu_base + MTU_VAL(0));
+ return nmdk_count + nmdk_cycle - count;
+}
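A worked example of the accumulation (illustration only, assuming HZ = 100):

	/*
	 *   nmdk_cycle = (2400000 + 100/2) / 100 = 24000
	 *   down-counter reads 18000  =>  24000 - 18000 = 6000 cycles elapsed
	 *   nmdk_read_timer() returns nmdk_count + 6000
	 *
	 * nmdk_count itself is advanced by nmdk_cycle on every tick interrupt.
	 */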
+
+static struct clocksource nmdk_clksrc = {
+ .name = "mtu_0",
+ .rating = 120,
+ .read = nmdk_read_timer,
+ .shift = 20,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
+ * Clockevent device: currently only periodic mode is supported
+ */
+static void nmdk_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
+{
+ unsigned long flags;
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* enable interrupts -- and count current value? */
+ raw_local_irq_save(flags);
+ writel(readl(mtu_base + MTU_IMSC) | 1, mtu_base + MTU_IMSC);
+ raw_local_irq_restore(flags);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ BUG(); /* Not supported, yet */
+ /* FALLTHROUGH */
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ /* disable irq */
+ raw_local_irq_save(flags);
+ writel(readl(mtu_base + MTU_IMSC) & ~1, mtu_base + MTU_IMSC);
+ raw_local_irq_restore(flags);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static struct clock_event_device nmdk_clkevt = {
+ .name = "mtu_0",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .shift = 32,
+ .rating = 100,
+ .set_mode = nmdk_clkevt_mode,
+};
+
+/*
+ * IRQ handler for timer 0 of the MTU block. The irq is not shared
+ * as we are the only user of mtu0 for now.
+ */
+static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
+{
+ /* ack: "interrupt clear register" */
+ writel(1 << 0, mtu_base + MTU_ICR);
+
+ /* we can't count lost ticks, unfortunately */
+ nmdk_count += nmdk_cycle;
+ nmdk_clkevt.event_handler(&nmdk_clkevt);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * The tick irqaction, registered on MTU timer 0 by nmdk_timer_init() below.
+ */
+static struct irqaction nmdk_timer_irq = {
+ .name = "Nomadik Timer Tick",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = nmdk_timer_interrupt,
+};
+
+static void nmdk_timer_reset(void)
+{
+ u32 cr;
+
+ writel(0, mtu_base + MTU_CR(0)); /* off */
+
+ /* configure load and background-load, and fire it up */
+ writel(nmdk_cycle, mtu_base + MTU_LR(0));
+ writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
+ cr = MTU_CRn_PERIODIC | MTU_CRn_PRESCALE_1 | MTU_CRn_32BITS;
+ writel(cr, mtu_base + MTU_CR(0));
+ writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
+}
+
+static void __init nmdk_timer_init(void)
+{
+ u32 src_cr;
+ unsigned long rate;
+ int bits;
+
+ rate = CLOCK_TICK_RATE; /* 2.4MHz */
+ nmdk_cycle = (rate + HZ/2) / HZ;
+
+ /* Configure timer sources in "system reset controller" ctrl reg */
+ src_cr = readl(io_p2v(NOMADIK_SRC_BASE));
+ src_cr &= SRC_CR_INIT_MASK;
+ src_cr |= SRC_CR_INIT_VAL;
+ writel(src_cr, io_p2v(NOMADIK_SRC_BASE));
+
+ /* Save global pointer to mtu, used by functions above */
+ mtu_base = io_p2v(NOMADIK_MTU0_BASE);
+
+ /* Init the timer and register clocksource */
+ nmdk_timer_reset();
+
+ nmdk_clksrc.mult = clocksource_hz2mult(rate, nmdk_clksrc.shift);
+ bits = 8*sizeof(nmdk_count);
+ nmdk_clksrc.mask = CLOCKSOURCE_MASK(bits);
+
+ clocksource_register(&nmdk_clksrc);
+
+ /* Register irq and clockevents */
+ setup_irq(IRQ_MTU0, &nmdk_timer_irq);
+ nmdk_clkevt.mult = div_sc(rate, NSEC_PER_SEC, nmdk_clkevt.shift);
+ nmdk_clkevt.cpumask = cpumask_of(0);
+ clockevents_register_device(&nmdk_clkevt);
+}
+
+struct sys_timer nomadik_timer = {
+ .init = nmdk_timer_init,
+};
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index e70fc7c66bbb..ed2a48a9ce74 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -36,7 +36,6 @@
#include <mach/hwa742.h>
#include <mach/lcd_mipid.h>
#include <mach/mmc.h>
-#include <mach/usb.h>
#include <mach/clock.h>
#define ADS7846_PENDOWN_GPIO 15
@@ -205,9 +204,11 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot)
static struct omap_mmc_platform_data nokia770_mmc2_data = {
.nr_slots = 1,
.dma_mask = 0xffffffff,
+ .max_freq = 12000000,
.slots[0] = {
.set_power = nokia770_mmc_set_power,
.get_cover_state = nokia770_mmc_get_cover_state,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.name = "mmcblk",
},
};
diff --git a/arch/arm/mach-omap1/mailbox.c b/arch/arm/mach-omap1/mailbox.c
index 0af4d6c85b47..6810b4aeb02c 100644
--- a/arch/arm/mach-omap1/mailbox.c
+++ b/arch/arm/mach-omap1/mailbox.c
@@ -203,5 +203,5 @@ module_exit(omap1_mbox_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions");
-MODULE_AUTHOR("Hiroshi DOYU" <Hiroshi.DOYU@nokia.com>);
+MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_ALIAS("platform:omap1-mailbox");
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index da93b86234ed..9a0bf6744a05 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -362,6 +362,7 @@ static struct omap_onenand_platform_data board_onenand_data = {
.gpio_irq = 65,
.parts = onenand_partitions,
.nr_parts = ARRAY_SIZE(onenand_partitions),
+ .flags = ONENAND_SYNC_READWRITE,
};
static void __init board_onenand_init(void)
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 2fd22f9c5f0e..54fec53a48e7 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -31,6 +31,8 @@ static struct platform_device gpmc_onenand_device = {
static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
{
struct gpmc_timings t;
+ u32 reg;
+ int err;
const int t_cer = 15;
const int t_avdp = 12;
@@ -43,6 +45,11 @@ static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
const int t_wpl = 40;
const int t_wph = 30;
+ /* Ensure sync read and sync write are disabled */
+ reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
+ reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE;
+ writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
+
memset(&t, 0, sizeof(t));
t.sync_clk = 0;
t.cs_on = 0;
@@ -74,7 +81,16 @@ static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
GPMC_CONFIG1_DEVICESIZE_16 |
GPMC_CONFIG1_MUXADDDATA);
- return gpmc_cs_set_timings(cs, &t);
+ err = gpmc_cs_set_timings(cs, &t);
+ if (err)
+ return err;
+
+ /* Ensure sync read and sync write are disabled */
+ reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
+ reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE;
+ writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
+
+ return 0;
}
static void set_onenand_cfg(void __iomem *onenand_base, int latency,
@@ -124,7 +140,8 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
} else if (cfg->flags & ONENAND_SYNC_READWRITE) {
sync_read = 1;
sync_write = 1;
- }
+ } else
+ return omap2_onenand_set_async_mode(cs, onenand_base);
if (!freq) {
/* Very first call freq is not known */
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 458990e20c60..a98201cc265c 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -48,6 +48,28 @@ int omap_chip_is(struct omap_chip_id oci)
}
EXPORT_SYMBOL(omap_chip_is);
+int omap_type(void)
+{
+ u32 val = 0;
+
+ if (cpu_is_omap24xx())
+ val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
+ else if (cpu_is_omap34xx())
+ val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
+ else {
+ pr_err("Cannot detect omap type!\n");
+ goto out;
+ }
+
+ val &= OMAP2_DEVICETYPE_MASK;
+ val >>= 8;
+
+out:
+ return val;
+}
+EXPORT_SYMBOL(omap_type);
+
+
/*----------------------------------------------------------------------------*/
#define OMAP_TAP_IDCODE 0x0204
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index fd5b8a5925cc..6f71f3730c97 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -282,12 +282,12 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev)
return -ENOMEM;
/* DSP or IVA2 IRQ */
- mbox_dsp_info.irq = platform_get_irq(pdev, 0);
- if (mbox_dsp_info.irq < 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
dev_err(&pdev->dev, "invalid irq resource\n");
- ret = -ENODEV;
goto err_dsp;
}
+ mbox_dsp_info.irq = ret;
ret = omap_mbox_register(&pdev->dev, &mbox_dsp_info);
if (ret)
diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c
index 9756a878fd90..1541fd4c8d0f 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.c
+++ b/arch/arm/mach-omap2/mmc-twl4030.c
@@ -263,8 +263,19 @@ static int twl_mmc1_set_power(struct device *dev, int slot, int power_on,
static int twl_mmc23_set_power(struct device *dev, int slot, int power_on, int vdd)
{
int ret = 0;
- struct twl_mmc_controller *c = &hsmmc[1];
+ struct twl_mmc_controller *c = NULL;
struct omap_mmc_platform_data *mmc = dev->platform_data;
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(hsmmc); i++) {
+ if (mmc == hsmmc[i].mmc) {
+ c = &hsmmc[i];
+ break;
+ }
+ }
+
+ if (c == NULL)
+ return -ENODEV;
/* If we don't see a Vcc regulator, assume it's a fixed
* voltage always-on regulator.
diff --git a/arch/arm/mach-realview/include/mach/gpio.h b/arch/arm/mach-realview/include/mach/gpio.h
new file mode 100644
index 000000000000..306ab4c9f2aa
--- /dev/null
+++ b/arch/arm/mach-realview/include/mach/gpio.h
@@ -0,0 +1 @@
+#include <asm-generic/gpio.h>
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 8dfa44e08a94..abd13b448671 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
+#include <linux/amba/pl061.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -113,6 +114,21 @@ static void __init realview_eb_map_io(void)
iotable_init(realview_eb11mp_io_desc, ARRAY_SIZE(realview_eb11mp_io_desc));
}
+static struct pl061_platform_data gpio0_plat_data = {
+ .gpio_base = 0,
+ .irq_base = -1,
+};
+
+static struct pl061_platform_data gpio1_plat_data = {
+ .gpio_base = 8,
+ .irq_base = -1,
+};
+
+static struct pl061_platform_data gpio2_plat_data = {
+ .gpio_base = 16,
+ .irq_base = -1,
+};
+
/*
* RealView EB AMBA devices
*/
@@ -189,9 +205,9 @@ AMBA_DEVICE(clcd, "dev:20", EB_CLCD, &clcd_plat_data);
AMBA_DEVICE(dmac, "dev:30", DMAC, NULL);
AMBA_DEVICE(sctl, "dev:e0", SCTL, NULL);
AMBA_DEVICE(wdog, "dev:e1", EB_WATCHDOG, NULL);
-AMBA_DEVICE(gpio0, "dev:e4", EB_GPIO0, NULL);
-AMBA_DEVICE(gpio1, "dev:e5", GPIO1, NULL);
-AMBA_DEVICE(gpio2, "dev:e6", GPIO2, NULL);
+AMBA_DEVICE(gpio0, "dev:e4", EB_GPIO0, &gpio0_plat_data);
+AMBA_DEVICE(gpio1, "dev:e5", GPIO1, &gpio1_plat_data);
+AMBA_DEVICE(gpio2, "dev:e6", GPIO2, &gpio2_plat_data);
AMBA_DEVICE(rtc, "dev:e8", EB_RTC, NULL);
AMBA_DEVICE(sci0, "dev:f0", SCI, NULL);
AMBA_DEVICE(uart0, "dev:f1", EB_UART0, NULL);
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index 6a5bc3021bdb..ec71a6965786 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -48,8 +48,6 @@
#include <plat/mci.h>
#include <plat/udc.h>
-#include <plat/regs-serial.h>
-
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
@@ -275,6 +273,7 @@ static struct s3c2410_nand_set mini2440_nand_sets[] __initdata = {
.nr_chips = 1,
.nr_partitions = ARRAY_SIZE(mini2440_default_nand_part),
.partitions = mini2440_default_nand_part,
+ .flash_bbt = 1, /* we use u-boot to create a BBT */
},
};
diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c
index e23b581aa0e1..0fb385bd9cd9 100644
--- a/arch/arm/mach-s3c2442/mach-gta02.c
+++ b/arch/arm/mach-s3c2442/mach-gta02.c
@@ -433,8 +433,7 @@ static struct s3c2410_nand_set gta02_nand_sets[] = {
*/
.name = "neo1973-nand",
.nr_chips = 1,
- .use_bbt = 1,
- .force_soft_ecc = 1,
+ .flash_bbt = 1,
},
};
diff --git a/arch/arm/mach-u300/clock.c b/arch/arm/mach-u300/clock.c
index 5cd04d6751b3..111f7ea32b38 100644
--- a/arch/arm/mach-u300/clock.c
+++ b/arch/arm/mach-u300/clock.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/io.h>
+#include <linux/seq_file.h>
#include <asm/clkdev.h>
#include <mach/hardware.h>
@@ -702,6 +703,7 @@ static struct clk amba_clk = {
.rate = 52000000, /* this varies! */
.hw_ctrld = true,
.reset = false,
+ .lock = __SPIN_LOCK_UNLOCKED(amba_clk.lock),
};
/*
@@ -720,6 +722,7 @@ static struct clk cpu_clk = {
.set_rate = clk_set_rate_cpuclk,
.get_rate = clk_get_rate_cpuclk,
.round_rate = clk_round_rate_cpuclk,
+ .lock = __SPIN_LOCK_UNLOCKED(cpu_clk.lock),
};
static struct clk nandif_clk = {
@@ -732,6 +735,7 @@ static struct clk nandif_clk = {
.clk_val = U300_SYSCON_SBCER_NANDIF_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(nandif_clk.lock),
};
static struct clk semi_clk = {
@@ -744,6 +748,7 @@ static struct clk semi_clk = {
.clk_val = U300_SYSCON_SBCER_SEMI_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(semi_clk.lock),
};
#ifdef CONFIG_MACH_U300_BS335
@@ -758,6 +763,7 @@ static struct clk isp_clk = {
.clk_val = U300_SYSCON_SBCER_ISP_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(isp_clk.lock),
};
static struct clk cds_clk = {
@@ -771,6 +777,7 @@ static struct clk cds_clk = {
.clk_val = U300_SYSCON_SBCER_CDS_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(cds_clk.lock),
};
#endif
@@ -785,6 +792,7 @@ static struct clk dma_clk = {
.clk_val = U300_SYSCON_SBCER_DMAC_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(dma_clk.lock),
};
static struct clk aaif_clk = {
@@ -798,6 +806,7 @@ static struct clk aaif_clk = {
.clk_val = U300_SYSCON_SBCER_AAIF_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(aaif_clk.lock),
};
static struct clk apex_clk = {
@@ -811,6 +820,7 @@ static struct clk apex_clk = {
.clk_val = U300_SYSCON_SBCER_APEX_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(apex_clk.lock),
};
static struct clk video_enc_clk = {
@@ -825,6 +835,7 @@ static struct clk video_enc_clk = {
.clk_val = U300_SYSCON_SBCER_VIDEO_ENC_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(video_enc_clk.lock),
};
static struct clk xgam_clk = {
@@ -839,6 +850,7 @@ static struct clk xgam_clk = {
.get_rate = clk_get_rate_xgamclk,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(xgam_clk.lock),
};
/* This clock is used to activate the video encoder */
@@ -854,6 +866,7 @@ static struct clk ahb_clk = {
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
.get_rate = clk_get_rate_ahb_clk,
+ .lock = __SPIN_LOCK_UNLOCKED(ahb_clk.lock),
};
@@ -871,6 +884,7 @@ static struct clk ahb_subsys_clk = {
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
.get_rate = clk_get_rate_ahb_clk,
+ .lock = __SPIN_LOCK_UNLOCKED(ahb_subsys_clk.lock),
};
static struct clk intcon_clk = {
@@ -882,6 +896,8 @@ static struct clk intcon_clk = {
.res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
.res_mask = U300_SYSCON_RRR_INTCON_RESET_EN,
/* INTCON can be reset but not clock-gated */
+ .lock = __SPIN_LOCK_UNLOCKED(intcon_clk.lock),
+
};
static struct clk mspro_clk = {
@@ -895,6 +911,7 @@ static struct clk mspro_clk = {
.clk_val = U300_SYSCON_SBCER_MSPRO_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(mspro_clk.lock),
};
static struct clk emif_clk = {
@@ -909,6 +926,7 @@ static struct clk emif_clk = {
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
.get_rate = clk_get_rate_emif_clk,
+ .lock = __SPIN_LOCK_UNLOCKED(emif_clk.lock),
};
@@ -926,6 +944,7 @@ static struct clk fast_clk = {
.clk_val = U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(fast_clk.lock),
};
static struct clk mmcsd_clk = {
@@ -942,6 +961,7 @@ static struct clk mmcsd_clk = {
.round_rate = clk_round_rate_mclk,
.disable = syscon_clk_disable,
.enable = syscon_clk_enable,
+ .lock = __SPIN_LOCK_UNLOCKED(mmcsd_clk.lock),
};
static struct clk i2s0_clk = {
@@ -956,6 +976,7 @@ static struct clk i2s0_clk = {
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
.get_rate = clk_get_rate_i2s_i2c_spi,
+ .lock = __SPIN_LOCK_UNLOCKED(i2s0_clk.lock),
};
static struct clk i2s1_clk = {
@@ -970,6 +991,7 @@ static struct clk i2s1_clk = {
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
.get_rate = clk_get_rate_i2s_i2c_spi,
+ .lock = __SPIN_LOCK_UNLOCKED(i2s1_clk.lock),
};
static struct clk i2c0_clk = {
@@ -984,6 +1006,7 @@ static struct clk i2c0_clk = {
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
.get_rate = clk_get_rate_i2s_i2c_spi,
+ .lock = __SPIN_LOCK_UNLOCKED(i2c0_clk.lock),
};
static struct clk i2c1_clk = {
@@ -998,6 +1021,7 @@ static struct clk i2c1_clk = {
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
.get_rate = clk_get_rate_i2s_i2c_spi,
+ .lock = __SPIN_LOCK_UNLOCKED(i2c1_clk.lock),
};
static struct clk spi_clk = {
@@ -1012,6 +1036,7 @@ static struct clk spi_clk = {
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
.get_rate = clk_get_rate_i2s_i2c_spi,
+ .lock = __SPIN_LOCK_UNLOCKED(spi_clk.lock),
};
#ifdef CONFIG_MACH_U300_BS335
@@ -1026,6 +1051,7 @@ static struct clk uart1_clk = {
.clk_val = U300_SYSCON_SBCER_UART1_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(uart1_clk.lock),
};
#endif
@@ -1044,6 +1070,7 @@ static struct clk slow_clk = {
.clk_val = U300_SYSCON_SBCER_SLOW_BRIDGE_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(slow_clk.lock),
};
/* TODO: implement SYSCON clock? */
@@ -1055,6 +1082,7 @@ static struct clk wdog_clk = {
.rate = 32768,
.reset = false,
/* This is always on, cannot be enabled/disabled or reset */
+ .lock = __SPIN_LOCK_UNLOCKED(wdog_clk.lock),
};
/* This one is hardwired to PLL13 */
@@ -1069,6 +1097,7 @@ static struct clk uart_clk = {
.clk_val = U300_SYSCON_SBCER_UART_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(uart_clk.lock),
};
static struct clk keypad_clk = {
@@ -1082,6 +1111,7 @@ static struct clk keypad_clk = {
.clk_val = U300_SYSCON_SBCER_KEYPAD_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(keypad_clk.lock),
};
static struct clk gpio_clk = {
@@ -1095,6 +1125,7 @@ static struct clk gpio_clk = {
.clk_val = U300_SYSCON_SBCER_GPIO_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(gpio_clk.lock),
};
static struct clk rtc_clk = {
@@ -1106,6 +1137,7 @@ static struct clk rtc_clk = {
.res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
.res_mask = U300_SYSCON_RSR_RTC_RESET_EN,
/* This clock is always on, cannot be enabled/disabled */
+ .lock = __SPIN_LOCK_UNLOCKED(rtc_clk.lock),
};
static struct clk bustr_clk = {
@@ -1119,6 +1151,7 @@ static struct clk bustr_clk = {
.clk_val = U300_SYSCON_SBCER_BTR_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(bustr_clk.lock),
};
static struct clk evhist_clk = {
@@ -1132,6 +1165,7 @@ static struct clk evhist_clk = {
.clk_val = U300_SYSCON_SBCER_EH_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(evhist_clk.lock),
};
static struct clk timer_clk = {
@@ -1145,6 +1179,7 @@ static struct clk timer_clk = {
.clk_val = U300_SYSCON_SBCER_ACC_TMR_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(timer_clk.lock),
};
static struct clk app_timer_clk = {
@@ -1158,6 +1193,7 @@ static struct clk app_timer_clk = {
.clk_val = U300_SYSCON_SBCER_APP_TMR_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(app_timer_clk.lock),
};
#ifdef CONFIG_MACH_U300_BS335
@@ -1172,6 +1208,7 @@ static struct clk ppm_clk = {
.clk_val = U300_SYSCON_SBCER_PPM_CLK_EN,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
+ .lock = __SPIN_LOCK_UNLOCKED(ppm_clk.lock),
};
#endif
@@ -1187,53 +1224,53 @@ static struct clk ppm_clk = {
*/
static struct clk_lookup lookups[] = {
/* Connected directly to the AMBA bus */
- DEF_LOOKUP("amba", &amba_clk),
- DEF_LOOKUP("cpu", &cpu_clk),
- DEF_LOOKUP("nandif", &nandif_clk),
- DEF_LOOKUP("semi", &semi_clk),
+ DEF_LOOKUP("amba", &amba_clk),
+ DEF_LOOKUP("cpu", &cpu_clk),
+ DEF_LOOKUP("fsmc", &nandif_clk),
+ DEF_LOOKUP("semi", &semi_clk),
#ifdef CONFIG_MACH_U300_BS335
- DEF_LOOKUP("isp", &isp_clk),
- DEF_LOOKUP("cds", &cds_clk),
+ DEF_LOOKUP("isp", &isp_clk),
+ DEF_LOOKUP("cds", &cds_clk),
#endif
- DEF_LOOKUP("dma", &dma_clk),
- DEF_LOOKUP("aaif", &aaif_clk),
- DEF_LOOKUP("apex", &apex_clk),
+ DEF_LOOKUP("dma", &dma_clk),
+ DEF_LOOKUP("msl", &aaif_clk),
+ DEF_LOOKUP("apex", &apex_clk),
DEF_LOOKUP("video_enc", &video_enc_clk),
- DEF_LOOKUP("xgam", &xgam_clk),
- DEF_LOOKUP("ahb", &ahb_clk),
+ DEF_LOOKUP("xgam", &xgam_clk),
+ DEF_LOOKUP("ahb", &ahb_clk),
/* AHB bridge clocks */
- DEF_LOOKUP("ahb", &ahb_subsys_clk),
- DEF_LOOKUP("intcon", &intcon_clk),
- DEF_LOOKUP("mspro", &mspro_clk),
- DEF_LOOKUP("pl172", &emif_clk),
+ DEF_LOOKUP("ahb_subsys", &ahb_subsys_clk),
+ DEF_LOOKUP("intcon", &intcon_clk),
+ DEF_LOOKUP("mspro", &mspro_clk),
+ DEF_LOOKUP("pl172", &emif_clk),
/* FAST bridge clocks */
- DEF_LOOKUP("fast", &fast_clk),
- DEF_LOOKUP("mmci", &mmcsd_clk),
+ DEF_LOOKUP("fast", &fast_clk),
+ DEF_LOOKUP("mmci", &mmcsd_clk),
/*
* The .0 and .1 identifiers on these comes from the platform device
* .id field and are assigned when the platform devices are registered.
*/
- DEF_LOOKUP("i2s.0", &i2s0_clk),
- DEF_LOOKUP("i2s.1", &i2s1_clk),
- DEF_LOOKUP("stddci2c.0", &i2c0_clk),
- DEF_LOOKUP("stddci2c.1", &i2c1_clk),
- DEF_LOOKUP("pl022", &spi_clk),
+ DEF_LOOKUP("i2s.0", &i2s0_clk),
+ DEF_LOOKUP("i2s.1", &i2s1_clk),
+ DEF_LOOKUP("stu300.0", &i2c0_clk),
+ DEF_LOOKUP("stu300.1", &i2c1_clk),
+ DEF_LOOKUP("pl022", &spi_clk),
#ifdef CONFIG_MACH_U300_BS335
- DEF_LOOKUP("uart1", &uart1_clk),
+ DEF_LOOKUP("uart1", &uart1_clk),
#endif
/* SLOW bridge clocks */
- DEF_LOOKUP("slow", &slow_clk),
- DEF_LOOKUP("wdog", &wdog_clk),
- DEF_LOOKUP("uart0", &uart_clk),
- DEF_LOOKUP("apptimer", &app_timer_clk),
- DEF_LOOKUP("keypad", &keypad_clk),
+ DEF_LOOKUP("slow", &slow_clk),
+ DEF_LOOKUP("coh901327_wdog", &wdog_clk),
+ DEF_LOOKUP("uart0", &uart_clk),
+ DEF_LOOKUP("apptimer", &app_timer_clk),
+ DEF_LOOKUP("coh901461-keypad", &keypad_clk),
DEF_LOOKUP("u300-gpio", &gpio_clk),
- DEF_LOOKUP("rtc0", &rtc_clk),
- DEF_LOOKUP("bustr", &bustr_clk),
- DEF_LOOKUP("evhist", &evhist_clk),
- DEF_LOOKUP("timer", &timer_clk),
+ DEF_LOOKUP("rtc-coh901331", &rtc_clk),
+ DEF_LOOKUP("bustr", &bustr_clk),
+ DEF_LOOKUP("evhist", &evhist_clk),
+ DEF_LOOKUP("timer", &timer_clk),
#ifdef CONFIG_MACH_U300_BS335
- DEF_LOOKUP("ppm", &ppm_clk),
+ DEF_LOOKUP("ppm", &ppm_clk),
#endif
};
@@ -1427,16 +1464,20 @@ static const struct file_operations u300_clocks_operations = {
.release = single_release,
};
-static void init_clk_read_procfs(void)
+static int __init init_clk_read_debugfs(void)
{
/* Expose a simple debugfs interface to view all clocks */
(void) debugfs_create_file("u300_clocks", S_IFREG | S_IRUGO,
- NULL, NULL, &u300_clocks_operations);
-}
-#else
-static inline void init_clk_read_procfs(void)
-{
+ NULL, NULL,
+ &u300_clocks_operations);
+ return 0;
}
+/*
+ * This needs to come in after the core_initcall() for the
+ * overall clocks, because debugfs is not available until
+ * the subsystems come up.
+ */
+module_init(init_clk_read_debugfs);
#endif
static int __init u300_clock_init(void)
@@ -1462,8 +1503,6 @@ static int __init u300_clock_init(void)
clk_register();
- init_clk_read_procfs();
-
/*
* Some of these may be on when we boot the system so make sure they
* are turned OFF.
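
The two u300 changes above follow one pattern: each struct clk gets its spinlock initialised at build time with __SPIN_LOCK_UNLOCKED(), and the debugfs view is registered from a module_init() hook instead of being called directly, since debugfs is not mounted until the core clock code has run. A minimal sketch of that pattern, not part of the patch, with all names invented for illustration:

/* sketch: statically initialised per-object lock plus a late debugfs file */
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

struct demo_clk {
	const char *name;
	spinlock_t lock;
};

static struct demo_clk demo_clk = {
	.name = "demo",
	/* lock is usable from the first clk_enable(), no runtime init needed */
	.lock = __SPIN_LOCK_UNLOCKED(demo_clk.lock),
};

static int demo_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "%s\n", demo_clk.name);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* registered late, once debugfs itself is available */
static int __init demo_debugfs_init(void)
{
	debugfs_create_file("demo_clk", S_IFREG | S_IRUGO, NULL, NULL, &demo_fops);
	return 0;
}
module_init(demo_debugfs_init);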
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 69214fc8bd19..5bb46c05f9a8 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
+#include <linux/amba/pl061.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cnt32_to_63.h>
@@ -706,6 +707,16 @@ static struct clcd_board clcd_plat_data = {
.remove = versatile_clcd_remove,
};
+static struct pl061_platform_data gpio0_plat_data = {
+ .gpio_base = 0,
+ .irq_base = -1,
+};
+
+static struct pl061_platform_data gpio1_plat_data = {
+ .gpio_base = 8,
+ .irq_base = -1,
+};
+
#define AACI_IRQ { IRQ_AACI, NO_IRQ }
#define AACI_DMA { 0x80, 0x81 }
#define MMCI0_IRQ { IRQ_MMCI0A,IRQ_SIC_MMCI0B }
@@ -768,8 +779,8 @@ AMBA_DEVICE(clcd, "dev:20", CLCD, &clcd_plat_data);
AMBA_DEVICE(dmac, "dev:30", DMAC, NULL);
AMBA_DEVICE(sctl, "dev:e0", SCTL, NULL);
AMBA_DEVICE(wdog, "dev:e1", WATCHDOG, NULL);
-AMBA_DEVICE(gpio0, "dev:e4", GPIO0, NULL);
-AMBA_DEVICE(gpio1, "dev:e5", GPIO1, NULL);
+AMBA_DEVICE(gpio0, "dev:e4", GPIO0, &gpio0_plat_data);
+AMBA_DEVICE(gpio1, "dev:e5", GPIO1, &gpio1_plat_data);
AMBA_DEVICE(rtc, "dev:e8", RTC, NULL);
AMBA_DEVICE(sci0, "dev:f0", SCI, NULL);
AMBA_DEVICE(uart0, "dev:f1", UART0, NULL);
diff --git a/arch/arm/mach-versatile/include/mach/gpio.h b/arch/arm/mach-versatile/include/mach/gpio.h
new file mode 100644
index 000000000000..306ab4c9f2aa
--- /dev/null
+++ b/arch/arm/mach-versatile/include/mach/gpio.h
@@ -0,0 +1 @@
+#include <asm-generic/gpio.h>
diff --git a/arch/arm/mach-versatile/versatile_pb.c b/arch/arm/mach-versatile/versatile_pb.c
index aa051c0884f8..c8394c3f5969 100644
--- a/arch/arm/mach-versatile/versatile_pb.c
+++ b/arch/arm/mach-versatile/versatile_pb.c
@@ -23,6 +23,7 @@
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
+#include <linux/amba/pl061.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -45,6 +46,16 @@ static struct mmc_platform_data mmc1_plat_data = {
.status = mmc_status,
};
+static struct pl061_platform_data gpio2_plat_data = {
+ .gpio_base = 16,
+ .irq_base = -1,
+};
+
+static struct pl061_platform_data gpio3_plat_data = {
+ .gpio_base = 24,
+ .irq_base = -1,
+};
+
#define UART3_IRQ { IRQ_SIC_UART3, NO_IRQ }
#define UART3_DMA { 0x86, 0x87 }
#define SCI1_IRQ { IRQ_SIC_SCI3, NO_IRQ }
@@ -70,8 +81,8 @@ AMBA_DEVICE(sci1, "fpga:0a", SCI1, NULL);
AMBA_DEVICE(mmc1, "fpga:0b", MMCI1, &mmc1_plat_data);
/* DevChip Primecells */
-AMBA_DEVICE(gpio2, "dev:e6", GPIO2, NULL);
-AMBA_DEVICE(gpio3, "dev:e7", GPIO3, NULL);
+AMBA_DEVICE(gpio2, "dev:e6", GPIO2, &gpio2_plat_data);
+AMBA_DEVICE(gpio3, "dev:e7", GPIO3, &gpio3_plat_data);
static struct amba_device *amba_devs[] __initdata = {
&uart3_device,
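
With the platform data attached above, each PL061 block registers its eight lines at the given gpio_base (0, 8, 16 and 24 here), so the pins become ordinary gpiolib numbers. A hedged usage sketch — the pin choice and label are invented, not taken from the patch:

#include <linux/gpio.h>
#include <linux/errno.h>

static int demo_claim_led(void)
{
	int gpio = 16 + 2;	/* pin 2 of the "gpio2" PL061 block */
	int err;

	err = gpio_request(gpio, "demo-led");
	if (err)
		return err;

	/* configure as an output and drive it high */
	return gpio_direction_output(gpio, 1);
}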
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 83c025e72ceb..5fe595aeba69 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -758,7 +758,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
- REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX
+ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
default y
select OUTER_CACHE
help
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index fc84fcc74380..6bda76a43199 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
}
spin_unlock(&cpu_asid_lock);
- mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+ cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
mm->context.id = asid;
}
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c07222eb5ce0..d9773a63e151 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
void flush_cache_mm(struct mm_struct *mm)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
return;
}
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long len, int write)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
}
/* VIPT non-aliasing cache */
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
vma->vm_flags & VM_EXEC) {
unsigned long addr = (unsigned long)kaddr;
/* only flushing the kernel mapping on non-aliasing VIPT */
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 195e48edd8c2..ac5c80062b70 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,6 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
EXPORT_SYMBOL(__cpuc_flush_user_all);
EXPORT_SYMBOL(__cpuc_flush_user_range);
EXPORT_SYMBOL(__cpuc_coherent_kern_range);
+EXPORT_SYMBOL(__cpuc_flush_dcache_page);
EXPORT_SYMBOL(dmac_inv_range); /* because of flush_ioremap_region() */
#else
EXPORT_SYMBOL(cpu_cache);
diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c
index be4eefda4767..9395898dd49a 100644
--- a/arch/arm/plat-omap/debug-leds.c
+++ b/arch/arm/plat-omap/debug-leds.c
@@ -281,24 +281,27 @@ static int /* __init */ fpga_probe(struct platform_device *pdev)
return 0;
}
-static int fpga_suspend_late(struct platform_device *pdev, pm_message_t mesg)
+static int fpga_suspend_noirq(struct device *dev)
{
__raw_writew(~0, &fpga->leds);
return 0;
}
-static int fpga_resume_early(struct platform_device *pdev)
+static int fpga_resume_noirq(struct device *dev)
{
__raw_writew(~hw_led_state, &fpga->leds);
return 0;
}
+static struct dev_pm_ops fpga_dev_pm_ops = {
+ .suspend_noirq = fpga_suspend_noirq,
+ .resume_noirq = fpga_resume_noirq,
+};
static struct platform_driver led_driver = {
.driver.name = "omap_dbg_led",
+ .driver.pm = &fpga_dev_pm_ops,
.probe = fpga_probe,
- .suspend_late = fpga_suspend_late,
- .resume_early = fpga_resume_early,
};
static int __init fpga_init(void)
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index def14ec265b3..7677a4a1cef2 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -2457,6 +2457,19 @@ static int __init omap_init_dma(void)
setup_irq(irq, &omap24xx_dma_irq);
}
+ /* Enable smartidle idlemodes and autoidle */
+ if (cpu_is_omap34xx()) {
+ u32 v = dma_read(OCP_SYSCONFIG);
+ v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
+ DMA_SYSCONFIG_SIDLEMODE_MASK |
+ DMA_SYSCONFIG_AUTOIDLE);
+ v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
+ DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
+ DMA_SYSCONFIG_AUTOIDLE);
+ dma_write(v, OCP_SYSCONFIG);
+ }
+
+
/* FIXME: Update LCD DMA to work on 24xx */
if (cpu_class_is_omap1()) {
r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 7fd89ba8d3b5..3d03337ad422 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1264,8 +1264,9 @@ static struct irq_chip mpuio_irq_chip = {
#include <linux/platform_device.h>
-static int omap_mpuio_suspend_late(struct platform_device *pdev, pm_message_t mesg)
+static int omap_mpuio_suspend_noirq(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct gpio_bank *bank = platform_get_drvdata(pdev);
void __iomem *mask_reg = bank->base + OMAP_MPUIO_GPIO_MASKIT;
unsigned long flags;
@@ -1278,8 +1279,9 @@ static int omap_mpuio_suspend_late(struct platform_device *pdev, pm_message_t me
return 0;
}
-static int omap_mpuio_resume_early(struct platform_device *pdev)
+static int omap_mpuio_resume_noirq(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct gpio_bank *bank = platform_get_drvdata(pdev);
void __iomem *mask_reg = bank->base + OMAP_MPUIO_GPIO_MASKIT;
unsigned long flags;
@@ -1291,14 +1293,18 @@ static int omap_mpuio_resume_early(struct platform_device *pdev)
return 0;
}
+static struct dev_pm_ops omap_mpuio_dev_pm_ops = {
+ .suspend_noirq = omap_mpuio_suspend_noirq,
+ .resume_noirq = omap_mpuio_resume_noirq,
+};
+
/* use platform_driver for this, now that there's no longer any
* point to sys_device (other than not disturbing old code).
*/
static struct platform_driver omap_mpuio_driver = {
- .suspend_late = omap_mpuio_suspend_late,
- .resume_early = omap_mpuio_resume_early,
.driver = {
.name = "mpuio",
+ .pm = &omap_mpuio_dev_pm_ops,
},
};
@@ -1585,6 +1591,7 @@ static int __init _omap_gpio_init(void)
__raw_writel(0x00000000, bank->base + OMAP24XX_GPIO_IRQENABLE1);
__raw_writel(0xffffffff, bank->base + OMAP24XX_GPIO_IRQSTATUS1);
__raw_writew(0x0015, bank->base + OMAP24XX_GPIO_SYSCONFIG);
+ __raw_writel(0x00000000, bank->base + OMAP24XX_GPIO_DEBOUNCE_EN);
/* Initialize interface clock ungated, module enabled */
__raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
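
Both OMAP drivers above are converted the same way: the legacy suspend_late/resume_early members of struct platform_driver are replaced by a dev_pm_ops whose _noirq callbacks take a struct device, hung off driver.pm. A minimal sketch of the conversion with invented names, shown only to summarise the pattern:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend_noirq(struct device *dev)
{
	/* recover the platform device if the driver state hangs off it */
	struct platform_device *pdev = to_platform_device(dev);

	(void)pdev;		/* quiesce hardware owned by pdev here */
	return 0;
}

static int demo_resume_noirq(struct device *dev)
{
	return 0;		/* restore hardware state here */
}

static struct dev_pm_ops demo_pm_ops = {
	.suspend_noirq	= demo_suspend_noirq,
	.resume_noirq	= demo_resume_noirq,
};

static struct platform_driver demo_driver = {
	.driver = {
		.name	= "demo",
		.pm	= &demo_pm_ops,
	},
};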
diff --git a/arch/arm/plat-omap/include/mach/cpu.h b/arch/arm/plat-omap/include/mach/cpu.h
index fc60c4ebcc28..285eaa3a8275 100644
--- a/arch/arm/plat-omap/include/mach/cpu.h
+++ b/arch/arm/plat-omap/include/mach/cpu.h
@@ -30,6 +30,17 @@
#ifndef __ASM_ARCH_OMAP_CPU_H
#define __ASM_ARCH_OMAP_CPU_H
+/*
+ * Omap device type i.e. EMU/HS/TST/GP/BAD
+ */
+#define OMAP2_DEVICE_TYPE_TEST 0
+#define OMAP2_DEVICE_TYPE_EMU 1
+#define OMAP2_DEVICE_TYPE_SEC 2
+#define OMAP2_DEVICE_TYPE_GP 3
+#define OMAP2_DEVICE_TYPE_BAD 4
+
+int omap_type(void);
+
struct omap_chip_id {
u8 oc;
u8 type;
@@ -424,17 +435,6 @@ IS_OMAP_TYPE(3430, 0x3430)
int omap_chip_is(struct omap_chip_id oci);
-int omap_type(void);
-
-/*
- * Macro to detect device type i.e. EMU/HS/TST/GP/BAD
- */
-#define OMAP2_DEVICE_TYPE_TEST 0
-#define OMAP2_DEVICE_TYPE_EMU 1
-#define OMAP2_DEVICE_TYPE_SEC 2
-#define OMAP2_DEVICE_TYPE_GP 3
-#define OMAP2_DEVICE_TYPE_BAD 4
-
void omap2_check_revision(void);
#endif /* defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) */
diff --git a/arch/arm/plat-omap/include/mach/dma.h b/arch/arm/plat-omap/include/mach/dma.h
index 8c1eae88737e..7b939cc01962 100644
--- a/arch/arm/plat-omap/include/mach/dma.h
+++ b/arch/arm/plat-omap/include/mach/dma.h
@@ -389,6 +389,21 @@
#define DMA_THREAD_FIFO_25 (0x02 << 14)
#define DMA_THREAD_FIFO_50 (0x03 << 14)
+/* DMA4_OCP_SYSCONFIG bits */
+#define DMA_SYSCONFIG_MIDLEMODE_MASK (3 << 12)
+#define DMA_SYSCONFIG_CLOCKACTIVITY_MASK (3 << 8)
+#define DMA_SYSCONFIG_EMUFREE (1 << 5)
+#define DMA_SYSCONFIG_SIDLEMODE_MASK (3 << 3)
+#define DMA_SYSCONFIG_SOFTRESET (1 << 2)
+#define DMA_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define DMA_SYSCONFIG_MIDLEMODE(n) ((n) << 12)
+#define DMA_SYSCONFIG_SIDLEMODE(n) ((n) << 3)
+
+#define DMA_IDLEMODE_SMARTIDLE 0x2
+#define DMA_IDLEMODE_NO_IDLE 0x1
+#define DMA_IDLEMODE_FORCE_IDLE 0x0
+
/* Chaining modes*/
#ifndef CONFIG_ARCH_OMAP1
#define OMAP_DMA_STATIC_CHAIN 0x1
diff --git a/arch/arm/plat-omap/include/mach/io.h b/arch/arm/plat-omap/include/mach/io.h
index 3b2814720569..73f483d56ca6 100644
--- a/arch/arm/plat-omap/include/mach/io.h
+++ b/arch/arm/plat-omap/include/mach/io.h
@@ -201,7 +201,7 @@
#define OMAP2_IO_ADDRESS(pa) IOMEM(__OMAP2_IO_ADDRESS(pa))
#ifdef __ASSEMBLER__
-#define IOMEM(x) x
+#define IOMEM(x) (x)
#else
#define IOMEM(x) ((void __force __iomem *)(x))
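
The one-character io.h change above is macro hygiene: without its own parentheses, a composite argument to IOMEM() can bind to neighbouring operators in the assembler sources. Illustrative only, not from the patch:

/* why the parentheses matter */
#define IOMEM_BAD(x)	x
#define IOMEM_GOOD(x)	(x)

/*
 * 2 * IOMEM_BAD(1 + 1)  expands to  2 * 1 + 1   == 3   (wrong)
 * 2 * IOMEM_GOOD(1 + 1) expands to  2 * (1 + 1) == 4   (intended)
 */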
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 4cf449fa2cb5..4a0301399013 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -298,7 +298,7 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
if ((start <= da) && (da < start + bytes)) {
dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
__func__, start, da, bytes);
-
+ iotlb_load_cr(obj, &cr);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 65006df3f1b7..4ea73804d21e 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -133,7 +133,12 @@ void __init omap_detect_sram(void)
if (cpu_is_omap34xx()) {
omap_sram_base = OMAP3_SRAM_PUB_VA;
omap_sram_start = OMAP3_SRAM_PUB_PA;
- omap_sram_size = 0x8000; /* 32K */
+ if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
+ (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
+ omap_sram_size = 0x7000; /* 28K */
+ } else {
+ omap_sram_size = 0x8000; /* 32K */
+ }
} else {
omap_sram_base = OMAP2_SRAM_PUB_VA;
omap_sram_start = OMAP2_SRAM_PUB_PA;
diff --git a/arch/arm/plat-s3c/Makefile b/arch/arm/plat-s3c/Makefile
index 74bb7cb5da49..0761766b1833 100644
--- a/arch/arm/plat-s3c/Makefile
+++ b/arch/arm/plat-s3c/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_S3C_DEV_HSMMC) += dev-hsmmc.o
obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o
obj-y += dev-i2c0.o
obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o
-obj-$(CONFIG_SND_S3C24XX_SOC) += dev-audio.o
+obj-$(CONFIG_SND_S3C64XX_SOC_I2S) += dev-audio.o
obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o
obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o
obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o
diff --git a/arch/arm/plat-s3c/include/plat/devs.h b/arch/arm/plat-s3c/include/plat/devs.h
index b5b9c4d46e9a..2e170827e0b0 100644
--- a/arch/arm/plat-s3c/include/plat/devs.h
+++ b/arch/arm/plat-s3c/include/plat/devs.h
@@ -37,6 +37,7 @@ extern struct platform_device s3c_device_i2c1;
extern struct platform_device s3c_device_rtc;
extern struct platform_device s3c_device_adc;
extern struct platform_device s3c_device_sdi;
+extern struct platform_device s3c_device_iis;
extern struct platform_device s3c_device_hwmon;
extern struct platform_device s3c_device_hsmmc0;
extern struct platform_device s3c_device_hsmmc1;
diff --git a/arch/arm/plat-s3c24xx/Makefile b/arch/arm/plat-s3c24xx/Makefile
index 636cb12711df..579a165c2827 100644
--- a/arch/arm/plat-s3c24xx/Makefile
+++ b/arch/arm/plat-s3c24xx/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_PM) += irq-pm.o
obj-$(CONFIG_PM) += sleep.o
-obj-$(CONFIG_HAVE_PWM) += pwm.o
+obj-$(CONFIG_S3C24XX_PWM) += pwm.o
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
obj-$(CONFIG_S3C2410_DMA) += dma.o
obj-$(CONFIG_S3C24XX_ADC) += adc.o
diff --git a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
index 9edf7894eedd..da7a61728c18 100644
--- a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
+++ b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
@@ -12,8 +12,7 @@
*/
#include <linux/kernel.h>
-
-#include <mach/hardware.h>
+#include <linux/gpio.h>
#include <mach/spi.h>
#include <mach/regs-gpio.h>
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
index f34d0fc69ad8..86b9edc67413 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
@@ -12,8 +12,7 @@
*/
#include <linux/kernel.h>
-
-#include <mach/hardware.h>
+#include <linux/gpio.h>
#include <mach/spi.h>
#include <mach/regs-gpio.h>
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index 7910d41eb886..b8324608ec0c 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -131,6 +131,7 @@ SECTIONS
/DISCARD/ : {
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
DWARF_DEBUG
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 6ac307ca0d80..6e8eabd8f0a6 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -280,5 +280,6 @@ SECTIONS
/DISCARD/ :
{
*(.exitcall.exit)
+ *(.discard)
}
}
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 0bc3c4ef0aad..99e4dbb1dfd1 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -42,9 +42,9 @@
#include <asm/mem_map.h>
#include "blackfin_sram.h"
-static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp;
-static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp;
-static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
+static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
+static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
/* the data structure for L1 scratchpad and DATA SRAM */
diff --git a/arch/cris/include/arch-v10/arch/mmu.h b/arch/cris/include/arch-v10/arch/mmu.h
index df84f1716e6b..e829e5a37bbe 100644
--- a/arch/cris/include/arch-v10/arch/mmu.h
+++ b/arch/cris/include/arch-v10/arch/mmu.h
@@ -33,10 +33,10 @@ typedef struct
/* CRIS PTE bits (see R_TLB_LO in the register description)
*
- * Bit: 31-13 12-------4 3 2 1 0
- * ________________________________________________
- * | pfn | reserved | global | valid | kernel | we |
- * |_____|__________|________|_______|________|_____|
+ * Bit: 31 30-13 12-------4 3 2 1 0
+ * _______________________________________________________
+ * | cache |pfn | reserved | global | valid | kernel | we |
+ * |_______|____|__________|________|_______|________|_____|
*
* (pfn = physical frame number)
*/
@@ -53,6 +53,7 @@ typedef struct
#define _PAGE_VALID (1<<2) /* page is valid */
#define _PAGE_SILENT_READ (1<<2) /* synonym */
#define _PAGE_GLOBAL (1<<3) /* global page - context is ignored */
+#define _PAGE_NO_CACHE (1<<31) /* part of the uncached memory map */
/* Bits the HW doesn't care about but the kernel uses them in SW */
diff --git a/arch/cris/include/arch-v32/arch/mmu.h b/arch/cris/include/arch-v32/arch/mmu.h
index 6bcdc3fdf7dc..a05b033c5c42 100644
--- a/arch/cris/include/arch-v32/arch/mmu.h
+++ b/arch/cris/include/arch-v32/arch/mmu.h
@@ -28,10 +28,10 @@ typedef struct
/*
* CRISv32 PTE bits:
*
- * Bit: 31-13 12-5 4 3 2 1 0
- * +-----+------+--------+-------+--------+-------+---------+
- * | pfn | zero | global | valid | kernel | write | execute |
- * +-----+------+--------+-------+--------+-------+---------+
+ * Bit: 31 30-13 12-5 4 3 2 1 0
+ * +-------+-----+------+--------+-------+--------+-------+---------+
+ * | cache | pfn | zero | global | valid | kernel | write | execute |
+ * +-------+-----+------+--------+-------+--------+-------+---------+
*/
/*
@@ -45,6 +45,8 @@ typedef struct
#define _PAGE_VALID (1 << 3) /* Page is valid. */
#define _PAGE_SILENT_READ (1 << 3) /* Same as above. */
#define _PAGE_GLOBAL (1 << 4) /* Global page. */
+#define _PAGE_NO_CACHE (1 << 31) /* part of the uncached memory map */
+
/*
* The hardware doesn't care about these bits, but the kernel uses them in
diff --git a/arch/cris/include/asm/mmu_context.h b/arch/cris/include/asm/mmu_context.h
index 72ba08dcfd18..1d45fd6365b7 100644
--- a/arch/cris/include/asm/mmu_context.h
+++ b/arch/cris/include/asm/mmu_context.h
@@ -17,7 +17,8 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* registers like cr3 on the i386
*/
-extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */
+/* defined in arch/cris/mm/fault.c */
+DECLARE_PER_CPU(pgd_t *, current_pgd);
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index 50aa974aa834..1fcce00f01f4 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -197,6 +197,8 @@ static inline pte_t __mk_pte(void * page, pgprot_t pgprot)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
+#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE))
+
/* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval
* __pte_page(pte_val) refers to the "virtual" DRAM interval
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 0d2adfc794d4..a3175ebb38cc 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -145,6 +145,7 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
dram_end = dram_start + (CONFIG_ETRAX_DRAM_SIZE - __CONFIG_ETRAX_VMEM_SIZE)*1024*1024;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index f925115e3250..4a7cdd9ea1ee 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -29,7 +29,7 @@ extern void die_if_kernel(const char *, struct pt_regs *, long);
/* current active page directory */
-volatile DEFINE_PER_CPU(pgd_t *,current_pgd);
+DEFINE_PER_CPU(pgd_t *, current_pgd);
unsigned long cris_signal_return_page;
/*
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 8a5bd7a9c6f5..b86e19c9b5b0 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -7,6 +7,7 @@ config FRV
default y
select HAVE_IDE
select HAVE_ARCH_TRACEHOOK
+ select HAVE_PERF_COUNTERS
config ZONE_DMA
bool
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 0409d981fd39..00a57af79afc 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -121,10 +121,72 @@ static inline void atomic_dec(atomic_t *v)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+/*
+ * 64-bit atomic ops
+ */
+typedef struct {
+ volatile long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline long long atomic64_read(atomic64_t *v)
+{
+ long long counter;
+
+ asm("ldd%I1 %M1,%0"
+ : "=e"(counter)
+ : "m"(v->counter));
+ return counter;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ asm volatile("std%I0 %1,%M0"
+ : "=m"(v->counter)
+ : "e"(i));
+}
+
+extern long long atomic64_inc_return(atomic64_t *v);
+extern long long atomic64_dec_return(atomic64_t *v);
+extern long long atomic64_add_return(long long i, atomic64_t *v);
+extern long long atomic64_sub_return(long long i, atomic64_t *v);
+
+static inline long long atomic64_add_negative(long long i, atomic64_t *v)
+{
+ return atomic64_add_return(i, v) < 0;
+}
+
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+ atomic64_add_return(i, v);
+}
+
+static inline void atomic64_sub(long long i, atomic64_t *v)
+{
+ atomic64_sub_return(i, v);
+}
+
+static inline void atomic64_inc(atomic64_t *v)
+{
+ atomic64_inc_return(v);
+}
+
+static inline void atomic64_dec(atomic64_t *v)
+{
+ atomic64_dec_return(v);
+}
+
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
+
/*****************************************************************************/
/*
* exchange value with memory
*/
+extern uint64_t __xchg_64(uint64_t i, volatile void *v);
+
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define xchg(ptr, x) \
@@ -174,8 +236,10 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
#define tas(ptr) (xchg((ptr), 1))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
+#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
+#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+#define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
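
The new FRV atomic64_t API mirrors the 32-bit one: the arithmetic is done out of line in atomic64-ops.S (added later in this series) and the header provides the usual wrappers. A hedged usage sketch, with the counter name invented:

#include <asm/atomic.h>

static atomic64_t demo_bytes = ATOMIC64_INIT(0);

/* add to the running total and return the new value atomically */
static long long demo_account(long long nbytes)
{
	return atomic64_add_return(nbytes, &demo_bytes);
}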
diff --git a/arch/frv/include/asm/gdb-stub.h b/arch/frv/include/asm/gdb-stub.h
index 24f9738670bd..2da716407ff2 100644
--- a/arch/frv/include/asm/gdb-stub.h
+++ b/arch/frv/include/asm/gdb-stub.h
@@ -90,7 +90,6 @@ extern void gdbstub_do_rx(void);
extern asmlinkage void __debug_stub_init_break(void);
extern asmlinkage void __break_hijack_kernel_event(void);
extern asmlinkage void __break_hijack_kernel_event_breaks_here(void);
-extern asmlinkage void start_kernel(void);
extern asmlinkage void gdbstub_rx_handler(void);
extern asmlinkage void gdbstub_rx_irq(void);
diff --git a/arch/frv/include/asm/perf_counter.h b/arch/frv/include/asm/perf_counter.h
new file mode 100644
index 000000000000..ccf726e61b2e
--- /dev/null
+++ b/arch/frv/include/asm/perf_counter.h
@@ -0,0 +1,17 @@
+/* FRV performance counter support
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PERF_COUNTER_H
+#define _ASM_PERF_COUNTER_H
+
+#define PERF_COUNTER_INDEX_OFFSET 0
+
+#endif /* _ASM_PERF_COUNTER_H */
diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/system.h
index 7742ec000cc4..efd22d9077ac 100644
--- a/arch/frv/include/asm/system.h
+++ b/arch/frv/include/asm/system.h
@@ -208,6 +208,8 @@ extern void free_initmem(void);
* - if (*ptr == test) then orig = *ptr; *ptr = test;
* - if (*ptr != test) then orig = *ptr;
*/
+extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);
+
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define cmpxchg(ptr, test, new) \
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index 96d78d5d2c41..4a8fb427ce0a 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -341,10 +341,12 @@
#define __NR_inotify_init1 332
#define __NR_preadv 333
#define __NR_pwritev 334
+#define __NR_rt_tgsigqueueinfo 335
+#define __NR_perf_counter_open 336
#ifdef __KERNEL__
-#define NR_syscalls 335
+#define NR_syscalls 337
#define __ARCH_WANT_IPC_PARSE_VERSION
/* #define __ARCH_WANT_OLD_READDIR */
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 356e0e327a89..fde1e446b440 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1524,5 +1524,7 @@ sys_call_table:
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev
+ .long sys_rt_tgsigqueueinfo /* 335 */
+ .long sys_perf_counter_open
syscall_table_size = (. - sys_call_table)
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 0316b3c50eff..a89803b58b9a 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -67,6 +67,10 @@ EXPORT_SYMBOL(atomic_sub_return);
EXPORT_SYMBOL(__xchg_32);
EXPORT_SYMBOL(__cmpxchg_32);
#endif
+EXPORT_SYMBOL(atomic64_add_return);
+EXPORT_SYMBOL(atomic64_sub_return);
+EXPORT_SYMBOL(__xchg_64);
+EXPORT_SYMBOL(__cmpxchg_64);
EXPORT_SYMBOL(__debug_bug_printk);
EXPORT_SYMBOL(__delay_loops_MHz);
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 22d9787406ed..64b5a5e4d35e 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -177,6 +177,8 @@ SECTIONS
.debug_ranges 0 : { *(.debug_ranges) }
.comment 0 : { *(.comment) }
+
+ /DISCARD/ : { *(.discard) }
}
__kernel_image_size_no_bss = __bss_start - __kernel_image_start;
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 08be305c9f44..0a377210c89b 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -4,5 +4,5 @@
lib-y := \
__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
- checksum.o memcpy.o memset.o atomic-ops.o \
- outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
+ checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
+ outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
index ee0ac905fb08..5e9e6ab5dd0e 100644
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -163,11 +163,10 @@ __cmpxchg_32:
ld.p @(gr11,gr0),gr8
orcr cc7,cc7,cc3
subcc gr8,gr9,gr7,icc0
- bne icc0,#0,1f
+ bnelr icc0,#0
cst.p gr10,@(gr11,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1
beq icc3,#0,0b
-1:
bralr
.size __cmpxchg_32, .-__cmpxchg_32
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
new file mode 100644
index 000000000000..b6194eeac127
--- /dev/null
+++ b/arch/frv/lib/atomic64-ops.S
@@ -0,0 +1,162 @@
+/* kernel atomic64 operations
+ *
+ * For an explanation of how atomic ops work in this arch, see:
+ * Documentation/frv/atomic-ops.txt
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/spr-regs.h>
+
+ .text
+ .balign 4
+
+
+###############################################################################
+#
+# long long atomic64_inc_return(atomic64_t *v)
+#
+###############################################################################
+ .globl atomic64_inc_return
+ .type atomic64_inc_return,@function
+atomic64_inc_return:
+ or.p gr8,gr8,gr10
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ addicc gr9,#1,gr9,icc0
+ addxi gr8,#0,gr8,icc0
+ cstd.p gr8,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size atomic64_inc_return, .-atomic64_inc_return
+
+###############################################################################
+#
+# long long atomic64_dec_return(atomic64_t *v)
+#
+###############################################################################
+ .globl atomic64_dec_return
+ .type atomic64_dec_return,@function
+atomic64_dec_return:
+ or.p gr8,gr8,gr10
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ subicc gr9,#1,gr9,icc0
+ subxi gr8,#0,gr8,icc0
+ cstd.p gr8,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size atomic64_dec_return, .-atomic64_dec_return
+
+###############################################################################
+#
+# long long atomic64_add_return(long long i, atomic64_t *v)
+#
+###############################################################################
+ .globl atomic64_add_return
+ .type atomic64_add_return,@function
+atomic64_add_return:
+ or.p gr8,gr8,gr4
+ or gr9,gr9,gr5
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ addcc gr9,gr5,gr9,icc0
+ addx gr8,gr4,gr8,icc0
+ cstd.p gr8,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size atomic64_add_return, .-atomic64_add_return
+
+###############################################################################
+#
+# long long atomic64_sub_return(long long i, atomic64_t *v)
+#
+###############################################################################
+ .globl atomic64_sub_return
+ .type atomic64_sub_return,@function
+atomic64_sub_return:
+ or.p gr8,gr8,gr4
+ or gr9,gr9,gr5
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ subcc gr9,gr5,gr9,icc0
+ subx gr8,gr4,gr8,icc0
+ cstd.p gr8,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size atomic64_sub_return, .-atomic64_sub_return
+
+###############################################################################
+#
+# uint64_t __xchg_64(uint64_t i, uint64_t *v)
+#
+###############################################################################
+ .globl __xchg_64
+ .type __xchg_64,@function
+__xchg_64:
+ or.p gr8,gr8,gr4
+ or gr9,gr9,gr5
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ cstd.p gr4,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size __xchg_64, .-__xchg_64
+
+###############################################################################
+#
+# uint64_t __cmpxchg_64(uint64_t test, uint64_t new, uint64_t *v)
+#
+###############################################################################
+ .globl __cmpxchg_64
+ .type __cmpxchg_64,@function
+__cmpxchg_64:
+ or.p gr8,gr8,gr4
+ or gr9,gr9,gr5
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr12,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3
+ subcc gr8,gr4,gr0,icc0
+ subcc.p gr9,gr5,gr0,icc1
+ bnelr icc0,#0
+ bnelr icc1,#0
+ cstd.p gr10,@(gr12,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size __cmpxchg_64, .-__cmpxchg_64
+
diff --git a/arch/frv/lib/perf_counter.c b/arch/frv/lib/perf_counter.c
new file mode 100644
index 000000000000..2000feecd571
--- /dev/null
+++ b/arch/frv/lib/perf_counter.c
@@ -0,0 +1,19 @@
+/* Performance counter handling
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/perf_counter.h>
+
+/*
+ * mark the performance counter as pending
+ */
+void set_perf_counter_pending(void)
+{
+}
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 43d67534c712..566bdeb499d1 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -86,7 +86,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
struct pci_bus *bus;
struct pci_dev *dev;
int idx;
- struct resource *r, *pr;
+ struct resource *r;
/* Depth-First Search on bus tree */
for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
@@ -96,8 +96,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
r = &dev->resource[idx];
if (!r->start)
continue;
- pr = pci_find_parent_resource(dev, r);
- if (!pr || request_resource(pr, r) < 0)
+ if (pci_claim_resource(dev, idx) < 0)
printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
}
}
@@ -110,7 +109,7 @@ static void __init pcibios_allocate_resources(int pass)
struct pci_dev *dev = NULL;
int idx, disabled;
u16 command;
- struct resource *r, *pr;
+ struct resource *r;
for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
@@ -127,8 +126,7 @@ static void __init pcibios_allocate_resources(int pass)
if (pass == disabled) {
DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
r->start, r->end, r->flags, disabled, pass);
- pr = pci_find_parent_resource(dev, r);
- if (!pr || request_resource(pr, r) < 0) {
+ if (pci_claim_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
/* We'll assign a new address later */
r->end -= r->start;
diff --git a/arch/h8300/include/asm/pci.h b/arch/h8300/include/asm/pci.h
index 97389b35aa35..cc9762091c0a 100644
--- a/arch/h8300/include/asm/pci.h
+++ b/arch/h8300/include/asm/pci.h
@@ -8,7 +8,6 @@
*/
#define pcibios_assign_all_busses() 0
-#define pcibios_scan_all_fns(a, b) 0
static inline void pcibios_set_master(struct pci_dev *dev)
{
diff --git a/arch/h8300/kernel/timer/tpu.c b/arch/h8300/kernel/timer/tpu.c
index e7c6e614a758..2193a2e2859a 100644
--- a/arch/h8300/kernel/timer/tpu.c
+++ b/arch/h8300/kernel/timer/tpu.c
@@ -7,7 +7,6 @@
*
*/
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index 43a87b9085b6..03d6c0df33db 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -154,6 +154,7 @@ SECTIONS
}
/DISCARD/ : {
*(.exitcall.exit)
+ *(.discard)
}
.romfs :
{
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 170042b420d4..328d2f8b8c3f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -89,6 +89,9 @@ config GENERIC_TIME_VSYSCALL
bool
default y
+config HAVE_LEGACY_PER_CPU_AREA
+ def_bool y
+
config HAVE_SETUP_PER_CPU_AREA
def_bool y
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 41ab85d66f33..d66d446b127c 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -15,4 +15,7 @@ struct dev_archdata {
#endif
};
+struct pdev_archdata {
+};
+
#endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 5f43697aed30..d9b6325a9328 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -235,7 +235,8 @@ struct kvm_vm_data {
#define KVM_REQ_PTC_G 32
#define KVM_REQ_RESUME 33
-#define KVM_PAGES_PER_HPAGE 1
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) 1
struct kvm;
struct kvm_vcpu;
@@ -465,7 +466,6 @@ struct kvm_arch {
unsigned long metaphysical_rr4;
unsigned long vmm_init_rr;
- int online_vcpus;
int is_sn2;
struct kvm_ioapic *vioapic;
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index fcfca56bb850..55281aabe5f2 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -17,7 +17,6 @@
* loader.
*/
#define pcibios_assign_all_busses() 0
-#define pcibios_scan_all_fns(a, b) 0
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
@@ -135,7 +134,18 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev,
extern void pcibios_bus_to_resource(struct pci_dev *dev,
struct resource *res, struct pci_bus_region *region);
-#define pcibios_scan_all_fns(a, b) 0
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+ struct resource *root = NULL;
+
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+
+ return root;
+}
#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
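
The new ia64 pcibios_select_root() simply picks ioport_resource or iomem_resource from the resource flags, presumably so generic code such as pci_claim_resource() can find the right root to insert a BAR under. A sketch of the same idea done by hand — the function name is invented, error handling is trimmed, and the in-tree helper may use a different insertion primitive:

#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/errno.h>

static int demo_claim_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];
	struct resource *root = pcibios_select_root(pdev, res);

	if (!root)
		return -EINVAL;

	/* fails if the BAR overlaps something already claimed under root */
	return request_resource(root, res);
}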
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index d217d1d4e051..0b3b3997decd 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#else /* CONFIG_SMP */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 7b4c8c70b2d1..9ac48c34daac 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,6 @@
/*
* Returns a bitmask of CPUs on Node 'node'.
*/
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
#define cpumask_of_node(node) (&node_to_cpu_mask[node])
/*
@@ -103,8 +102,6 @@ void build_cpu_to_node_map(void);
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define smt_capable() (smp_num_siblings > 1)
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
index ebf4e988e78c..d5764a3d74af 100644
--- a/arch/ia64/kernel/esi.c
+++ b/arch/ia64/kernel/esi.c
@@ -65,7 +65,7 @@ static int __init esi_init (void)
}
if (!esi)
- return -ENODEV;;
+ return -ENODEV;
systab = __va(esi);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index abce2468a40b..f1782705b1f7 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg)
* /proc/perfmon interface, for debug only
*/
-#define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1)
+#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 7053c55b7649..e6676fca4828 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -192,7 +192,7 @@ struct salinfo_platform_oemdata_parms {
static void
salinfo_work_to_do(struct salinfo_data *data)
{
- down_trylock(&data->mutex);
+ (void)(down_trylock(&data->mutex) ?: 0);
up(&data->mutex);
}
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f0c521b0ba4c..dabeefe21134 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -58,7 +58,8 @@ static struct local_tlb_flush_counts {
unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
-static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
+ shadow_flush_counts);
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
@@ -301,7 +302,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
return;
}
- smp_call_function_mask(mm->cpu_vm_mask,
+ smp_call_function_many(mm_cpumask(mm),
(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
local_irq_disable();
local_finish_flush_tlb_mm(mm);
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 4a95e86b9ac2..13d958975874 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -29,6 +29,7 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
*(.IA_64.unwind.exit.text)
*(.IA_64.unwind_info.exit.text)
}
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 64d520937874..ef3e7be29caf 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -1,12 +1,8 @@
#
# KVM configuration
#
-config HAVE_KVM
- bool
-config HAVE_KVM_IRQCHIP
- bool
- default y
+source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -28,6 +24,8 @@ config KVM
depends on PCI
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select HAVE_KVM_IRQCHIP
+ select KVM_APIC_ARCHITECTURE
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -49,9 +47,6 @@ config KVM_INTEL
Provides support for KVM on Itanium 2 processors equipped with the VT
extensions.
-config KVM_TRACE
- bool
-
source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 80c57b0a21c4..8054d7b1ca12 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -210,16 +210,6 @@ int kvm_dev_ioctl_check_extension(long ext)
}
-static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
- gpa_t addr, int len, int is_write)
-{
- struct kvm_io_device *dev;
-
- dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
-
- return dev;
-}
-
static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -231,6 +221,7 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct kvm_mmio_req *p;
struct kvm_io_device *mmio_dev;
+ int r;
p = kvm_get_vcpu_ioreq(vcpu);
@@ -247,16 +238,13 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->exit_reason = KVM_EXIT_MMIO;
return 0;
mmio:
- mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
- if (mmio_dev) {
- if (!p->dir)
- kvm_iodevice_write(mmio_dev, p->addr, p->size,
- &p->data);
- else
- kvm_iodevice_read(mmio_dev, p->addr, p->size,
- &p->data);
-
- } else
+ if (p->dir)
+ r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
+ p->size, &p->data);
+ else
+ r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
+ p->size, &p->data);
+ if (r)
printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
p->state = STATE_IORESP_READY;
@@ -337,13 +325,12 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
{
union ia64_lid lid;
int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
- if (kvm->vcpus[i]) {
- lid.val = VCPU_LID(kvm->vcpus[i]);
- if (lid.id == id && lid.eid == eid)
- return kvm->vcpus[i];
- }
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ lid.val = VCPU_LID(vcpu);
+ if (lid.id == id && lid.eid == eid)
+ return vcpu;
}
return NULL;
@@ -409,21 +396,21 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
+ struct kvm_vcpu *vcpui;
call_data.ptc_g_data = p->u.ptc_g_data;
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
- if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
- KVM_MP_STATE_UNINITIALIZED ||
- vcpu == kvm->vcpus[i])
+ kvm_for_each_vcpu(i, vcpui, kvm) {
+ if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
+ vcpu == vcpui)
continue;
- if (waitqueue_active(&kvm->vcpus[i]->wq))
- wake_up_interruptible(&kvm->vcpus[i]->wq);
+ if (waitqueue_active(&vcpui->wq))
+ wake_up_interruptible(&vcpui->wq);
- if (kvm->vcpus[i]->cpu != -1) {
- call_data.vcpu = kvm->vcpus[i];
- smp_call_function_single(kvm->vcpus[i]->cpu,
+ if (vcpui->cpu != -1) {
+ call_data.vcpu = vcpui;
+ smp_call_function_single(vcpui->cpu,
vcpu_global_purge, &call_data, 1);
} else
printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
@@ -852,8 +839,6 @@ struct kvm *kvm_arch_create_vm(void)
kvm_init_vm(kvm);
- kvm->arch.online_vcpus = 0;
-
return kvm;
}
@@ -1216,7 +1201,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
if (IS_ERR(vmm_vcpu))
return PTR_ERR(vmm_vcpu);
- if (vcpu->vcpu_id == 0) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
/*Set entry address for first run.*/
@@ -1224,7 +1209,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
/*Initialize itc offset for vcpus*/
itc_offset = 0UL - kvm_get_itc(vcpu);
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
+ for (i = 0; i < KVM_MAX_VCPUS; i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
v->arch.itc_offset = itc_offset;
@@ -1356,8 +1341,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
goto fail;
}
- kvm->arch.online_vcpus++;
-
return vcpu;
fail:
return ERR_PTR(r);
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c
index a85cb611ecd7..f1268b8e6f9e 100644
--- a/arch/ia64/kvm/kvm_lib.c
+++ b/arch/ia64/kvm/kvm_lib.c
@@ -11,5 +11,11 @@
*
*/
#undef CONFIG_MODULES
+#include <linux/module.h>
+#undef CONFIG_KALLSYMS
+#undef EXPORT_SYMBOL
+#undef EXPORT_SYMBOL_GPL
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
#include "../../../lib/vsprintf.c"
#include "../../../lib/ctype.c"
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index a8f84da04b49..bb862fb224f2 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -130,7 +130,7 @@ static void collect_interruption(struct kvm_vcpu *vcpu)
if (vdcr & IA64_DCR_PP) {
vpsr |= IA64_PSR_PP;
} else {
- vpsr &= ~IA64_PSR_PP;;
+ vpsr &= ~IA64_PSR_PP;
}
vcpu_set_psr(vcpu, vpsr);
@@ -594,11 +594,11 @@ static void set_pal_call_data(struct kvm_vcpu *vcpu)
p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
break;
case PAL_BRAND_INFO:
- p->u.pal_data.gr29 = gr29;;
+ p->u.pal_data.gr29 = gr29;
p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
break;
default:
- p->u.pal_data.gr29 = gr29;;
+ p->u.pal_data.gr29 = gr29;
p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
}
p->u.pal_data.gr28 = gr28;
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index a2c6c15e4761..05331a065adc 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -406,7 +406,7 @@ void getreg(unsigned long regnum, unsigned long *val,
* Now look at registers in [0-31] range and init correct UNAT
*/
addr = (unsigned long)regs;
- unat = &regs->eml_unat;;
+ unat = &regs->eml_unat;
addr += gr_info[regnum];
@@ -830,8 +830,8 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
kvm = (struct kvm *)KVM_VM_BASE;
- if (vcpu->vcpu_id == 0) {
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
VMX(v, itc_offset) = itc_offset;
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4290a429bf7c..20b3852f7a6e 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -135,7 +135,7 @@ struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
u64 rid;
rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;;
+ rid = rid & RR_RID_MASK;
if (type == D_TLB) {
if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
@@ -518,7 +518,7 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
struct thash_cb *hcb = &v->arch.vtlb;
- cch = __vtr_lookup(v, va, is_data);;
+ cch = __vtr_lookup(v, va, is_data);
if (cch)
return cch;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 729298f4b234..7de76dd352fe 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -537,7 +537,7 @@ pcibios_align_resource (void *data, struct resource *res,
/*
* PCI BIOS setup, always defaults to SAL interface
*/
-char * __devinit
+char * __init
pcibios_setup (char *str)
{
return str;
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 76645cf6ac5d..25831c47c579 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -435,7 +435,8 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
bricktype = MODULE_GET_BTYPE(moduleid);
if ((bricktype == L1_BRICKTYPE_191010) ||
(bricktype == L1_BRICKTYPE_1932))
- sprintf(address, "%s^%d", address, geo_slot(geoid));
+ sprintf(address + strlen(address), "^%d",
+ geo_slot(geoid));
}
void __devinit
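
The sn_generate_path() change above avoids handing the destination buffer to its own "%s" conversion: with overlapping source and destination the sprintf() result is undefined, so the fix appends at the current end of the string instead. The same pattern in isolation (sketch, names invented):

#include <linux/kernel.h>
#include <linux/string.h>

static void append_slot(char *address, int slot)
{
	/* was: sprintf(address, "%s^%d", address, slot);  -- overlapping, undefined */
	sprintf(address + strlen(address), "^%d", slot);
}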
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index e456f062f241..ece1bf994499 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
-DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d0..a735cc567931 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
if (prev != next) {
#ifdef CONFIG_SMP
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
#endif /* CONFIG_SMP */
/* Set MPTB = next->pgd */
*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
}
#ifdef CONFIG_SMP
else
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
activate_context(next);
#endif /* CONFIG_SMP */
}
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index b96a6d2ffbc3..e67ded1aab91 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif /* not __ASSEMBLY__ */
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad9..e6ec3cc12854 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -85,7 +85,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);
static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
unsigned long send_IPI_mask_phys(cpumask_t, int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +113,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
void smp_send_reschedule(int cpu_id)
{
WARN_ON(cpu_is_offline(cpu_id));
- send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+ send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}
/*==========================================================================*
@@ -168,7 +168,7 @@ void smp_flush_cache_all(void)
spin_lock(&flushcache_lock);
mask=cpus_addr(cpumask);
atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
- send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+ send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all();
while (flushcache_cpumask)
mb();
@@ -264,7 +264,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = mm->cpu_vm_mask;
+ cpu_mask = *mm_cpumask(mm);
cpu_clear(cpu_id, cpu_mask);
if (*mmc != NO_CONTEXT) {
@@ -273,7 +273,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
if (mm == current->mm)
activate_context(mm);
else
- cpu_clear(cpu_id, mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
local_irq_restore(flags);
}
if (!cpus_empty(cpu_mask))
@@ -334,7 +334,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = mm->cpu_vm_mask;
+ cpu_mask = *mm_cpumask(mm);
cpu_clear(cpu_id, cpu_mask);
#ifdef DEBUG_SMP
@@ -424,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
* We have to send the IPI only to
* CPUs affected.
*/
- send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+ send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
while (!cpus_empty(flush_cpumask)) {
/* nothing. lockup detection does not belong here */
@@ -469,7 +469,7 @@ void smp_invalidate_interrupt(void)
if (flush_mm == current->active_mm)
activate_context(flush_mm);
else
- cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
} else {
unsigned long va = flush_va;
@@ -546,14 +546,14 @@ static void stop_this_cpu(void *dummy)
for ( ; ; );
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}
void arch_send_call_function_single_ipi(int cpu)
{
- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+ send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}
/*==========================================================================*
@@ -729,7 +729,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
cpumask = cpu_online_map;
cpu_clear(smp_processor_id(), cpumask);
- send_IPI_mask(cpumask, ipi_num, try);
+ send_IPI_mask(&cpumask, ipi_num, try);
}
/*==========================================================================*
@@ -752,7 +752,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
* ---------- --- --------------------------------------------------------
*
*==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
cpumask_t physid_mask, tmp;
int cpu_id, phys_id;
@@ -761,11 +761,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
if (num_cpus <= 1) /* NO MP */
return;
- cpus_and(tmp, cpumask, cpu_online_map);
- BUG_ON(!cpus_equal(cpumask, tmp));
+ cpumask_and(&tmp, cpumask, cpu_online_mask);
+ BUG_ON(!cpumask_equal(cpumask, &tmp));
physid_mask = CPU_MASK_NONE;
- for_each_cpu_mask(cpu_id, cpumask){
+ for_each_cpu(cpu_id, cpumask) {
if ((phys_id = cpu_to_physid(cpu_id)) != -1)
cpu_set(phys_id, physid_mask);
}
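For reference, a minimal sketch of the pointer-based cpumask calling convention the hunks above move to; the helper name example_kick and the send_ipi callback are purely illustrative and not part of the patch. A single CPU is named with cpumask_of(), and a locally built mask is passed by address, matching the new send_IPI_mask() prototype.

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    static void example_kick(void (*send_ipi)(const struct cpumask *, int, int),
                             int ipi, int target_cpu)
    {
            cpumask_t mask;

            send_ipi(cpumask_of(target_cpu), ipi, 1);        /* one CPU */

            cpumask_copy(&mask, cpu_online_mask);            /* all online CPUs... */
            cpumask_clear_cpu(smp_processor_id(), &mask);    /* ...except this one */
            send_ipi(&mask, ipi, 0);                         /* one IPI for the mask */
    }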
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2547d6c4a827..ffc3bbcfd3bb 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
physid_set(phys_id, phys_cpu_present_map);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(&cpu_possible_map);
#endif
show_mp_info(nr_cpu);
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 4179adf6c624..480a49944cfd 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -125,6 +125,7 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
/* Stabs debugging sections. */
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index 1cf544767453..87cabc334249 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -1,5 +1,159 @@
-#ifdef __uClinux__
-#include "checksum_no.h"
+#ifndef _M68K_CHECKSUM_H
+#define _M68K_CHECKSUM_H
+
+#include <linux/in6.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst,
+ int len, __wsum sum,
+ int *csum_err);
+
+extern __wsum csum_partial_copy_nocheck(const void *src,
+ void *dst, int len,
+ __wsum sum);
+
+
+#ifdef CONFIG_MMU
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum = 0;
+ unsigned long tmp;
+
+ __asm__ ("subqw #1,%2\n"
+ "1:\t"
+ "movel %1@+,%3\n\t"
+ "addxl %3,%0\n\t"
+ "dbra %2,1b\n\t"
+ "movel %0,%3\n\t"
+ "swap %3\n\t"
+ "addxw %3,%0\n\t"
+ "clrw %3\n\t"
+ "addxw %3,%0\n\t"
+ : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
+ : "0" (sum), "1" (iph), "2" (ihl)
+ : "memory");
+ return (__force __sum16)~sum;
+}
#else
-#include "checksum_mm.h"
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
#endif
+
+/*
+ * Fold a partial checksum
+ */
+
+static inline __sum16 csum_fold(__wsum sum)
+{
+ unsigned int tmp = (__force u32)sum;
+#ifdef CONFIG_COLDFIRE
+ tmp = (tmp & 0xffff) + (tmp >> 16);
+ tmp = (tmp & 0xffff) + (tmp >> 16);
+ return (__force __sum16)~tmp;
+#else
+ __asm__("swap %1\n\t"
+ "addw %1, %0\n\t"
+ "clrw %1\n\t"
+ "addxw %1, %0"
+ : "=&d" (sum), "=&d" (tmp)
+ : "0" (sum), "1" (tmp));
+ return (__force __sum16)~sum;
+#endif
+}
+
+
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ __asm__ ("addl %2,%0\n\t"
+ "addxl %3,%0\n\t"
+ "addxl %4,%0\n\t"
+ "clrl %1\n\t"
+ "addxl %1,%0"
+ : "=&d" (sum), "=d" (saddr)
+ : "g" (daddr), "1" (saddr), "d" (len + proto),
+ "0" (sum));
+ return sum;
+}
+
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+ __u32 len, unsigned short proto, __wsum sum)
+{
+ register unsigned long tmp;
+ __asm__("addl %2@,%0\n\t"
+ "movel %2@(4),%1\n\t"
+ "addxl %1,%0\n\t"
+ "movel %2@(8),%1\n\t"
+ "addxl %1,%0\n\t"
+ "movel %2@(12),%1\n\t"
+ "addxl %1,%0\n\t"
+ "movel %3@,%1\n\t"
+ "addxl %1,%0\n\t"
+ "movel %3@(4),%1\n\t"
+ "addxl %1,%0\n\t"
+ "movel %3@(8),%1\n\t"
+ "addxl %1,%0\n\t"
+ "movel %3@(12),%1\n\t"
+ "addxl %1,%0\n\t"
+ "addxl %4,%0\n\t"
+ "clrl %1\n\t"
+ "addxl %1,%0"
+ : "=&d" (sum), "=&d" (tmp)
+ : "a" (saddr), "a" (daddr), "d" (len + proto),
+ "0" (sum));
+
+ return csum_fold(sum);
+}
+
+#endif /* _M68K_CHECKSUM_H */
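Every routine above ends by folding the running 32-bit sum down to the final 16-bit ones'-complement checksum. For readers without m68k assembler, here is a portable C sketch of the folding done by the CONFIG_COLDFIRE branch of csum_fold(); it is illustrative only and not part of the header.

    #include <stdint.h>

    /* Fold a 32-bit partial checksum to 16 bits and complement it,
     * mirroring the two end-around-carry adds in the ColdFire branch. */
    static uint16_t fold_csum32(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);   /* add high half into low half */
            sum = (sum & 0xffff) + (sum >> 16);   /* absorb any carry from above */
            return (uint16_t)~sum;
    }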
diff --git a/arch/m68k/include/asm/checksum_mm.h b/arch/m68k/include/asm/checksum_mm.h
deleted file mode 100644
index 494f9aec37ea..000000000000
--- a/arch/m68k/include/asm/checksum_mm.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef _M68K_CHECKSUM_H
-#define _M68K_CHECKSUM_H
-
-#include <linux/in6.h>
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-extern __wsum csum_partial_copy_from_user(const void __user *src,
- void *dst,
- int len, __wsum sum,
- int *csum_err);
-
-extern __wsum csum_partial_copy_nocheck(const void *src,
- void *dst, int len,
- __wsum sum);
-
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- *
- */
-static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
- unsigned int sum = 0;
- unsigned long tmp;
-
- __asm__ ("subqw #1,%2\n"
- "1:\t"
- "movel %1@+,%3\n\t"
- "addxl %3,%0\n\t"
- "dbra %2,1b\n\t"
- "movel %0,%3\n\t"
- "swap %3\n\t"
- "addxw %3,%0\n\t"
- "clrw %3\n\t"
- "addxw %3,%0\n\t"
- : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
- : "0" (sum), "1" (iph), "2" (ihl)
- : "memory");
- return (__force __sum16)~sum;
-}
-
-/*
- * Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
- unsigned int tmp = (__force u32)sum;
- __asm__("swap %1\n\t"
- "addw %1, %0\n\t"
- "clrw %1\n\t"
- "addxw %1, %0"
- : "=&d" (sum), "=&d" (tmp)
- : "0" (sum), "1" (tmp));
- return (__force __sum16)~sum;
-}
-
-
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-{
- __asm__ ("addl %2,%0\n\t"
- "addxl %3,%0\n\t"
- "addxl %4,%0\n\t"
- "clrl %1\n\t"
- "addxl %1,%0"
- : "=&d" (sum), "=d" (saddr)
- : "g" (daddr), "1" (saddr), "d" (len + proto),
- "0" (sum));
- return sum;
-}
-
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-static inline __sum16 ip_compute_csum(const void *buff, int len)
-{
- return csum_fold (csum_partial(buff, len, 0));
-}
-
-#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ __sum16
-csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
- __u32 len, unsigned short proto, __wsum sum)
-{
- register unsigned long tmp;
- __asm__("addl %2@,%0\n\t"
- "movel %2@(4),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %2@(8),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %2@(12),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %3@,%1\n\t"
- "addxl %1,%0\n\t"
- "movel %3@(4),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %3@(8),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %3@(12),%1\n\t"
- "addxl %1,%0\n\t"
- "addxl %4,%0\n\t"
- "clrl %1\n\t"
- "addxl %1,%0"
- : "=&d" (sum), "=&d" (tmp)
- : "a" (saddr), "a" (daddr), "d" (len + proto),
- "0" (sum));
-
- return csum_fold(sum);
-}
-
-#endif /* _M68K_CHECKSUM_H */
diff --git a/arch/m68k/include/asm/checksum_no.h b/arch/m68k/include/asm/checksum_no.h
deleted file mode 100644
index 81883482ffb1..000000000000
--- a/arch/m68k/include/asm/checksum_no.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef _M68K_CHECKSUM_H
-#define _M68K_CHECKSUM_H
-
-#include <linux/in6.h>
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-extern __wsum csum_partial_copy_from_user(const void __user *src,
- void *dst, int len, __wsum sum, int *csum_err);
-
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-/*
- * Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
- unsigned int tmp = (__force u32)sum;
-#ifdef CONFIG_COLDFIRE
- tmp = (tmp & 0xffff) + (tmp >> 16);
- tmp = (tmp & 0xffff) + (tmp >> 16);
- return (__force __sum16)~tmp;
-#else
- __asm__("swap %1\n\t"
- "addw %1, %0\n\t"
- "clrw %1\n\t"
- "addxw %1, %0"
- : "=&d" (sum), "=&d" (tmp)
- : "0" (sum), "1" (sum));
- return (__force __sum16)~sum;
-#endif
-}
-
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-{
- __asm__ ("addl %1,%0\n\t"
- "addxl %4,%0\n\t"
- "addxl %5,%0\n\t"
- "clrl %1\n\t"
- "addxl %1,%0"
- : "=&d" (sum), "=&d" (saddr)
- : "0" (daddr), "1" (saddr), "d" (len + proto),
- "d"(sum));
- return sum;
-}
-
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-extern __sum16 ip_compute_csum(const void *buff, int len);
-
-#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ __sum16
-csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
- __u32 len, unsigned short proto, __wsum sum)
-{
- register unsigned long tmp;
- __asm__("addl %2@,%0\n\t"
- "movel %2@(4),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %2@(8),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %2@(12),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %3@,%1\n\t"
- "addxl %1,%0\n\t"
- "movel %3@(4),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %3@(8),%1\n\t"
- "addxl %1,%0\n\t"
- "movel %3@(12),%1\n\t"
- "addxl %1,%0\n\t"
- "addxl %4,%0\n\t"
- "clrl %1\n\t"
- "addxl %1,%0"
- : "=&d" (sum), "=&d" (tmp)
- : "a" (saddr), "a" (daddr), "d" (len + proto),
- "0" (sum));
-
- return csum_fold(sum);
-}
-
-#endif /* _M68K_CHECKSUM_H */
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index b82e660cf1c2..7cf7066d03b7 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -1,5 +1,491 @@
-#ifdef __uClinux__
-#include "dma_no.h"
+#ifndef _M68K_DMA_H
+#define _M68K_DMA_H 1
+
+#ifdef CONFIG_COLDFIRE
+/*
+ * ColdFire DMA Model:
+ * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
+ * address mode emits a source address, and expects that the device will either
+ * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
+ * the device will place data on the correct byte(s) of the data bus, as the
+ * memory transactions are always 32 bits. This implies that only 32 bit
+ * devices will find single mode transfers useful. Dual address DMA mode
+ * performs two cycles: source read and destination write. ColdFire will
+ * align the data so that the device will always get the correct bytes, thus
+ * is useful for 8 and 16 bit devices. This is the mode that is supported
+ * below.
+ *
+ * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
+ * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
+ *
+ * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
+ * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
+ *
+ * APR/18/2002 : added proper support for MCF5272 DMA controller.
+ * Arthur Shipkowski (art@videon-central.com)
+ */
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfdma.h>
+
+/*
+ * Set number of channels of DMA on ColdFire for different implementations.
+ */
+#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
+ defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
+#define MAX_M68K_DMA_CHANNELS 4
+#elif defined(CONFIG_M5272)
+#define MAX_M68K_DMA_CHANNELS 1
+#elif defined(CONFIG_M532x)
+#define MAX_M68K_DMA_CHANNELS 0
#else
-#include "dma_mm.h"
+#define MAX_M68K_DMA_CHANNELS 2
#endif
+
+extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
+extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
+
+#if !defined(CONFIG_M5272)
+#define DMA_MODE_WRITE_BIT 0x01 /* Memory/IO to IO/Memory select */
+#define DMA_MODE_WORD_BIT 0x02 /* 8 or 16 bit transfers */
+#define DMA_MODE_LONG_BIT 0x04 /* or 32 bit transfers */
+#define DMA_MODE_SINGLE_BIT 0x08 /* single-address-mode */
+
+/* I/O to memory, 8 bits, mode */
+#define DMA_MODE_READ 0
+/* memory to I/O, 8 bits, mode */
+#define DMA_MODE_WRITE 1
+/* I/O to memory, 16 bits, mode */
+#define DMA_MODE_READ_WORD 2
+/* memory to I/O, 16 bits, mode */
+#define DMA_MODE_WRITE_WORD 3
+/* I/O to memory, 32 bits, mode */
+#define DMA_MODE_READ_LONG 4
+/* memory to I/O, 32 bits, mode */
+#define DMA_MODE_WRITE_LONG 5
+/* I/O to memory, 8 bits, single-address-mode */
+#define DMA_MODE_READ_SINGLE 8
+/* memory to I/O, 8 bits, single-address-mode */
+#define DMA_MODE_WRITE_SINGLE 9
+/* I/O to memory, 16 bits, single-address-mode */
+#define DMA_MODE_READ_WORD_SINGLE 10
+/* memory to I/O, 16 bits, single-address-mode */
+#define DMA_MODE_WRITE_WORD_SINGLE 11
+/* I/O to memory, 32 bits, single-address-mode */
+#define DMA_MODE_READ_LONG_SINGLE 12
+/* memory to I/O, 32 bits, single-address-mode */
+#define DMA_MODE_WRITE_LONG_SINGLE 13
+
+#else /* CONFIG_M5272 is defined */
+
+/* Source static-address mode */
+#define DMA_MODE_SRC_SA_BIT 0x01
+/* Two bits to select between all four modes */
+#define DMA_MODE_SSIZE_MASK 0x06
+/* Offset to shift bits in */
+#define DMA_MODE_SSIZE_OFF 0x01
+/* Destination static-address mode */
+#define DMA_MODE_DES_SA_BIT 0x10
+/* Two bits to select between all four modes */
+#define DMA_MODE_DSIZE_MASK 0x60
+/* Offset to shift bits in */
+#define DMA_MODE_DSIZE_OFF 0x05
+/* Size modifiers */
+#define DMA_MODE_SIZE_LONG 0x00
+#define DMA_MODE_SIZE_BYTE 0x01
+#define DMA_MODE_SIZE_WORD 0x02
+#define DMA_MODE_SIZE_LINE 0x03
+
+/*
+ * Aliases to help speed quick ports; these may be suboptimal, however. They
+ * do not include the SINGLE mode modifiers since the MCF5272 does not have a
+ * mode where the device is in control of its addressing.
+ */
+
+/* I/O to memory, 8 bits, mode */
+#define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
+/* memory to I/O, 8 bits, mode */
+#define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
+/* I/O to memory, 16 bits, mode */
+#define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
+/* memory to I/O, 16 bits, mode */
+#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
+/* I/O to memory, 32 bits, mode */
+#define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
+/* memory to I/O, 32 bits, mode */
+#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
+
+#endif /* !defined(CONFIG_M5272) */
+
+#if !defined(CONFIG_M5272)
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ volatile unsigned short *dmawp;
+
+#ifdef DMA_DEBUG
+ printk("enable_dma(dmanr=%d)\n", dmanr);
+#endif
+
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
+ dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ volatile unsigned short *dmawp;
+ volatile unsigned char *dmapb;
+
+#ifdef DMA_DEBUG
+ printk("disable_dma(dmanr=%d)\n", dmanr);
+#endif
+
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
+ dmapb = (unsigned char *) dma_base_addr[dmanr];
+
+ /* Turn off external requests, and stop any DMA in progress */
+ dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
+ dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
+}
+
+/*
+ * Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ *
+ * This is a NOP for ColdFire. Provide a stub for compatibility.
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+
+ volatile unsigned char *dmabp;
+ volatile unsigned short *dmawp;
+
+#ifdef DMA_DEBUG
+ printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
+#endif
+
+ dmabp = (unsigned char *) dma_base_addr[dmanr];
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
+
+ // Clear config errors
+ dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
+
+ // Set command register
+ dmawp[MCFDMA_DCR] =
+ MCFDMA_DCR_INT | // Enable completion irq
+ MCFDMA_DCR_CS | // Force one xfer per request
+ MCFDMA_DCR_AA | // Enable auto alignment
+ // single-address-mode
+ ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
+ // sets s_rw (-> r/w) high if Memory to I/0
+ ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
+ // Memory to I/O or I/O to Memory
+ ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
+ // 32 bit, 16 bit or 8 bit transfers
+ ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
+ ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
+ MCFDMA_DCR_SSIZE_BYTE)) |
+ ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
+ ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
+ MCFDMA_DCR_DSIZE_BYTE));
+
+#ifdef DEBUG_DMA
+ printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
+ dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
+ (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
+#endif
+}
+
+/* Set transfer address for specific DMA channel */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+ volatile unsigned short *dmawp;
+ volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+ printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
+ dmalp = (unsigned int *) dma_base_addr[dmanr];
+
+ // Determine which address registers are used for memory/device accesses
+ if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
+ // Source incrementing, must be memory
+ dmalp[MCFDMA_SAR] = a;
+ // Set dest address, must be device
+ dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
+ } else {
+ // Destination incrementing, must be memory
+ dmalp[MCFDMA_DAR] = a;
+ // Set source address, must be device
+ dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
+ }
+
+#ifdef DEBUG_DMA
+ printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
+ __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
+ (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
+ (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
+#endif
+}
+
+/*
+ * Specific for Coldfire - sets device address.
+ * Should be called after the mode set call, and before set DMA address.
+ */
+static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
+{
+#ifdef DMA_DEBUG
+ printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+ dma_device_address[dmanr] = a;
+}
+
+/*
+ * NOTE 2: "count" represents _bytes_.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ volatile unsigned short *dmawp;
+
+#ifdef DMA_DEBUG
+ printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
+#endif
+
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
+ dmawp[MCFDMA_BCR] = (unsigned short)count;
+}
+
+/*
+ * Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ volatile unsigned short *dmawp;
+ unsigned short count;
+
+#ifdef DMA_DEBUG
+ printk("get_dma_residue(dmanr=%d)\n", dmanr);
+#endif
+
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
+ count = dmawp[MCFDMA_BCR];
+ return((int) count);
+}
+#else /* CONFIG_M5272 is defined */
+
+/*
+ * The MCF5272 DMA controller is very different than the controller defined above
+ * in terms of register mapping. For instance, with the exception of the 16-bit
+ * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
+ *
+ * The big difference, however, is the lack of device-requested DMA. All modes
+ * are dual address transfer, and there is no 'device' setup or direction bit.
+ * You can DMA between a device and memory, between memory and memory, or even between
+ * two devices directly, with any combination of incrementing and non-incrementing
+ * addresses you choose. This puts a crimp in distinguishing between the 'device
+ * address' set up by set_dma_device_addr.
+ *
+ * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
+ * which will act exactly as above -- it will look to see if the source is set to
+ * autoincrement, and if so it will make the source use the set_dma_addr value and the
+ * destination the set_dma_device_addr value. Otherwise the source will be set to the
+ * set_dma_device_addr value and the destination will get the set_dma_addr value.
+ *
+ * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
+ * and make it explicit. Depending on what you're doing, one of these two should work
+ * for you, but don't mix them in the same transfer setup.
+ */
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+ printk("enable_dma(dmanr=%d)\n", dmanr);
+#endif
+
+ dmalp = (unsigned int *) dma_base_addr[dmanr];
+ dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+ printk("disable_dma(dmanr=%d)\n", dmanr);
+#endif
+
+ dmalp = (unsigned int *) dma_base_addr[dmanr];
+
+ /* Turn off external requests, and stop any DMA in progress */
+ dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
+ dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
+}
+
+/*
+ * Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ *
+ * This is a NOP for ColdFire. Provide a stub for compatibility.
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+
+ volatile unsigned int *dmalp;
+ volatile unsigned short *dmawp;
+
+#ifdef DMA_DEBUG
+ printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
+#endif
+ dmalp = (unsigned int *) dma_base_addr[dmanr];
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
+
+ // Clear config errors
+ dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
+
+ // Set command register
+ dmalp[MCFDMA_DMR] =
+ MCFDMA_DMR_RQM_DUAL | // Mandatory Request Mode setting
+ MCFDMA_DMR_DSTT_SD | // Set up addressing types; set to supervisor-data.
+ MCFDMA_DMR_SRCT_SD | // Set up addressing types; set to supervisor-data.
+ // source static-address-mode
+ ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
+ // dest static-address-mode
+ ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
+ // burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272
+ (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
+ (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
+
+ dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN; /* Enable completion interrupts */
+
+#ifdef DEBUG_DMA
+ printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
+ dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
+ (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
+#endif
+}
+
+/* Set transfer address for specific DMA channel */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+ volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+ printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+ dmalp = (unsigned int *) dma_base_addr[dmanr];
+
+ // Determine which address registers are used for memory/device accesses
+ if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
+ // Source incrementing, must be memory
+ dmalp[MCFDMA_DSAR] = a;
+ // Set dest address, must be device
+ dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
+ } else {
+ // Destination incrementing, must be memory
+ dmalp[MCFDMA_DDAR] = a;
+ // Set source address, must be device
+ dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
+ }
+
+#ifdef DEBUG_DMA
+ printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
+ __FILE__, __LINE__, dmanr, (int) &dmalp[MCFDMA_DMR], dmalp[MCFDMA_DMR],
+ (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
+ (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
+#endif
+}
+
+/*
+ * Specific for Coldfire - sets device address.
+ * Should be called after the mode set call, and before set DMA address.
+ */
+static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
+{
+#ifdef DMA_DEBUG
+ printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
+#endif
+
+ dma_device_address[dmanr] = a;
+}
+
+/*
+ * NOTE 2: "count" represents _bytes_.
+ *
+ * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ volatile unsigned int *dmalp;
+
+#ifdef DMA_DEBUG
+ printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
+#endif
+
+ dmalp = (unsigned int *) dma_base_addr[dmanr];
+ dmalp[MCFDMA_DBCR] = count;
+}
+
+/*
+ * Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ volatile unsigned int *dmalp;
+ unsigned int count;
+
+#ifdef DMA_DEBUG
+ printk("get_dma_residue(dmanr=%d)\n", dmanr);
+#endif
+
+ dmalp = (unsigned int *) dma_base_addr[dmanr];
+ count = dmalp[MCFDMA_DBCR];
+ return(count);
+}
+
+#endif /* !defined(CONFIG_M5272) */
+#endif /* CONFIG_COLDFIRE */
+
+/* it's useless on the m68k, but unfortunately needed by the new
+ bootmem allocator (but this value should do) */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+#define MAX_DMA_CHANNELS 8
+
+extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr); /* release it again */
+
+#define isa_dma_bridge_buggy (0)
+
+#endif /* _M68K_DMA_H */
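The helpers above are intended to be called in a fixed order: set the mode, then the device address, then the memory address and byte count, and finally enable the channel. The following is a hedged usage sketch of one dual-address transfer; the channel number, device FIFO address, and function name are hypothetical, and a non-MCF5272 part is assumed so that DMA_MODE_WRITE means memory-to-I/O, 8-bit.

    /* Illustrative only: one polled memory -> device DMA transfer. */
    static int example_dma_to_device(void *buf, unsigned int len)
    {
            unsigned int chan = 0;                  /* assumed free channel */
            unsigned int dev_fifo = 0x10000000;     /* hypothetical device register */

            if (request_dma(chan, "example-dev") != 0)
                    return -1;

            set_dma_mode(chan, DMA_MODE_WRITE);     /* direction and width */
            set_dma_device_addr(chan, dev_fifo);    /* after mode, before addr */
            set_dma_addr(chan, (unsigned int)buf);  /* memory side of the transfer */
            set_dma_count(chan, len);               /* count is in bytes */
            enable_dma(chan);

            while (get_dma_residue(chan) != 0)
                    ;                               /* poll until all bytes moved */

            disable_dma(chan);
            free_dma(chan);
            return 0;
    }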
diff --git a/arch/m68k/include/asm/dma_mm.h b/arch/m68k/include/asm/dma_mm.h
deleted file mode 100644
index 4240fbc946f8..000000000000
--- a/arch/m68k/include/asm/dma_mm.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _M68K_DMA_H
-#define _M68K_DMA_H 1
-
-
-/* it's useless on the m68k, but unfortunately needed by the new
- bootmem allocator (but this should do it for this) */
-#define MAX_DMA_ADDRESS PAGE_OFFSET
-
-#define MAX_DMA_CHANNELS 8
-
-extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
-#define isa_dma_bridge_buggy (0)
-
-#endif /* _M68K_DMA_H */
diff --git a/arch/m68k/include/asm/dma_no.h b/arch/m68k/include/asm/dma_no.h
deleted file mode 100644
index 939a02056217..000000000000
--- a/arch/m68k/include/asm/dma_no.h
+++ /dev/null
@@ -1,494 +0,0 @@
-#ifndef _M68K_DMA_H
-#define _M68K_DMA_H 1
-
-//#define DMA_DEBUG 1
-
-
-#ifdef CONFIG_COLDFIRE
-/*
- * ColdFire DMA Model:
- * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
- * address mode emits a source address, and expects that the device will either
- * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
- * the device will place data on the correct byte(s) of the data bus, as the
- * memory transactions are always 32 bits. This implies that only 32 bit
- * devices will find single mode transfers useful. Dual address DMA mode
- * performs two cycles: source read and destination write. ColdFire will
- * align the data so that the device will always get the correct bytes, thus
- * is useful for 8 and 16 bit devices. This is the mode that is supported
- * below.
- *
- * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
- * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
- *
- * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
- * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
- *
- * APR/18/2002 : added proper support for MCF5272 DMA controller.
- * Arthur Shipkowski (art@videon-central.com)
- */
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfdma.h>
-
-/*
- * Set number of channels of DMA on ColdFire for different implementations.
- */
-#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
- defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
-#define MAX_M68K_DMA_CHANNELS 4
-#elif defined(CONFIG_M5272)
-#define MAX_M68K_DMA_CHANNELS 1
-#elif defined(CONFIG_M532x)
-#define MAX_M68K_DMA_CHANNELS 0
-#else
-#define MAX_M68K_DMA_CHANNELS 2
-#endif
-
-extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
-extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
-
-#if !defined(CONFIG_M5272)
-#define DMA_MODE_WRITE_BIT 0x01 /* Memory/IO to IO/Memory select */
-#define DMA_MODE_WORD_BIT 0x02 /* 8 or 16 bit transfers */
-#define DMA_MODE_LONG_BIT 0x04 /* or 32 bit transfers */
-#define DMA_MODE_SINGLE_BIT 0x08 /* single-address-mode */
-
-/* I/O to memory, 8 bits, mode */
-#define DMA_MODE_READ 0
-/* memory to I/O, 8 bits, mode */
-#define DMA_MODE_WRITE 1
-/* I/O to memory, 16 bits, mode */
-#define DMA_MODE_READ_WORD 2
-/* memory to I/O, 16 bits, mode */
-#define DMA_MODE_WRITE_WORD 3
-/* I/O to memory, 32 bits, mode */
-#define DMA_MODE_READ_LONG 4
-/* memory to I/O, 32 bits, mode */
-#define DMA_MODE_WRITE_LONG 5
-/* I/O to memory, 8 bits, single-address-mode */
-#define DMA_MODE_READ_SINGLE 8
-/* memory to I/O, 8 bits, single-address-mode */
-#define DMA_MODE_WRITE_SINGLE 9
-/* I/O to memory, 16 bits, single-address-mode */
-#define DMA_MODE_READ_WORD_SINGLE 10
-/* memory to I/O, 16 bits, single-address-mode */
-#define DMA_MODE_WRITE_WORD_SINGLE 11
-/* I/O to memory, 32 bits, single-address-mode */
-#define DMA_MODE_READ_LONG_SINGLE 12
-/* memory to I/O, 32 bits, single-address-mode */
-#define DMA_MODE_WRITE_LONG_SINGLE 13
-
-#else /* CONFIG_M5272 is defined */
-
-/* Source static-address mode */
-#define DMA_MODE_SRC_SA_BIT 0x01
-/* Two bits to select between all four modes */
-#define DMA_MODE_SSIZE_MASK 0x06
-/* Offset to shift bits in */
-#define DMA_MODE_SSIZE_OFF 0x01
-/* Destination static-address mode */
-#define DMA_MODE_DES_SA_BIT 0x10
-/* Two bits to select between all four modes */
-#define DMA_MODE_DSIZE_MASK 0x60
-/* Offset to shift bits in */
-#define DMA_MODE_DSIZE_OFF 0x05
-/* Size modifiers */
-#define DMA_MODE_SIZE_LONG 0x00
-#define DMA_MODE_SIZE_BYTE 0x01
-#define DMA_MODE_SIZE_WORD 0x02
-#define DMA_MODE_SIZE_LINE 0x03
-
-/*
- * Aliases to help speed quick ports; these may be suboptimal, however. They
- * do not include the SINGLE mode modifiers since the MCF5272 does not have a
- * mode where the device is in control of its addressing.
- */
-
-/* I/O to memory, 8 bits, mode */
-#define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 8 bits, mode */
-#define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-/* I/O to memory, 16 bits, mode */
-#define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 16 bits, mode */
-#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-/* I/O to memory, 32 bits, mode */
-#define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
-/* memory to I/O, 32 bits, mode */
-#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
-
-#endif /* !defined(CONFIG_M5272) */
-
-#if !defined(CONFIG_M5272)
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
- printk("enable_dma(dmanr=%d)\n", dmanr);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- volatile unsigned short *dmawp;
- volatile unsigned char *dmapb;
-
-#ifdef DMA_DEBUG
- printk("disable_dma(dmanr=%d)\n", dmanr);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- dmapb = (unsigned char *) dma_base_addr[dmanr];
-
- /* Turn off external requests, and stop any DMA in progress */
- dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
- dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
-}
-
-/*
- * Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- *
- * This is a NOP for ColdFire. Provide a stub for compatibility.
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-
- volatile unsigned char *dmabp;
- volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
-#endif
-
- dmabp = (unsigned char *) dma_base_addr[dmanr];
- dmawp = (unsigned short *) dma_base_addr[dmanr];
-
- // Clear config errors
- dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
-
- // Set command register
- dmawp[MCFDMA_DCR] =
- MCFDMA_DCR_INT | // Enable completion irq
- MCFDMA_DCR_CS | // Force one xfer per request
- MCFDMA_DCR_AA | // Enable auto alignment
- // single-address-mode
- ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
- // sets s_rw (-> r/w) high if Memory to I/0
- ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
- // Memory to I/O or I/O to Memory
- ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
- // 32 bit, 16 bit or 8 bit transfers
- ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
- ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
- MCFDMA_DCR_SSIZE_BYTE)) |
- ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
- ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
- MCFDMA_DCR_DSIZE_BYTE));
-
-#ifdef DEBUG_DMA
- printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
- dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
- (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
-#endif
-}
-
-/* Set transfer address for specific DMA channel */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- volatile unsigned short *dmawp;
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- dmalp = (unsigned int *) dma_base_addr[dmanr];
-
- // Determine which address registers are used for memory/device accesses
- if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
- // Source incrementing, must be memory
- dmalp[MCFDMA_SAR] = a;
- // Set dest address, must be device
- dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
- } else {
- // Destination incrementing, must be memory
- dmalp[MCFDMA_DAR] = a;
- // Set source address, must be device
- dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
- }
-
-#ifdef DEBUG_DMA
- printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
- __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
- (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
- (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
-#endif
-}
-
-/*
- * Specific for Coldfire - sets device address.
- * Should be called after the mode set call, and before set DMA address.
- */
-static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
-{
-#ifdef DMA_DEBUG
- printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
- dma_device_address[dmanr] = a;
-}
-
-/*
- * NOTE 2: "count" represents _bytes_.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- dmawp[MCFDMA_BCR] = (unsigned short)count;
-}
-
-/*
- * Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- volatile unsigned short *dmawp;
- unsigned short count;
-
-#ifdef DMA_DEBUG
- printk("get_dma_residue(dmanr=%d)\n", dmanr);
-#endif
-
- dmawp = (unsigned short *) dma_base_addr[dmanr];
- count = dmawp[MCFDMA_BCR];
- return((int) count);
-}
-#else /* CONFIG_M5272 is defined */
-
-/*
- * The MCF5272 DMA controller is very different than the controller defined above
- * in terms of register mapping. For instance, with the exception of the 16-bit
- * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
- *
- * The big difference, however, is the lack of device-requested DMA. All modes
- * are dual address transfer, and there is no 'device' setup or direction bit.
- * You can DMA between a device and memory, between memory and memory, or even between
- * two devices directly, with any combination of incrementing and non-incrementing
- * addresses you choose. This puts a crimp in distinguishing between the 'device
- * address' set up by set_dma_device_addr.
- *
- * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
- * which will act exactly as above in -- it will look to see if the source is set to
- * autoincrement, and if so it will make the source use the set_dma_addr value and the
- * destination the set_dma_device_addr value. Otherwise the source will be set to the
- * set_dma_device_addr value and the destination will get the set_dma_addr value.
- *
- * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
- * and make it explicit. Depending on what you're doing, one of these two should work
- * for you, but don't mix them in the same transfer setup.
- */
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("enable_dma(dmanr=%d)\n", dmanr);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
- dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("disable_dma(dmanr=%d)\n", dmanr);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
-
- /* Turn off external requests, and stop any DMA in progress */
- dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
- dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
-}
-
-/*
- * Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- *
- * This is a NOP for ColdFire. Provide a stub for compatibility.
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-
- volatile unsigned int *dmalp;
- volatile unsigned short *dmawp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
-#endif
- dmalp = (unsigned int *) dma_base_addr[dmanr];
- dmawp = (unsigned short *) dma_base_addr[dmanr];
-
- // Clear config errors
- dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
-
- // Set command register
- dmalp[MCFDMA_DMR] =
- MCFDMA_DMR_RQM_DUAL | // Mandatory Request Mode setting
- MCFDMA_DMR_DSTT_SD | // Set up addressing types; set to supervisor-data.
- MCFDMA_DMR_SRCT_SD | // Set up addressing types; set to supervisor-data.
- // source static-address-mode
- ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
- // dest static-address-mode
- ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
- // burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272
- (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
- (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
-
- dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN; /* Enable completion interrupts */
-
-#ifdef DEBUG_DMA
- printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
- dmanr, (int) &dmalp[MCFDMA_DMR], dmabp[MCFDMA_DMR],
- (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
-#endif
-}
-
-/* Set transfer address for specific DMA channel */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
-
- // Determine which address registers are used for memory/device accesses
- if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
- // Source incrementing, must be memory
- dmalp[MCFDMA_DSAR] = a;
- // Set dest address, must be device
- dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
- } else {
- // Destination incrementing, must be memory
- dmalp[MCFDMA_DDAR] = a;
- // Set source address, must be device
- dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
- }
-
-#ifdef DEBUG_DMA
- printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
- __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DMR], dmawp[MCFDMA_DMR],
- (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
- (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
-#endif
-}
-
-/*
- * Specific for Coldfire - sets device address.
- * Should be called after the mode set call, and before set DMA address.
- */
-static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
-{
-#ifdef DMA_DEBUG
- printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
-#endif
-
- dma_device_address[dmanr] = a;
-}
-
-/*
- * NOTE 2: "count" represents _bytes_.
- *
- * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- volatile unsigned int *dmalp;
-
-#ifdef DMA_DEBUG
- printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
- dmalp[MCFDMA_DBCR] = count;
-}
-
-/*
- * Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- volatile unsigned int *dmalp;
- unsigned int count;
-
-#ifdef DMA_DEBUG
- printk("get_dma_residue(dmanr=%d)\n", dmanr);
-#endif
-
- dmalp = (unsigned int *) dma_base_addr[dmanr];
- count = dmalp[MCFDMA_DBCR];
- return(count);
-}
-
-#endif /* !defined(CONFIG_M5272) */
-#endif /* CONFIG_COLDFIRE */
-
-#define MAX_DMA_CHANNELS 8
-
-/* Don't define MAX_DMA_ADDRESS; it's useless on the m68k/coldfire and any
- occurrence should be flagged as an error. */
-/* under 2.4 it is actually needed by the new bootmem allocator */
-#define MAX_DMA_ADDRESS PAGE_OFFSET
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
-#endif /* _M68K_DMA_H */
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
new file mode 100644
index 000000000000..283214dc65a7
--- /dev/null
+++ b/arch/m68k/include/asm/gpio.h
@@ -0,0 +1,238 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#ifndef coldfire_gpio_h
+#define coldfire_gpio_h
+
+#include <linux/io.h>
+#include <asm-generic/gpio.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+
+/*
+ * The Freescale Coldfire family is quite varied in how they implement GPIO.
+ * Some parts have 8 bit ports, some have 16bit and some have 32bit; some have
+ * only one port, others have multiple ports; some have a single data latch
+ * for both input and output, others have a separate pin data register to read
+ * input; some require a read-modify-write access to change an output, others
+ * have set and clear registers for some of the outputs; some have all the
+ * GPIOs in a single control area, others have some GPIOs implemented in
+ * different modules.
+ *
+ * This implementation attempts to accommodate the differences while presenting
+ * a generic interface that will optimize to as few instructions as possible.
+ */
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || defined(CONFIG_M532x)
+
+/* These parts have GPIO organized by 8 bit ports */
+
+#define MCFGPIO_PORTTYPE u8
+#define MCFGPIO_PORTSIZE 8
+#define mcfgpio_read(port) __raw_readb(port)
+#define mcfgpio_write(data, port) __raw_writeb(data, port)
+
+#elif defined(CONFIG_M5307) || defined(CONFIG_M5407) || defined(CONFIG_M5272)
+
+/* These parts have GPIO organized by 16 bit ports */
+
+#define MCFGPIO_PORTTYPE u16
+#define MCFGPIO_PORTSIZE 16
+#define mcfgpio_read(port) __raw_readw(port)
+#define mcfgpio_write(data, port) __raw_writew(data, port)
+
+#elif defined(CONFIG_M5249)
+
+/* These parts have GPIO organized by 32 bit ports */
+
+#define MCFGPIO_PORTTYPE u32
+#define MCFGPIO_PORTSIZE 32
+#define mcfgpio_read(port) __raw_readl(port)
+#define mcfgpio_write(data, port) __raw_writel(data, port)
+
+#endif
+
+#define mcfgpio_bit(gpio) (1 << ((gpio) % MCFGPIO_PORTSIZE))
+#define mcfgpio_port(gpio) ((gpio) / MCFGPIO_PORTSIZE)
+
+#if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || defined(CONFIG_M532x)
+/*
+ * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses
+ * read-modify-write to change an output and a GPIO module which has separate
+ * set/clr registers to directly change outputs with a single write access.
+ */
+#if defined(CONFIG_M528x)
+/*
+ * The 528x also has GPIOs in other modules (GPT, QADC) which use
+ * read-modify-write as well as those controlled by the EPORT and GPIO modules.
+ */
+#define MCFGPIO_SCR_START 40
+#else
+#define MCFGPIO_SCR_START 8
+#endif
+
+#define MCFGPIO_SETR_PORT(gpio) (MCFGPIO_SETR + \
+ mcfgpio_port(gpio - MCFGPIO_SCR_START))
+
+#define MCFGPIO_CLRR_PORT(gpio) (MCFGPIO_CLRR + \
+ mcfgpio_port(gpio - MCFGPIO_SCR_START))
+#else
+
+#define MCFGPIO_SCR_START MCFGPIO_PIN_MAX
+/* with MCFGPIO_SCR == MCFGPIO_PIN_MAX, these will be optimized away */
+#define MCFGPIO_SETR_PORT(gpio) 0
+#define MCFGPIO_CLRR_PORT(gpio) 0
+
+#endif
+/*
+ * Coldfire specific helper functions
+ */
+
+/* return the port pin data register for a gpio */
+static inline u32 __mcf_gpio_ppdr(unsigned gpio)
+{
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
+ defined(CONFIG_M5307) || defined(CONFIG_M5407)
+ return MCFSIM_PADAT;
+#elif defined(CONFIG_M5272)
+ if (gpio < 16)
+ return MCFSIM_PADAT;
+ else if (gpio < 32)
+ return MCFSIM_PBDAT;
+ else
+ return MCFSIM_PCDAT;
+#elif defined(CONFIG_M5249)
+ if (gpio < 32)
+ return MCFSIM2_GPIOREAD;
+ else
+ return MCFSIM2_GPIO1READ;
+#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || defined(CONFIG_M532x)
+ if (gpio < 8)
+ return MCFEPORT_EPPDR;
+#if defined(CONFIG_M528x)
+ else if (gpio < 16)
+ return MCFGPTA_GPTPORT;
+ else if (gpio < 24)
+ return MCFGPTB_GPTPORT;
+ else if (gpio < 32)
+ return MCFQADC_PORTQA;
+ else if (gpio < 40)
+ return MCFQADC_PORTQB;
+#endif
+ else
+ return MCFGPIO_PPDR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
+#endif
+}
+
+/* return the port output data register for a gpio */
+static inline u32 __mcf_gpio_podr(unsigned gpio)
+{
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
+ defined(CONFIG_M5307) || defined(CONFIG_M5407)
+ return MCFSIM_PADAT;
+#elif defined(CONFIG_M5272)
+ if (gpio < 16)
+ return MCFSIM_PADAT;
+ else if (gpio < 32)
+ return MCFSIM_PBDAT;
+ else
+ return MCFSIM_PCDAT;
+#elif defined(CONFIG_M5249)
+ if (gpio < 32)
+ return MCFSIM2_GPIOWRITE;
+ else
+ return MCFSIM2_GPIO1WRITE;
+#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || defined(CONFIG_M532x)
+ if (gpio < 8)
+ return MCFEPORT_EPDR;
+#if defined(CONFIG_M528x)
+ else if (gpio < 16)
+ return MCFGPTA_GPTPORT;
+ else if (gpio < 24)
+ return MCFGPTB_GPTPORT;
+ else if (gpio < 32)
+ return MCFQADC_PORTQA;
+ else if (gpio < 40)
+ return MCFQADC_PORTQB;
+#endif
+ else
+ return MCFGPIO_PODR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
+#endif
+}
+
+/*
+ * The Generic GPIO functions
+ *
+ * If the gpio is a compile time constant and is one of the Coldfire gpios,
+ * use the inline version, otherwise dispatch thru gpiolib.
+ */
+
+static inline int gpio_get_value(unsigned gpio)
+{
+ if (__builtin_constant_p(gpio) && gpio < MCFGPIO_PIN_MAX)
+ return mcfgpio_read(__mcf_gpio_ppdr(gpio)) & mcfgpio_bit(gpio);
+ else
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ if (__builtin_constant_p(gpio) && gpio < MCFGPIO_PIN_MAX) {
+ if (gpio < MCFGPIO_SCR_START) {
+ unsigned long flags;
+ MCFGPIO_PORTTYPE data;
+
+ local_irq_save(flags);
+ data = mcfgpio_read(__mcf_gpio_podr(gpio));
+ if (value)
+ data |= mcfgpio_bit(gpio);
+ else
+ data &= ~mcfgpio_bit(gpio);
+ mcfgpio_write(data, __mcf_gpio_podr(gpio));
+ local_irq_restore(flags);
+ } else {
+ if (value)
+ mcfgpio_write(mcfgpio_bit(gpio),
+ MCFGPIO_SETR_PORT(gpio));
+ else
+ mcfgpio_write(~mcfgpio_bit(gpio),
+ MCFGPIO_CLRR_PORT(gpio));
+ }
+ } else
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_to_irq(unsigned gpio)
+{
+ return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE : -EINVAL;
+}
+
+static inline int irq_to_gpio(unsigned irq)
+{
+ return (irq >= MCFGPIO_IRQ_VECBASE &&
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) ?
+ irq - MCFGPIO_IRQ_VECBASE : -ENXIO;
+}
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+ return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
+}
+
+#endif
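A short usage sketch of the inline helpers above; the pin number is hypothetical and is assumed to have already been requested and configured as an output through gpiolib. A compile-time-constant pin below MCFGPIO_PIN_MAX takes the inline fast path rather than dispatching through gpiolib.

    #define EXAMPLE_PIN 5   /* hypothetical, must be below MCFGPIO_PIN_MAX */

    static void example_pulse(void)
    {
            gpio_set_value(EXAMPLE_PIN, 1);         /* drive the output high */
            gpio_set_value(EXAMPLE_PIN, 0);         /* drive the output low */

            if (gpio_get_value(EXAMPLE_PIN))
                    ;                               /* reads the port pin data register */
    }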
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
index 56d0d5db231c..7167ed209823 100644
--- a/arch/m68k/include/asm/hardirq.h
+++ b/arch/m68k/include/asm/hardirq.h
@@ -1,5 +1,18 @@
-#ifdef __uClinux__
-#include "hardirq_no.h"
-#else
-#include "hardirq_mm.h"
+#ifndef __M68K_HARDIRQ_H
+#define __M68K_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/cache.h>
+
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#define HARDIRQ_BITS 8
+
+void ack_bad_irq(unsigned int irq);
+
#endif
diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h
deleted file mode 100644
index 394ee946015c..000000000000
--- a/arch/m68k/include/asm/hardirq_mm.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define HARDIRQ_BITS 8
-
-#endif
diff --git a/arch/m68k/include/asm/hardirq_no.h b/arch/m68k/include/asm/hardirq_no.h
deleted file mode 100644
index bfad28149a49..000000000000
--- a/arch/m68k/include/asm/hardirq_no.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <linux/cache.h>
-#include <linux/threads.h>
-#include <asm/irq.h>
-
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define HARDIRQ_BITS 8
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-void ack_bad_irq(unsigned int irq);
-
-#endif /* __M68K_HARDIRQ_H */
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index d031416595b2..cc0614a6deeb 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -1,5 +1,134 @@
-#ifdef __uClinux__
-#include "irq_no.h"
+#ifndef _M68K_IRQ_H_
+#define _M68K_IRQ_H_
+
+#include <linux/linkage.h>
+#include <linux/hardirq.h>
+#include <linux/irqreturn.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * This should be the same as the max(NUM_X_SOURCES) for all the
+ * different m68k hosts compiled into the kernel.
+ * Currently the Atari has 72 and the Amiga 24, but if both are
+ * supported in the kernel it is better to make room for 72.
+ */
+#if defined(CONFIG_COLDFIRE)
+#define NR_IRQS 256
+#elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
+#define NR_IRQS 200
+#elif defined(CONFIG_ATARI) || defined(CONFIG_MAC)
+#define NR_IRQS 72
+#elif defined(CONFIG_Q40)
+#define NR_IRQS 43
+#elif defined(CONFIG_AMIGA) || !defined(CONFIG_MMU)
+#define NR_IRQS 32
+#elif defined(CONFIG_APOLLO)
+#define NR_IRQS 24
+#elif defined(CONFIG_HP300)
+#define NR_IRQS 8
#else
-#include "irq_mm.h"
+#define NR_IRQS 0
#endif
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+#ifdef CONFIG_MMU
+
+/*
+ * Interrupt source definitions
+ * General interrupt sources are the level 1-7.
+ * Adding an interrupt service routine for one of these sources
+ * results in the addition of that routine to a chain of routines.
+ * Each one is called in succession. Each individual interrupt
+ * service routine should determine if the device associated with
+ * that routine requires service.
+ */
+
+#define IRQ_SPURIOUS 0
+
+#define IRQ_AUTO_1 1 /* level 1 interrupt */
+#define IRQ_AUTO_2 2 /* level 2 interrupt */
+#define IRQ_AUTO_3 3 /* level 3 interrupt */
+#define IRQ_AUTO_4 4 /* level 4 interrupt */
+#define IRQ_AUTO_5 5 /* level 5 interrupt */
+#define IRQ_AUTO_6 6 /* level 6 interrupt */
+#define IRQ_AUTO_7 7 /* level 7 interrupt (non-maskable) */
+
+#define IRQ_USER 8
+
+extern unsigned int irq_canonicalize(unsigned int irq);
+
+struct pt_regs;
+
+/*
+ * various flags for request_irq() - the Amiga now uses the standard
+ * mechanism like all other architectures - IRQF_DISABLED and
+ * IRQF_SHARED are your friends.
+ */
+#ifndef MACH_AMIGA_ONLY
+#define IRQ_FLG_LOCK (0x0001) /* handler is not replaceable */
+#define IRQ_FLG_REPLACE (0x0002) /* replace existing handler */
+#define IRQ_FLG_FAST (0x0004)
+#define IRQ_FLG_SLOW (0x0008)
+#define IRQ_FLG_STD (0x8000) /* internally used */
+#endif
+
+/*
+ * This structure is used to chain together the ISRs for a particular
+ * interrupt source (if it supports chaining).
+ */
+typedef struct irq_node {
+ irqreturn_t (*handler)(int, void *);
+ void *dev_id;
+ struct irq_node *next;
+ unsigned long flags;
+ const char *devname;
+} irq_node_t;
+
+/*
+ * This structure has only 4 elements for speed reasons
+ */
+struct irq_handler {
+ int (*handler)(int, void *);
+ unsigned long flags;
+ void *dev_id;
+ const char *devname;
+};
+
+struct irq_controller {
+ const char *name;
+ spinlock_t lock;
+ int (*startup)(unsigned int irq);
+ void (*shutdown)(unsigned int irq);
+ void (*enable)(unsigned int irq);
+ void (*disable)(unsigned int irq);
+};
+
+extern int m68k_irq_startup(unsigned int);
+extern void m68k_irq_shutdown(unsigned int);
+
+/*
+ * This function returns a new irq_node_t
+ */
+extern irq_node_t *new_irq_node(void);
+
+extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *));
+extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
+ void (*handler)(unsigned int, struct pt_regs *));
+extern void m68k_setup_irq_controller(struct irq_controller *, unsigned int, unsigned int);
+
+asmlinkage void m68k_handle_int(unsigned int);
+asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
+
+#else
+#define irq_canonicalize(irq) (irq)
+#endif /* CONFIG_MMU */
+
+#endif /* _M68K_IRQ_H_ */
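For illustration only (not in this patch): on MMU platforms the irq_controller hooks declared above are how board code plugs its interrupt hardware into the m68k IRQ core. A hedged sketch, where all "my_" names and the vector range are assumptions:

/* Hypothetical MMU platform hook-up sketch. */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/irq.h>

static int my_irq_startup(unsigned int irq)
{
	/* unmask the source in board-specific hardware */
	return 0;
}

static void my_irq_shutdown(unsigned int irq)
{
	/* mask the source in board-specific hardware */
}

static struct irq_controller my_irq_controller = {
	.name		= "myboard",
	.lock		= __SPIN_LOCK_UNLOCKED(my_irq_controller.lock),
	.startup	= my_irq_startup,
	.shutdown	= my_irq_shutdown,
};

void __init my_board_init_IRQ(void)
{
	/* claim 32 user vectors starting at IRQ_USER for this controller */
	m68k_setup_irq_controller(&my_irq_controller, IRQ_USER, 32);
}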
diff --git a/arch/m68k/include/asm/irq_mm.h b/arch/m68k/include/asm/irq_mm.h
deleted file mode 100644
index 0cab42cad79e..000000000000
--- a/arch/m68k/include/asm/irq_mm.h
+++ /dev/null
@@ -1,126 +0,0 @@
-#ifndef _M68K_IRQ_H_
-#define _M68K_IRQ_H_
-
-#include <linux/linkage.h>
-#include <linux/hardirq.h>
-#include <linux/irqreturn.h>
-#include <linux/spinlock_types.h>
-
-/*
- * This should be the same as the max(NUM_X_SOURCES) for all the
- * different m68k hosts compiled into the kernel.
- * Currently the Atari has 72 and the Amiga 24, but if both are
- * supported in the kernel it is better to make room for 72.
- */
-#if defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
-#define NR_IRQS 200
-#elif defined(CONFIG_ATARI) || defined(CONFIG_MAC)
-#define NR_IRQS 72
-#elif defined(CONFIG_Q40)
-#define NR_IRQS 43
-#elif defined(CONFIG_AMIGA)
-#define NR_IRQS 32
-#elif defined(CONFIG_APOLLO)
-#define NR_IRQS 24
-#elif defined(CONFIG_HP300)
-#define NR_IRQS 8
-#else
-#define NR_IRQS 0
-#endif
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-/*
- * Interrupt source definitions
- * General interrupt sources are the level 1-7.
- * Adding an interrupt service routine for one of these sources
- * results in the addition of that routine to a chain of routines.
- * Each one is called in succession. Each individual interrupt
- * service routine should determine if the device associated with
- * that routine requires service.
- */
-
-#define IRQ_SPURIOUS 0
-
-#define IRQ_AUTO_1 1 /* level 1 interrupt */
-#define IRQ_AUTO_2 2 /* level 2 interrupt */
-#define IRQ_AUTO_3 3 /* level 3 interrupt */
-#define IRQ_AUTO_4 4 /* level 4 interrupt */
-#define IRQ_AUTO_5 5 /* level 5 interrupt */
-#define IRQ_AUTO_6 6 /* level 6 interrupt */
-#define IRQ_AUTO_7 7 /* level 7 interrupt (non-maskable) */
-
-#define IRQ_USER 8
-
-extern unsigned int irq_canonicalize(unsigned int irq);
-
-struct pt_regs;
-
-/*
- * various flags for request_irq() - the Amiga now uses the standard
- * mechanism like all other architectures - IRQF_DISABLED and
- * IRQF_SHARED are your friends.
- */
-#ifndef MACH_AMIGA_ONLY
-#define IRQ_FLG_LOCK (0x0001) /* handler is not replaceable */
-#define IRQ_FLG_REPLACE (0x0002) /* replace existing handler */
-#define IRQ_FLG_FAST (0x0004)
-#define IRQ_FLG_SLOW (0x0008)
-#define IRQ_FLG_STD (0x8000) /* internally used */
-#endif
-
-/*
- * This structure is used to chain together the ISRs for a particular
- * interrupt source (if it supports chaining).
- */
-typedef struct irq_node {
- irqreturn_t (*handler)(int, void *);
- void *dev_id;
- struct irq_node *next;
- unsigned long flags;
- const char *devname;
-} irq_node_t;
-
-/*
- * This structure has only 4 elements for speed reasons
- */
-struct irq_handler {
- int (*handler)(int, void *);
- unsigned long flags;
- void *dev_id;
- const char *devname;
-};
-
-struct irq_controller {
- const char *name;
- spinlock_t lock;
- int (*startup)(unsigned int irq);
- void (*shutdown)(unsigned int irq);
- void (*enable)(unsigned int irq);
- void (*disable)(unsigned int irq);
-};
-
-extern int m68k_irq_startup(unsigned int);
-extern void m68k_irq_shutdown(unsigned int);
-
-/*
- * This function returns a new irq_node_t
- */
-extern irq_node_t *new_irq_node(void);
-
-extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *));
-extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
- void (*handler)(unsigned int, struct pt_regs *));
-extern void m68k_setup_irq_controller(struct irq_controller *, unsigned int, unsigned int);
-
-asmlinkage void m68k_handle_int(unsigned int);
-asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
-
-#endif /* _M68K_IRQ_H_ */
diff --git a/arch/m68k/include/asm/irq_no.h b/arch/m68k/include/asm/irq_no.h
deleted file mode 100644
index 9373c31ac87d..000000000000
--- a/arch/m68k/include/asm/irq_no.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _M68KNOMMU_IRQ_H_
-#define _M68KNOMMU_IRQ_H_
-
-#ifdef CONFIG_COLDFIRE
-/*
- * On the ColdFire we keep track of all vectors. That way drivers
- * can register whatever vector number they wish, and we can deal
- * with it.
- */
-#define SYS_IRQS 256
-#define NR_IRQS SYS_IRQS
-
-#else
-
-/*
- * # of m68k interrupts
- */
-#define SYS_IRQS 8
-#define NR_IRQS (24 + SYS_IRQS)
-
-#endif /* CONFIG_COLDFIRE */
-
-
-#define irq_canonicalize(irq) (irq)
-
-#endif /* _M68KNOMMU_IRQ_H_ */
diff --git a/arch/m68k/include/asm/m5206sim.h b/arch/m68k/include/asm/m5206sim.h
index 7e3594dea88b..9c384e294af9 100644
--- a/arch/m68k/include/asm/m5206sim.h
+++ b/arch/m68k/include/asm/m5206sim.h
@@ -85,8 +85,21 @@
#define MCFSIM_PAR 0xcb /* Pin Assignment reg (r/w) */
#endif
-#define MCFSIM_PADDR 0x1c5 /* Parallel Direction (r/w) */
-#define MCFSIM_PADAT 0x1c9 /* Parallel Port Value (r/w) */
+#define MCFSIM_PADDR (MCF_MBAR + 0x1c5) /* Parallel Direction (r/w) */
+#define MCFSIM_PADAT (MCF_MBAR + 0x1c9) /* Parallel Port Value (r/w) */
+
+/*
+ * Define system peripheral IRQ usage.
+ */
+#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
+#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
+
+/*
+ * Generic GPIO
+ */
+#define MCFGPIO_PIN_MAX 8
+#define MCFGPIO_IRQ_VECBASE -1
+#define MCFGPIO_IRQ_MAX -1
/*
* Some symbol defines for the Parallel Port Pin Assignment Register
@@ -111,21 +124,5 @@
#define MCFSIM_DMA2ICR MCFSIM_ICR15 /* DMA 2 ICR */
#endif
-#if defined(CONFIG_M5206e)
-#define MCFSIM_IMR_MASKALL 0xfffe /* All SIM intr sources */
-#endif
-
-/*
- * Macro to get and set IMR register. It is 16 bits on the 5206.
- */
-#define mcf_getimr() \
- *((volatile unsigned short *) (MCF_MBAR + MCFSIM_IMR))
-
-#define mcf_setimr(imr) \
- *((volatile unsigned short *) (MCF_MBAR + MCFSIM_IMR)) = (imr)
-
-#define mcf_getipr() \
- *((volatile unsigned short *) (MCF_MBAR + MCFSIM_IPR))
-
/****************************************************************************/
#endif /* m5206sim_h */
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h
index 83bbcfd6e8f2..ed2b69b96805 100644
--- a/arch/m68k/include/asm/m520xsim.h
+++ b/arch/m68k/include/asm/m520xsim.h
@@ -11,9 +11,8 @@
#define m520xsim_h
/****************************************************************************/
-
/*
- * Define the 5282 SIM register set addresses.
+ * Define the 520x SIM register set addresses.
*/
#define MCFICM_INTC0 0x48000 /* Base for Interrupt Ctrl 0 */
#define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */
@@ -22,8 +21,22 @@
#define MCFINTC_IMRL 0x0c /* Interrupt mask 1-31 */
#define MCFINTC_INTFRCH 0x10 /* Interrupt force 32-63 */
#define MCFINTC_INTFRCL 0x14 /* Interrupt force 1-31 */
+#define MCFINTC_SIMR 0x1c /* Set interrupt mask 0-63 */
+#define MCFINTC_CIMR 0x1d /* Clear interrupt mask 0-63 */
#define MCFINTC_ICR0 0x40 /* Base ICR register */
+/*
+ * The common interrupt controller code just wants to know the absolute
+ * addresses of the SIMR and CIMR registers (not offsets into IPSBAR).
+ * The 520x family only has a single INTC unit.
+ */
+#define MCFINTC0_SIMR (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_SIMR)
+#define MCFINTC0_CIMR (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_CIMR)
+#define MCFINTC0_ICR0 (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0)
+#define MCFINTC1_SIMR (0)
+#define MCFINTC1_CIMR (0)
+#define MCFINTC1_ICR0 (0)
+
#define MCFINT_VECBASE 64
#define MCFINT_UART0 26 /* Interrupt number for UART0 */
#define MCFINT_UART1 27 /* Interrupt number for UART1 */
@@ -41,6 +54,62 @@
#define MCFSIM_SDCS0 0x000a8110 /* SDRAM Chip Select 0 Configuration */
#define MCFSIM_SDCS1 0x000a8114 /* SDRAM Chip Select 1 Configuration */
+#define MCFEPORT_EPDDR 0xFC088002
+#define MCFEPORT_EPDR 0xFC088004
+#define MCFEPORT_EPPDR 0xFC088005
+
+#define MCFGPIO_PODR_BUSCTL 0xFC0A4000
+#define MCFGPIO_PODR_BE 0xFC0A4001
+#define MCFGPIO_PODR_CS 0xFC0A4002
+#define MCFGPIO_PODR_FECI2C 0xFC0A4003
+#define MCFGPIO_PODR_QSPI 0xFC0A4004
+#define MCFGPIO_PODR_TIMER 0xFC0A4005
+#define MCFGPIO_PODR_UART 0xFC0A4006
+#define MCFGPIO_PODR_FECH 0xFC0A4007
+#define MCFGPIO_PODR_FECL 0xFC0A4008
+
+#define MCFGPIO_PDDR_BUSCTL 0xFC0A400C
+#define MCFGPIO_PDDR_BE 0xFC0A400D
+#define MCFGPIO_PDDR_CS 0xFC0A400E
+#define MCFGPIO_PDDR_FECI2C 0xFC0A400F
+#define MCFGPIO_PDDR_QSPI 0xFC0A4010
+#define MCFGPIO_PDDR_TIMER 0xFC0A4011
+#define MCFGPIO_PDDR_UART 0xFC0A4012
+#define MCFGPIO_PDDR_FECH 0xFC0A4013
+#define MCFGPIO_PDDR_FECL 0xFC0A4014
+
+#define MCFGPIO_PPDSDR_BUSCTL 0xFC0A401A
+#define MCFGPIO_PPDSDR_BE 0xFC0A401B
+#define MCFGPIO_PPDSDR_CS 0xFC0A401C
+#define MCFGPIO_PPDSDR_FECI2C 0xFC0A401D
+#define MCFGPIO_PPDSDR_QSPI 0xFC0A401E
+#define MCFGPIO_PPDSDR_TIMER 0xFC0A401F
+#define MCFGPIO_PPDSDR_UART 0xFC0A4021
+#define MCFGPIO_PPDSDR_FECH 0xFC0A4021
+#define MCFGPIO_PPDSDR_FECL 0xFC0A4022
+
+#define MCFGPIO_PCLRR_BUSCTL 0xFC0A4024
+#define MCFGPIO_PCLRR_BE 0xFC0A4025
+#define MCFGPIO_PCLRR_CS 0xFC0A4026
+#define MCFGPIO_PCLRR_FECI2C 0xFC0A4027
+#define MCFGPIO_PCLRR_QSPI 0xFC0A4028
+#define MCFGPIO_PCLRR_TIMER 0xFC0A4029
+#define MCFGPIO_PCLRR_UART 0xFC0A402A
+#define MCFGPIO_PCLRR_FECH 0xFC0A402B
+#define MCFGPIO_PCLRR_FECL 0xFC0A402C
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PODR MCFGPIO_PODR_BUSCTL
+#define MCFGPIO_PDDR MCFGPIO_PDDR_BUSCTL
+#define MCFGPIO_PPDR MCFGPIO_PPDSDR_BUSCTL
+#define MCFGPIO_SETR MCFGPIO_PPDSDR_BUSCTL
+#define MCFGPIO_CLRR MCFGPIO_PCLRR_BUSCTL
+
+#define MCFGPIO_PIN_MAX 80
+#define MCFGPIO_IRQ_MAX 8
+#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+/****************************************************************************/
#define MCF_GPIO_PAR_UART (0xA4036)
#define MCF_GPIO_PAR_FECI2C (0xA4033)
@@ -55,10 +124,6 @@
#define MCF_GPIO_PAR_FECI2C_PAR_SDA_URXD2 (0x02)
#define MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2 (0x04)
-#define ICR_INTRCONF 0x05
-#define MCFPIT_IMR MCFINTC_IMRL
-#define MCFPIT_IMR_IBIT (1 << MCFINT_PIT1)
-
/*
* Reset Controll Unit.
*/
diff --git a/arch/m68k/include/asm/m523xsim.h b/arch/m68k/include/asm/m523xsim.h
index 55183b5df1b8..a34894cf8e6f 100644
--- a/arch/m68k/include/asm/m523xsim.h
+++ b/arch/m68k/include/asm/m523xsim.h
@@ -50,5 +50,82 @@
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
+#define MCFGPIO_PODR_ADDR (MCF_IPSBAR + 0x100000)
+#define MCFGPIO_PODR_DATAH (MCF_IPSBAR + 0x100001)
+#define MCFGPIO_PODR_DATAL (MCF_IPSBAR + 0x100002)
+#define MCFGPIO_PODR_BUSCTL (MCF_IPSBAR + 0x100003)
+#define MCFGPIO_PODR_BS (MCF_IPSBAR + 0x100004)
+#define MCFGPIO_PODR_CS (MCF_IPSBAR + 0x100005)
+#define MCFGPIO_PODR_SDRAM (MCF_IPSBAR + 0x100006)
+#define MCFGPIO_PODR_FECI2C (MCF_IPSBAR + 0x100007)
+#define MCFGPIO_PODR_UARTH (MCF_IPSBAR + 0x100008)
+#define MCFGPIO_PODR_UARTL (MCF_IPSBAR + 0x100009)
+#define MCFGPIO_PODR_QSPI (MCF_IPSBAR + 0x10000A)
+#define MCFGPIO_PODR_TIMER (MCF_IPSBAR + 0x10000B)
+#define MCFGPIO_PODR_ETPU (MCF_IPSBAR + 0x10000C)
+
+#define MCFGPIO_PDDR_ADDR (MCF_IPSBAR + 0x100010)
+#define MCFGPIO_PDDR_DATAH (MCF_IPSBAR + 0x100011)
+#define MCFGPIO_PDDR_DATAL (MCF_IPSBAR + 0x100012)
+#define MCFGPIO_PDDR_BUSCTL (MCF_IPSBAR + 0x100013)
+#define MCFGPIO_PDDR_BS (MCF_IPSBAR + 0x100014)
+#define MCFGPIO_PDDR_CS (MCF_IPSBAR + 0x100015)
+#define MCFGPIO_PDDR_SDRAM (MCF_IPSBAR + 0x100016)
+#define MCFGPIO_PDDR_FECI2C (MCF_IPSBAR + 0x100017)
+#define MCFGPIO_PDDR_UARTH (MCF_IPSBAR + 0x100018)
+#define MCFGPIO_PDDR_UARTL (MCF_IPSBAR + 0x100019)
+#define MCFGPIO_PDDR_QSPI (MCF_IPSBAR + 0x10001A)
+#define MCFGPIO_PDDR_TIMER (MCF_IPSBAR + 0x10001B)
+#define MCFGPIO_PDDR_ETPU (MCF_IPSBAR + 0x10001C)
+
+#define MCFGPIO_PPDSDR_ADDR (MCF_IPSBAR + 0x100020)
+#define MCFGPIO_PPDSDR_DATAH (MCF_IPSBAR + 0x100021)
+#define MCFGPIO_PPDSDR_DATAL (MCF_IPSBAR + 0x100022)
+#define MCFGPIO_PPDSDR_BUSCTL (MCF_IPSBAR + 0x100023)
+#define MCFGPIO_PPDSDR_BS (MCF_IPSBAR + 0x100024)
+#define MCFGPIO_PPDSDR_CS (MCF_IPSBAR + 0x100025)
+#define MCFGPIO_PPDSDR_SDRAM (MCF_IPSBAR + 0x100026)
+#define MCFGPIO_PPDSDR_FECI2C (MCF_IPSBAR + 0x100027)
+#define MCFGPIO_PPDSDR_UARTH (MCF_IPSBAR + 0x100028)
+#define MCFGPIO_PPDSDR_UARTL (MCF_IPSBAR + 0x100029)
+#define MCFGPIO_PPDSDR_QSPI (MCF_IPSBAR + 0x10002A)
+#define MCFGPIO_PPDSDR_TIMER (MCF_IPSBAR + 0x10002B)
+#define MCFGPIO_PPDSDR_ETPU (MCF_IPSBAR + 0x10002C)
+
+#define MCFGPIO_PCLRR_ADDR (MCF_IPSBAR + 0x100030)
+#define MCFGPIO_PCLRR_DATAH (MCF_IPSBAR + 0x100031)
+#define MCFGPIO_PCLRR_DATAL (MCF_IPSBAR + 0x100032)
+#define MCFGPIO_PCLRR_BUSCTL (MCF_IPSBAR + 0x100033)
+#define MCFGPIO_PCLRR_BS (MCF_IPSBAR + 0x100034)
+#define MCFGPIO_PCLRR_CS (MCF_IPSBAR + 0x100035)
+#define MCFGPIO_PCLRR_SDRAM (MCF_IPSBAR + 0x100036)
+#define MCFGPIO_PCLRR_FECI2C (MCF_IPSBAR + 0x100037)
+#define MCFGPIO_PCLRR_UARTH (MCF_IPSBAR + 0x100038)
+#define MCFGPIO_PCLRR_UARTL (MCF_IPSBAR + 0x100039)
+#define MCFGPIO_PCLRR_QSPI (MCF_IPSBAR + 0x10003A)
+#define MCFGPIO_PCLRR_TIMER (MCF_IPSBAR + 0x10003B)
+#define MCFGPIO_PCLRR_ETPU (MCF_IPSBAR + 0x10003C)
+
+/*
+ * EPort
+ */
+
+#define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002)
+#define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004)
+#define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005)
+
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PODR MCFGPIO_PODR_ADDR
+#define MCFGPIO_PDDR MCFGPIO_PDDR_ADDR
+#define MCFGPIO_PPDR MCFGPIO_PPDSDR_ADDR
+#define MCFGPIO_SETR MCFGPIO_PPDSDR_ADDR
+#define MCFGPIO_CLRR MCFGPIO_PCLRR_ADDR
+
+#define MCFGPIO_PIN_MAX 107
+#define MCFGPIO_IRQ_MAX 8
+#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+
/****************************************************************************/
#endif /* m523xsim_h */
diff --git a/arch/m68k/include/asm/m5249sim.h b/arch/m68k/include/asm/m5249sim.h
index 366eb8602d2f..255095d0df64 100644
--- a/arch/m68k/include/asm/m5249sim.h
+++ b/arch/m68k/include/asm/m5249sim.h
@@ -71,16 +71,22 @@
#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
/*
+ * Define system peripheral IRQ usage.
+ */
+#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
+#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
+
+/*
* General purpose IO registers (in MBAR2).
*/
-#define MCFSIM2_GPIOREAD 0x0 /* GPIO read values */
-#define MCFSIM2_GPIOWRITE 0x4 /* GPIO write values */
-#define MCFSIM2_GPIOENABLE 0x8 /* GPIO enabled */
-#define MCFSIM2_GPIOFUNC 0xc /* GPIO function */
-#define MCFSIM2_GPIO1READ 0xb0 /* GPIO1 read values */
-#define MCFSIM2_GPIO1WRITE 0xb4 /* GPIO1 write values */
-#define MCFSIM2_GPIO1ENABLE 0xb8 /* GPIO1 enabled */
-#define MCFSIM2_GPIO1FUNC 0xbc /* GPIO1 function */
+#define MCFSIM2_GPIOREAD (MCF_MBAR2 + 0x000) /* GPIO read values */
+#define MCFSIM2_GPIOWRITE (MCF_MBAR2 + 0x004) /* GPIO write values */
+#define MCFSIM2_GPIOENABLE (MCF_MBAR2 + 0x008) /* GPIO enabled */
+#define MCFSIM2_GPIOFUNC (MCF_MBAR2 + 0x00C) /* GPIO function */
+#define MCFSIM2_GPIO1READ (MCF_MBAR2 + 0x0B0) /* GPIO1 read values */
+#define MCFSIM2_GPIO1WRITE (MCF_MBAR2 + 0x0B4) /* GPIO1 write values */
+#define MCFSIM2_GPIO1ENABLE (MCF_MBAR2 + 0x0B8) /* GPIO1 enabled */
+#define MCFSIM2_GPIO1FUNC (MCF_MBAR2 + 0x0BC) /* GPIO1 function */
#define MCFSIM2_GPIOINTSTAT 0xc0 /* GPIO interrupt status */
#define MCFSIM2_GPIOINTCLEAR 0xc0 /* GPIO interrupt clear */
@@ -100,20 +106,28 @@
#define MCFSIM2_IDECONFIG1 0x18c /* IDEconfig1 */
#define MCFSIM2_IDECONFIG2 0x190 /* IDEconfig2 */
-
/*
- * Macro to set IMR register. It is 32 bits on the 5249.
+ * Generic GPIO support
*/
-#define MCFSIM_IMR_MASKALL 0x7fffe /* All SIM intr sources */
+#define MCFGPIO_PIN_MAX 64
+#define MCFGPIO_IRQ_MAX -1
+#define MCFGPIO_IRQ_VECBASE -1
-#define mcf_getimr() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR))
-
-#define mcf_setimr(imr) \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR)) = (imr);
+/*
+ * Define the base interrupt for the second interrupt controller.
+ * We set it to 128, out of the way of the base interrupts, and plenty
+ * of room for its 64 interrupts.
+ */
+#define MCFINTC2_VECBASE 128
-#define mcf_getipr() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IPR))
+#define MCFINTC2_GPIOIRQ0 (MCFINTC2_VECBASE + 32)
+#define MCFINTC2_GPIOIRQ1 (MCFINTC2_VECBASE + 33)
+#define MCFINTC2_GPIOIRQ2 (MCFINTC2_VECBASE + 34)
+#define MCFINTC2_GPIOIRQ3 (MCFINTC2_VECBASE + 35)
+#define MCFINTC2_GPIOIRQ4 (MCFINTC2_VECBASE + 36)
+#define MCFINTC2_GPIOIRQ5 (MCFINTC2_VECBASE + 37)
+#define MCFINTC2_GPIOIRQ6 (MCFINTC2_VECBASE + 38)
+#define MCFINTC2_GPIOIRQ7 (MCFINTC2_VECBASE + 39)
/****************************************************************************/
@@ -137,9 +151,9 @@
subql #1,%a1 /* get MBAR2 address in a1 */
/*
- * Move secondary interrupts to base at 128.
+ * Move secondary interrupts to their base (128).
*/
- moveb #0x80,%d0
+ moveb #MCFINTC2_VECBASE,%d0
moveb %d0,0x16b(%a1) /* interrupt base register */
/*
diff --git a/arch/m68k/include/asm/m5272sim.h b/arch/m68k/include/asm/m5272sim.h
index 6217edc21139..469686ffc4af 100644
--- a/arch/m68k/include/asm/m5272sim.h
+++ b/arch/m68k/include/asm/m5272sim.h
@@ -63,16 +63,27 @@
#define MCFSIM_DCMR1 0x5c /* DRAM 1 Mask reg (r/w) */
#define MCFSIM_DCCR1 0x63 /* DRAM 1 Control reg (r/w) */
-#define MCFSIM_PACNT 0x80 /* Port A Control (r/w) */
-#define MCFSIM_PADDR 0x84 /* Port A Direction (r/w) */
-#define MCFSIM_PADAT 0x86 /* Port A Data (r/w) */
-#define MCFSIM_PBCNT 0x88 /* Port B Control (r/w) */
-#define MCFSIM_PBDDR 0x8c /* Port B Direction (r/w) */
-#define MCFSIM_PBDAT 0x8e /* Port B Data (r/w) */
-#define MCFSIM_PCDDR 0x94 /* Port C Direction (r/w) */
-#define MCFSIM_PCDAT 0x96 /* Port C Data (r/w) */
-#define MCFSIM_PDCNT 0x98 /* Port D Control (r/w) */
+#define MCFSIM_PACNT (MCF_MBAR + 0x80) /* Port A Control (r/w) */
+#define MCFSIM_PADDR (MCF_MBAR + 0x84) /* Port A Direction (r/w) */
+#define MCFSIM_PADAT (MCF_MBAR + 0x86) /* Port A Data (r/w) */
+#define MCFSIM_PBCNT (MCF_MBAR + 0x88) /* Port B Control (r/w) */
+#define MCFSIM_PBDDR (MCF_MBAR + 0x8c) /* Port B Direction (r/w) */
+#define MCFSIM_PBDAT (MCF_MBAR + 0x8e) /* Port B Data (r/w) */
+#define MCFSIM_PCDDR (MCF_MBAR + 0x94) /* Port C Direction (r/w) */
+#define MCFSIM_PCDAT (MCF_MBAR + 0x96) /* Port C Data (r/w) */
+#define MCFSIM_PDCNT (MCF_MBAR + 0x98) /* Port D Control (r/w) */
+/*
+ * Define system peripheral IRQ usage.
+ */
+#define MCF_IRQ_TIMER 69 /* Timer0, Level 6 */
+#define MCF_IRQ_PROFILER 70 /* Timer1, Level 7 */
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PIN_MAX 48
+#define MCFGPIO_IRQ_MAX -1
+#define MCFGPIO_IRQ_VECBASE -1
/****************************************************************************/
#endif /* m5272sim_h */
diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h
index 95f4f8ee8f7c..453356d72d80 100644
--- a/arch/m68k/include/asm/m527xsim.h
+++ b/arch/m68k/include/asm/m527xsim.h
@@ -54,6 +54,175 @@
#define MCFSIM_DMR1 0x5c /* SDRAM address mask 1 */
#endif
+
+#ifdef CONFIG_M5271
+#define MCFGPIO_PODR_ADDR (MCF_IPSBAR + 0x100000)
+#define MCFGPIO_PODR_DATAH (MCF_IPSBAR + 0x100001)
+#define MCFGPIO_PODR_DATAL (MCF_IPSBAR + 0x100002)
+#define MCFGPIO_PODR_BUSCTL (MCF_IPSBAR + 0x100003)
+#define MCFGPIO_PODR_BS (MCF_IPSBAR + 0x100004)
+#define MCFGPIO_PODR_CS (MCF_IPSBAR + 0x100005)
+#define MCFGPIO_PODR_SDRAM (MCF_IPSBAR + 0x100006)
+#define MCFGPIO_PODR_FECI2C (MCF_IPSBAR + 0x100007)
+#define MCFGPIO_PODR_UARTH (MCF_IPSBAR + 0x100008)
+#define MCFGPIO_PODR_UARTL (MCF_IPSBAR + 0x100009)
+#define MCFGPIO_PODR_QSPI (MCF_IPSBAR + 0x10000A)
+#define MCFGPIO_PODR_TIMER (MCF_IPSBAR + 0x10000B)
+
+#define MCFGPIO_PDDR_ADDR (MCF_IPSBAR + 0x100010)
+#define MCFGPIO_PDDR_DATAH (MCF_IPSBAR + 0x100011)
+#define MCFGPIO_PDDR_DATAL (MCF_IPSBAR + 0x100012)
+#define MCFGPIO_PDDR_BUSCTL (MCF_IPSBAR + 0x100013)
+#define MCFGPIO_PDDR_BS (MCF_IPSBAR + 0x100014)
+#define MCFGPIO_PDDR_CS (MCF_IPSBAR + 0x100015)
+#define MCFGPIO_PDDR_SDRAM (MCF_IPSBAR + 0x100016)
+#define MCFGPIO_PDDR_FECI2C (MCF_IPSBAR + 0x100017)
+#define MCFGPIO_PDDR_UARTH (MCF_IPSBAR + 0x100018)
+#define MCFGPIO_PDDR_UARTL (MCF_IPSBAR + 0x100019)
+#define MCFGPIO_PDDR_QSPI (MCF_IPSBAR + 0x10001A)
+#define MCFGPIO_PDDR_TIMER (MCF_IPSBAR + 0x10001B)
+
+#define MCFGPIO_PPDSDR_ADDR (MCF_IPSBAR + 0x100020)
+#define MCFGPIO_PPDSDR_DATAH (MCF_IPSBAR + 0x100021)
+#define MCFGPIO_PPDSDR_DATAL (MCF_IPSBAR + 0x100022)
+#define MCFGPIO_PPDSDR_BUSCTL (MCF_IPSBAR + 0x100023)
+#define MCFGPIO_PPDSDR_BS (MCF_IPSBAR + 0x100024)
+#define MCFGPIO_PPDSDR_CS (MCF_IPSBAR + 0x100025)
+#define MCFGPIO_PPDSDR_SDRAM (MCF_IPSBAR + 0x100026)
+#define MCFGPIO_PPDSDR_FECI2C (MCF_IPSBAR + 0x100027)
+#define MCFGPIO_PPDSDR_UARTH (MCF_IPSBAR + 0x100028)
+#define MCFGPIO_PPDSDR_UARTL (MCF_IPSBAR + 0x100029)
+#define MCFGPIO_PPDSDR_QSPI (MCF_IPSBAR + 0x10002A)
+#define MCFGPIO_PPDSDR_TIMER (MCF_IPSBAR + 0x10002B)
+
+#define MCFGPIO_PCLRR_ADDR (MCF_IPSBAR + 0x100030)
+#define MCFGPIO_PCLRR_DATAH (MCF_IPSBAR + 0x100031)
+#define MCFGPIO_PCLRR_DATAL (MCF_IPSBAR + 0x100032)
+#define MCFGPIO_PCLRR_BUSCTL (MCF_IPSBAR + 0x100033)
+#define MCFGPIO_PCLRR_BS (MCF_IPSBAR + 0x100034)
+#define MCFGPIO_PCLRR_CS (MCF_IPSBAR + 0x100035)
+#define MCFGPIO_PCLRR_SDRAM (MCF_IPSBAR + 0x100036)
+#define MCFGPIO_PCLRR_FECI2C (MCF_IPSBAR + 0x100037)
+#define MCFGPIO_PCLRR_UARTH (MCF_IPSBAR + 0x100038)
+#define MCFGPIO_PCLRR_UARTL (MCF_IPSBAR + 0x100039)
+#define MCFGPIO_PCLRR_QSPI (MCF_IPSBAR + 0x10003A)
+#define MCFGPIO_PCLRR_TIMER (MCF_IPSBAR + 0x10003B)
+
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PODR MCFGPIO_PODR_ADDR
+#define MCFGPIO_PDDR MCFGPIO_PDDR_ADDR
+#define MCFGPIO_PPDR MCFGPIO_PPDSDR_ADDR
+#define MCFGPIO_SETR MCFGPIO_PPDSDR_ADDR
+#define MCFGPIO_CLRR MCFGPIO_PCLRR_ADDR
+
+#define MCFGPIO_PIN_MAX 100
+#define MCFGPIO_IRQ_MAX 8
+#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+#endif
+
+#ifdef CONFIG_M5275
+#define MCFGPIO_PODR_BUSCTL (MCF_IPSBAR + 0x100004)
+#define MCFGPIO_PODR_ADDR (MCF_IPSBAR + 0x100005)
+#define MCFGPIO_PODR_CS (MCF_IPSBAR + 0x100008)
+#define MCFGPIO_PODR_FEC0H (MCF_IPSBAR + 0x10000A)
+#define MCFGPIO_PODR_FEC0L (MCF_IPSBAR + 0x10000B)
+#define MCFGPIO_PODR_FECI2C (MCF_IPSBAR + 0x10000C)
+#define MCFGPIO_PODR_QSPI (MCF_IPSBAR + 0x10000D)
+#define MCFGPIO_PODR_SDRAM (MCF_IPSBAR + 0x10000E)
+#define MCFGPIO_PODR_TIMERH (MCF_IPSBAR + 0x10000F)
+#define MCFGPIO_PODR_TIMERL (MCF_IPSBAR + 0x100010)
+#define MCFGPIO_PODR_UARTL (MCF_IPSBAR + 0x100011)
+#define MCFGPIO_PODR_FEC1H (MCF_IPSBAR + 0x100012)
+#define MCFGPIO_PODR_FEC1L (MCF_IPSBAR + 0x100013)
+#define MCFGPIO_PODR_BS (MCF_IPSBAR + 0x100014)
+#define MCFGPIO_PODR_IRQ (MCF_IPSBAR + 0x100015)
+#define MCFGPIO_PODR_USBH (MCF_IPSBAR + 0x100016)
+#define MCFGPIO_PODR_USBL (MCF_IPSBAR + 0x100017)
+#define MCFGPIO_PODR_UARTH (MCF_IPSBAR + 0x100018)
+
+#define MCFGPIO_PDDR_BUSCTL (MCF_IPSBAR + 0x100020)
+#define MCFGPIO_PDDR_ADDR (MCF_IPSBAR + 0x100021)
+#define MCFGPIO_PDDR_CS (MCF_IPSBAR + 0x100024)
+#define MCFGPIO_PDDR_FEC0H (MCF_IPSBAR + 0x100026)
+#define MCFGPIO_PDDR_FEC0L (MCF_IPSBAR + 0x100027)
+#define MCFGPIO_PDDR_FECI2C (MCF_IPSBAR + 0x100028)
+#define MCFGPIO_PDDR_QSPI (MCF_IPSBAR + 0x100029)
+#define MCFGPIO_PDDR_SDRAM (MCF_IPSBAR + 0x10002A)
+#define MCFGPIO_PDDR_TIMERH (MCF_IPSBAR + 0x10002B)
+#define MCFGPIO_PDDR_TIMERL (MCF_IPSBAR + 0x10002C)
+#define MCFGPIO_PDDR_UARTL (MCF_IPSBAR + 0x10002D)
+#define MCFGPIO_PDDR_FEC1H (MCF_IPSBAR + 0x10002E)
+#define MCFGPIO_PDDR_FEC1L (MCF_IPSBAR + 0x10002F)
+#define MCFGPIO_PDDR_BS (MCF_IPSBAR + 0x100030)
+#define MCFGPIO_PDDR_IRQ (MCF_IPSBAR + 0x100031)
+#define MCFGPIO_PDDR_USBH (MCF_IPSBAR + 0x100032)
+#define MCFGPIO_PDDR_USBL (MCF_IPSBAR + 0x100033)
+#define MCFGPIO_PDDR_UARTH (MCF_IPSBAR + 0x100034)
+
+#define MCFGPIO_PPDSDR_BUSCTL (MCF_IPSBAR + 0x10003C)
+#define MCFGPIO_PPDSDR_ADDR (MCF_IPSBAR + 0x10003D)
+#define MCFGPIO_PPDSDR_CS (MCF_IPSBAR + 0x100040)
+#define MCFGPIO_PPDSDR_FEC0H (MCF_IPSBAR + 0x100042)
+#define MCFGPIO_PPDSDR_FEC0L (MCF_IPSBAR + 0x100043)
+#define MCFGPIO_PPDSDR_FECI2C (MCF_IPSBAR + 0x100044)
+#define MCFGPIO_PPDSDR_QSPI (MCF_IPSBAR + 0x100045)
+#define MCFGPIO_PPDSDR_SDRAM (MCF_IPSBAR + 0x100046)
+#define MCFGPIO_PPDSDR_TIMERH (MCF_IPSBAR + 0x100047)
+#define MCFGPIO_PPDSDR_TIMERL (MCF_IPSBAR + 0x100048)
+#define MCFGPIO_PPDSDR_UARTL (MCF_IPSBAR + 0x100049)
+#define MCFGPIO_PPDSDR_FEC1H (MCF_IPSBAR + 0x10004A)
+#define MCFGPIO_PPDSDR_FEC1L (MCF_IPSBAR + 0x10004B)
+#define MCFGPIO_PPDSDR_BS (MCF_IPSBAR + 0x10004C)
+#define MCFGPIO_PPDSDR_IRQ (MCF_IPSBAR + 0x10004D)
+#define MCFGPIO_PPDSDR_USBH (MCF_IPSBAR + 0x10004E)
+#define MCFGPIO_PPDSDR_USBL (MCF_IPSBAR + 0x10004F)
+#define MCFGPIO_PPDSDR_UARTH (MCF_IPSBAR + 0x100050)
+
+#define MCFGPIO_PCLRR_BUSCTL (MCF_IPSBAR + 0x100058)
+#define MCFGPIO_PCLRR_ADDR (MCF_IPSBAR + 0x100059)
+#define MCFGPIO_PCLRR_CS (MCF_IPSBAR + 0x10005C)
+#define MCFGPIO_PCLRR_FEC0H (MCF_IPSBAR + 0x10005E)
+#define MCFGPIO_PCLRR_FEC0L (MCF_IPSBAR + 0x10005F)
+#define MCFGPIO_PCLRR_FECI2C (MCF_IPSBAR + 0x100060)
+#define MCFGPIO_PCLRR_QSPI (MCF_IPSBAR + 0x100061)
+#define MCFGPIO_PCLRR_SDRAM (MCF_IPSBAR + 0x100062)
+#define MCFGPIO_PCLRR_TIMERH (MCF_IPSBAR + 0x100063)
+#define MCFGPIO_PCLRR_TIMERL (MCF_IPSBAR + 0x100064)
+#define MCFGPIO_PCLRR_UARTL (MCF_IPSBAR + 0x100065)
+#define MCFGPIO_PCLRR_FEC1H (MCF_IPSBAR + 0x100066)
+#define MCFGPIO_PCLRR_FEC1L (MCF_IPSBAR + 0x100067)
+#define MCFGPIO_PCLRR_BS (MCF_IPSBAR + 0x100068)
+#define MCFGPIO_PCLRR_IRQ (MCF_IPSBAR + 0x100069)
+#define MCFGPIO_PCLRR_USBH (MCF_IPSBAR + 0x10006A)
+#define MCFGPIO_PCLRR_USBL (MCF_IPSBAR + 0x10006B)
+#define MCFGPIO_PCLRR_UARTH (MCF_IPSBAR + 0x10006C)
+
+
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PODR MCFGPIO_PODR_BUSCTL
+#define MCFGPIO_PDDR MCFGPIO_PDDR_BUSCTL
+#define MCFGPIO_PPDR MCFGPIO_PPDSDR_BUSCTL
+#define MCFGPIO_SETR MCFGPIO_PPDSDR_BUSCTL
+#define MCFGPIO_CLRR MCFGPIO_PCLRR_BUSCTL
+
+#define MCFGPIO_PIN_MAX 148
+#define MCFGPIO_IRQ_MAX 8
+#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+#endif
+
+/*
+ * EPort
+ */
+
+#define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002)
+#define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004)
+#define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005)
+
+
/*
* GPIO pins setups to enable the UARTs.
*/
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index d79c49f8134a..e2ad1f42b657 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -41,6 +41,157 @@
#define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */
/*
+ * GPIO registers
+ */
+#define MCFGPIO_PORTA (MCF_IPSBAR + 0x00100000)
+#define MCFGPIO_PORTB (MCF_IPSBAR + 0x00100001)
+#define MCFGPIO_PORTC (MCF_IPSBAR + 0x00100002)
+#define MCFGPIO_PORTD (MCF_IPSBAR + 0x00100003)
+#define MCFGPIO_PORTE (MCF_IPSBAR + 0x00100004)
+#define MCFGPIO_PORTF (MCF_IPSBAR + 0x00100005)
+#define MCFGPIO_PORTG (MCF_IPSBAR + 0x00100006)
+#define MCFGPIO_PORTH (MCF_IPSBAR + 0x00100007)
+#define MCFGPIO_PORTJ (MCF_IPSBAR + 0x00100008)
+#define MCFGPIO_PORTDD (MCF_IPSBAR + 0x00100009)
+#define MCFGPIO_PORTEH (MCF_IPSBAR + 0x0010000A)
+#define MCFGPIO_PORTEL (MCF_IPSBAR + 0x0010000B)
+#define MCFGPIO_PORTAS (MCF_IPSBAR + 0x0010000C)
+#define MCFGPIO_PORTQS (MCF_IPSBAR + 0x0010000D)
+#define MCFGPIO_PORTSD (MCF_IPSBAR + 0x0010000E)
+#define MCFGPIO_PORTTC (MCF_IPSBAR + 0x0010000F)
+#define MCFGPIO_PORTTD (MCF_IPSBAR + 0x00100010)
+#define MCFGPIO_PORTUA (MCF_IPSBAR + 0x00100011)
+
+#define MCFGPIO_DDRA (MCF_IPSBAR + 0x00100014)
+#define MCFGPIO_DDRB (MCF_IPSBAR + 0x00100015)
+#define MCFGPIO_DDRC (MCF_IPSBAR + 0x00100016)
+#define MCFGPIO_DDRD (MCF_IPSBAR + 0x00100017)
+#define MCFGPIO_DDRE (MCF_IPSBAR + 0x00100018)
+#define MCFGPIO_DDRF (MCF_IPSBAR + 0x00100019)
+#define MCFGPIO_DDRG (MCF_IPSBAR + 0x0010001A)
+#define MCFGPIO_DDRH (MCF_IPSBAR + 0x0010001B)
+#define MCFGPIO_DDRJ (MCF_IPSBAR + 0x0010001C)
+#define MCFGPIO_DDRDD (MCF_IPSBAR + 0x0010001D)
+#define MCFGPIO_DDREH (MCF_IPSBAR + 0x0010001E)
+#define MCFGPIO_DDREL (MCF_IPSBAR + 0x0010001F)
+#define MCFGPIO_DDRAS (MCF_IPSBAR + 0x00100020)
+#define MCFGPIO_DDRQS (MCF_IPSBAR + 0x00100021)
+#define MCFGPIO_DDRSD (MCF_IPSBAR + 0x00100022)
+#define MCFGPIO_DDRTC (MCF_IPSBAR + 0x00100023)
+#define MCFGPIO_DDRTD (MCF_IPSBAR + 0x00100024)
+#define MCFGPIO_DDRUA (MCF_IPSBAR + 0x00100025)
+
+#define MCFGPIO_PORTAP (MCF_IPSBAR + 0x00100028)
+#define MCFGPIO_PORTBP (MCF_IPSBAR + 0x00100029)
+#define MCFGPIO_PORTCP (MCF_IPSBAR + 0x0010002A)
+#define MCFGPIO_PORTDP (MCF_IPSBAR + 0x0010002B)
+#define MCFGPIO_PORTEP (MCF_IPSBAR + 0x0010002C)
+#define MCFGPIO_PORTFP (MCF_IPSBAR + 0x0010002D)
+#define MCFGPIO_PORTGP (MCF_IPSBAR + 0x0010002E)
+#define MCFGPIO_PORTHP (MCF_IPSBAR + 0x0010002F)
+#define MCFGPIO_PORTJP (MCF_IPSBAR + 0x00100030)
+#define MCFGPIO_PORTDDP (MCF_IPSBAR + 0x00100031)
+#define MCFGPIO_PORTEHP (MCF_IPSBAR + 0x00100032)
+#define MCFGPIO_PORTELP (MCF_IPSBAR + 0x00100033)
+#define MCFGPIO_PORTASP (MCF_IPSBAR + 0x00100034)
+#define MCFGPIO_PORTQSP (MCF_IPSBAR + 0x00100035)
+#define MCFGPIO_PORTSDP (MCF_IPSBAR + 0x00100036)
+#define MCFGPIO_PORTTCP (MCF_IPSBAR + 0x00100037)
+#define MCFGPIO_PORTTDP (MCF_IPSBAR + 0x00100038)
+#define MCFGPIO_PORTUAP (MCF_IPSBAR + 0x00100039)
+
+#define MCFGPIO_SETA (MCF_IPSBAR + 0x00100028)
+#define MCFGPIO_SETB (MCF_IPSBAR + 0x00100029)
+#define MCFGPIO_SETC (MCF_IPSBAR + 0x0010002A)
+#define MCFGPIO_SETD (MCF_IPSBAR + 0x0010002B)
+#define MCFGPIO_SETE (MCF_IPSBAR + 0x0010002C)
+#define MCFGPIO_SETF (MCF_IPSBAR + 0x0010002D)
+#define MCFGPIO_SETG (MCF_IPSBAR + 0x0010002E)
+#define MCFGPIO_SETH (MCF_IPSBAR + 0x0010002F)
+#define MCFGPIO_SETJ (MCF_IPSBAR + 0x00100030)
+#define MCFGPIO_SETDD (MCF_IPSBAR + 0x00100031)
+#define MCFGPIO_SETEH (MCF_IPSBAR + 0x00100032)
+#define MCFGPIO_SETEL (MCF_IPSBAR + 0x00100033)
+#define MCFGPIO_SETAS (MCF_IPSBAR + 0x00100034)
+#define MCFGPIO_SETQS (MCF_IPSBAR + 0x00100035)
+#define MCFGPIO_SETSD (MCF_IPSBAR + 0x00100036)
+#define MCFGPIO_SETTC (MCF_IPSBAR + 0x00100037)
+#define MCFGPIO_SETTD (MCF_IPSBAR + 0x00100038)
+#define MCFGPIO_SETUA (MCF_IPSBAR + 0x00100039)
+
+#define MCFGPIO_CLRA (MCF_IPSBAR + 0x0010003C)
+#define MCFGPIO_CLRB (MCF_IPSBAR + 0x0010003D)
+#define MCFGPIO_CLRC (MCF_IPSBAR + 0x0010003E)
+#define MCFGPIO_CLRD (MCF_IPSBAR + 0x0010003F)
+#define MCFGPIO_CLRE (MCF_IPSBAR + 0x00100040)
+#define MCFGPIO_CLRF (MCF_IPSBAR + 0x00100041)
+#define MCFGPIO_CLRG (MCF_IPSBAR + 0x00100042)
+#define MCFGPIO_CLRH (MCF_IPSBAR + 0x00100043)
+#define MCFGPIO_CLRJ (MCF_IPSBAR + 0x00100044)
+#define MCFGPIO_CLRDD (MCF_IPSBAR + 0x00100045)
+#define MCFGPIO_CLREH (MCF_IPSBAR + 0x00100046)
+#define MCFGPIO_CLREL (MCF_IPSBAR + 0x00100047)
+#define MCFGPIO_CLRAS (MCF_IPSBAR + 0x00100048)
+#define MCFGPIO_CLRQS (MCF_IPSBAR + 0x00100049)
+#define MCFGPIO_CLRSD (MCF_IPSBAR + 0x0010004A)
+#define MCFGPIO_CLRTC (MCF_IPSBAR + 0x0010004B)
+#define MCFGPIO_CLRTD (MCF_IPSBAR + 0x0010004C)
+#define MCFGPIO_CLRUA (MCF_IPSBAR + 0x0010004D)
+
+#define MCFGPIO_PBCDPAR (MCF_IPSBAR + 0x00100050)
+#define MCFGPIO_PFPAR (MCF_IPSBAR + 0x00100051)
+#define MCFGPIO_PEPAR (MCF_IPSBAR + 0x00100052)
+#define MCFGPIO_PJPAR (MCF_IPSBAR + 0x00100054)
+#define MCFGPIO_PSDPAR (MCF_IPSBAR + 0x00100055)
+#define MCFGPIO_PASPAR (MCF_IPSBAR + 0x00100056)
+#define MCFGPIO_PEHLPAR (MCF_IPSBAR + 0x00100058)
+#define MCFGPIO_PQSPAR (MCF_IPSBAR + 0x00100059)
+#define MCFGPIO_PTCPAR (MCF_IPSBAR + 0x0010005A)
+#define MCFGPIO_PTDPAR (MCF_IPSBAR + 0x0010005B)
+#define MCFGPIO_PUAPAR (MCF_IPSBAR + 0x0010005C)
+
+/*
+ * Edge Port registers
+ */
+#define MCFEPORT_EPPAR (MCF_IPSBAR + 0x00130000)
+#define MCFEPORT_EPDDR (MCF_IPSBAR + 0x00130002)
+#define MCFEPORT_EPIER (MCF_IPSBAR + 0x00130003)
+#define MCFEPORT_EPDR (MCF_IPSBAR + 0x00130004)
+#define MCFEPORT_EPPDR (MCF_IPSBAR + 0x00130005)
+#define MCFEPORT_EPFR (MCF_IPSBAR + 0x00130006)
+
+/*
+ * Queued ADC registers
+ */
+#define MCFQADC_PORTQA (MCF_IPSBAR + 0x00190006)
+#define MCFQADC_PORTQB (MCF_IPSBAR + 0x00190007)
+#define MCFQADC_DDRQA (MCF_IPSBAR + 0x00190008)
+#define MCFQADC_DDRQB (MCF_IPSBAR + 0x00190009)
+
+/*
+ * General Purpose Timers registers
+ */
+#define MCFGPTA_GPTPORT (MCF_IPSBAR + 0x001A001D)
+#define MCFGPTA_GPTDDR (MCF_IPSBAR + 0x001A001E)
+#define MCFGPTB_GPTPORT (MCF_IPSBAR + 0x001B001D)
+#define MCFGPTB_GPTDDR (MCF_IPSBAR + 0x001B001E)
+/*
+ *
+ * definitions for generic gpio support
+ *
+ */
+#define MCFGPIO_PODR MCFGPIO_PORTA /* port output data */
+#define MCFGPIO_PDDR MCFGPIO_DDRA /* port data direction */
+#define MCFGPIO_PPDR MCFGPIO_PORTAP /* port pin data */
+#define MCFGPIO_SETR MCFGPIO_SETA /* set output */
+#define MCFGPIO_CLRR MCFGPIO_CLRA /* clr output */
+
+#define MCFGPIO_IRQ_MAX 8
+#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+#define MCFGPIO_PIN_MAX 180
+
+
+/*
* Derek Cheung - 6 Feb 2005
* add I2C and QSPI register definition using Freescale's MCF5282
*/
diff --git a/arch/m68k/include/asm/m5307sim.h b/arch/m68k/include/asm/m5307sim.h
index 5886728409c0..c6830e5b54ce 100644
--- a/arch/m68k/include/asm/m5307sim.h
+++ b/arch/m68k/include/asm/m5307sim.h
@@ -90,8 +90,15 @@
#define MCFSIM_DACR1 0x110 /* DRAM 1 Addr and Ctrl (r/w) */
#define MCFSIM_DMR1 0x114 /* DRAM 1 Mask reg (r/w) */
-#define MCFSIM_PADDR 0x244 /* Parallel Direction (r/w) */
-#define MCFSIM_PADAT 0x248 /* Parallel Data (r/w) */
+#define MCFSIM_PADDR (MCF_MBAR + 0x244)
+#define MCFSIM_PADAT (MCF_MBAR + 0x248)
+
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PIN_MAX 16
+#define MCFGPIO_IRQ_MAX -1
+#define MCFGPIO_IRQ_VECBASE -1
/* Definition offset address for CS2-7 -- old mask 5307 */
@@ -117,22 +124,6 @@
#define MCFSIM_DMA2ICR MCFSIM_ICR8 /* DMA 2 ICR */
#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
-#if defined(CONFIG_M5307)
-#define MCFSIM_IMR_MASKALL 0x3fffe /* All SIM intr sources */
-#endif
-
-/*
- * Macro to set IMR register. It is 32 bits on the 5307.
- */
-#define mcf_getimr() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR))
-
-#define mcf_setimr(imr) \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR)) = (imr);
-
-#define mcf_getipr() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IPR))
-
/*
* Some symbol defines for the Parallel Port Pin Assignment Register
@@ -149,6 +140,11 @@
#define IRQ3_LEVEL6 0x40
#define IRQ1_LEVEL2 0x20
+/*
+ * Define system peripheral IRQ usage.
+ */
+#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
+#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
/*
* Define the Cache register flags.
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m532xsim.h
index eb7fd4448947..27388d38210b 100644
--- a/arch/m68k/include/asm/m532xsim.h
+++ b/arch/m68k/include/asm/m532xsim.h
@@ -56,47 +56,21 @@
#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
-#define MCFSIM_IMR_MASKALL 0xFFFFFFFF /* All SIM intr sources */
-
-#define MCFSIM_IMR_SIMR0 0xFC04801C
-#define MCFSIM_IMR_SIMR1 0xFC04C01C
-#define MCFSIM_IMR_CIMR0 0xFC04801D
-#define MCFSIM_IMR_CIMR1 0xFC04C01D
+#define MCFINTC0_SIMR 0xFC04801C
+#define MCFINTC0_CIMR 0xFC04801D
+#define MCFINTC0_ICR0 0xFC048040
+#define MCFINTC1_SIMR 0xFC04C01C
+#define MCFINTC1_CIMR 0xFC04C01D
+#define MCFINTC1_ICR0 0xFC04C040
#define MCFSIM_ICR_TIMER1 (0xFC048040+32)
#define MCFSIM_ICR_TIMER2 (0xFC048040+33)
-
/*
- * Macro to set IMR register. It is 32 bits on the 5307.
+ * Define system peripheral IRQ usage.
*/
-#define mcf_getimr() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR))
-
-#define mcf_setimr(imr) \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR)) = (imr);
-
-#define mcf_getipr() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IPR))
-
-#define mcf_getiprl() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IPRL))
-
-#define mcf_getiprh() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IPRH))
-
-
-#define mcf_enable_irq0(irq) \
- *((volatile unsigned char*) (MCFSIM_IMR_CIMR0)) = (irq);
-
-#define mcf_enable_irq1(irq) \
- *((volatile unsigned char*) (MCFSIM_IMR_CIMR1)) = (irq);
-
-#define mcf_disable_irq0(irq) \
- *((volatile unsigned char*) (MCFSIM_IMR_SIMR0)) = (irq);
-
-#define mcf_disable_irq1(irq) \
- *((volatile unsigned char*) (MCFSIM_IMR_SIMR1)) = (irq);
+#define MCF_IRQ_TIMER (64 + 32) /* Timer0 */
+#define MCF_IRQ_PROFILER (64 + 33) /* Timer1 */
/*
* Define the Cache register flags.
@@ -422,70 +396,70 @@
*********************************************************************/
/* Register read/write macros */
-#define MCF_GPIO_PODR_FECH MCF_REG08(0xFC0A4000)
-#define MCF_GPIO_PODR_FECL MCF_REG08(0xFC0A4001)
-#define MCF_GPIO_PODR_SSI MCF_REG08(0xFC0A4002)
-#define MCF_GPIO_PODR_BUSCTL MCF_REG08(0xFC0A4003)
-#define MCF_GPIO_PODR_BE MCF_REG08(0xFC0A4004)
-#define MCF_GPIO_PODR_CS MCF_REG08(0xFC0A4005)
-#define MCF_GPIO_PODR_PWM MCF_REG08(0xFC0A4006)
-#define MCF_GPIO_PODR_FECI2C MCF_REG08(0xFC0A4007)
-#define MCF_GPIO_PODR_UART MCF_REG08(0xFC0A4009)
-#define MCF_GPIO_PODR_QSPI MCF_REG08(0xFC0A400A)
-#define MCF_GPIO_PODR_TIMER MCF_REG08(0xFC0A400B)
-#define MCF_GPIO_PODR_LCDDATAH MCF_REG08(0xFC0A400D)
-#define MCF_GPIO_PODR_LCDDATAM MCF_REG08(0xFC0A400E)
-#define MCF_GPIO_PODR_LCDDATAL MCF_REG08(0xFC0A400F)
-#define MCF_GPIO_PODR_LCDCTLH MCF_REG08(0xFC0A4010)
-#define MCF_GPIO_PODR_LCDCTLL MCF_REG08(0xFC0A4011)
-#define MCF_GPIO_PDDR_FECH MCF_REG08(0xFC0A4014)
-#define MCF_GPIO_PDDR_FECL MCF_REG08(0xFC0A4015)
-#define MCF_GPIO_PDDR_SSI MCF_REG08(0xFC0A4016)
-#define MCF_GPIO_PDDR_BUSCTL MCF_REG08(0xFC0A4017)
-#define MCF_GPIO_PDDR_BE MCF_REG08(0xFC0A4018)
-#define MCF_GPIO_PDDR_CS MCF_REG08(0xFC0A4019)
-#define MCF_GPIO_PDDR_PWM MCF_REG08(0xFC0A401A)
-#define MCF_GPIO_PDDR_FECI2C MCF_REG08(0xFC0A401B)
-#define MCF_GPIO_PDDR_UART MCF_REG08(0xFC0A401C)
-#define MCF_GPIO_PDDR_QSPI MCF_REG08(0xFC0A401E)
-#define MCF_GPIO_PDDR_TIMER MCF_REG08(0xFC0A401F)
-#define MCF_GPIO_PDDR_LCDDATAH MCF_REG08(0xFC0A4021)
-#define MCF_GPIO_PDDR_LCDDATAM MCF_REG08(0xFC0A4022)
-#define MCF_GPIO_PDDR_LCDDATAL MCF_REG08(0xFC0A4023)
-#define MCF_GPIO_PDDR_LCDCTLH MCF_REG08(0xFC0A4024)
-#define MCF_GPIO_PDDR_LCDCTLL MCF_REG08(0xFC0A4025)
-#define MCF_GPIO_PPDSDR_FECH MCF_REG08(0xFC0A4028)
-#define MCF_GPIO_PPDSDR_FECL MCF_REG08(0xFC0A4029)
-#define MCF_GPIO_PPDSDR_SSI MCF_REG08(0xFC0A402A)
-#define MCF_GPIO_PPDSDR_BUSCTL MCF_REG08(0xFC0A402B)
-#define MCF_GPIO_PPDSDR_BE MCF_REG08(0xFC0A402C)
-#define MCF_GPIO_PPDSDR_CS MCF_REG08(0xFC0A402D)
-#define MCF_GPIO_PPDSDR_PWM MCF_REG08(0xFC0A402E)
-#define MCF_GPIO_PPDSDR_FECI2C MCF_REG08(0xFC0A402F)
-#define MCF_GPIO_PPDSDR_UART MCF_REG08(0xFC0A4031)
-#define MCF_GPIO_PPDSDR_QSPI MCF_REG08(0xFC0A4032)
-#define MCF_GPIO_PPDSDR_TIMER MCF_REG08(0xFC0A4033)
-#define MCF_GPIO_PPDSDR_LCDDATAH MCF_REG08(0xFC0A4035)
-#define MCF_GPIO_PPDSDR_LCDDATAM MCF_REG08(0xFC0A4036)
-#define MCF_GPIO_PPDSDR_LCDDATAL MCF_REG08(0xFC0A4037)
-#define MCF_GPIO_PPDSDR_LCDCTLH MCF_REG08(0xFC0A4038)
-#define MCF_GPIO_PPDSDR_LCDCTLL MCF_REG08(0xFC0A4039)
-#define MCF_GPIO_PCLRR_FECH MCF_REG08(0xFC0A403C)
-#define MCF_GPIO_PCLRR_FECL MCF_REG08(0xFC0A403D)
-#define MCF_GPIO_PCLRR_SSI MCF_REG08(0xFC0A403E)
-#define MCF_GPIO_PCLRR_BUSCTL MCF_REG08(0xFC0A403F)
-#define MCF_GPIO_PCLRR_BE MCF_REG08(0xFC0A4040)
-#define MCF_GPIO_PCLRR_CS MCF_REG08(0xFC0A4041)
-#define MCF_GPIO_PCLRR_PWM MCF_REG08(0xFC0A4042)
-#define MCF_GPIO_PCLRR_FECI2C MCF_REG08(0xFC0A4043)
-#define MCF_GPIO_PCLRR_UART MCF_REG08(0xFC0A4045)
-#define MCF_GPIO_PCLRR_QSPI MCF_REG08(0xFC0A4046)
-#define MCF_GPIO_PCLRR_TIMER MCF_REG08(0xFC0A4047)
-#define MCF_GPIO_PCLRR_LCDDATAH MCF_REG08(0xFC0A4049)
-#define MCF_GPIO_PCLRR_LCDDATAM MCF_REG08(0xFC0A404A)
-#define MCF_GPIO_PCLRR_LCDDATAL MCF_REG08(0xFC0A404B)
-#define MCF_GPIO_PCLRR_LCDCTLH MCF_REG08(0xFC0A404C)
-#define MCF_GPIO_PCLRR_LCDCTLL MCF_REG08(0xFC0A404D)
+#define MCFGPIO_PODR_FECH (0xFC0A4000)
+#define MCFGPIO_PODR_FECL (0xFC0A4001)
+#define MCFGPIO_PODR_SSI (0xFC0A4002)
+#define MCFGPIO_PODR_BUSCTL (0xFC0A4003)
+#define MCFGPIO_PODR_BE (0xFC0A4004)
+#define MCFGPIO_PODR_CS (0xFC0A4005)
+#define MCFGPIO_PODR_PWM (0xFC0A4006)
+#define MCFGPIO_PODR_FECI2C (0xFC0A4007)
+#define MCFGPIO_PODR_UART (0xFC0A4009)
+#define MCFGPIO_PODR_QSPI (0xFC0A400A)
+#define MCFGPIO_PODR_TIMER (0xFC0A400B)
+#define MCFGPIO_PODR_LCDDATAH (0xFC0A400D)
+#define MCFGPIO_PODR_LCDDATAM (0xFC0A400E)
+#define MCFGPIO_PODR_LCDDATAL (0xFC0A400F)
+#define MCFGPIO_PODR_LCDCTLH (0xFC0A4010)
+#define MCFGPIO_PODR_LCDCTLL (0xFC0A4011)
+#define MCFGPIO_PDDR_FECH (0xFC0A4014)
+#define MCFGPIO_PDDR_FECL (0xFC0A4015)
+#define MCFGPIO_PDDR_SSI (0xFC0A4016)
+#define MCFGPIO_PDDR_BUSCTL (0xFC0A4017)
+#define MCFGPIO_PDDR_BE (0xFC0A4018)
+#define MCFGPIO_PDDR_CS (0xFC0A4019)
+#define MCFGPIO_PDDR_PWM (0xFC0A401A)
+#define MCFGPIO_PDDR_FECI2C (0xFC0A401B)
+#define MCFGPIO_PDDR_UART (0xFC0A401C)
+#define MCFGPIO_PDDR_QSPI (0xFC0A401E)
+#define MCFGPIO_PDDR_TIMER (0xFC0A401F)
+#define MCFGPIO_PDDR_LCDDATAH (0xFC0A4021)
+#define MCFGPIO_PDDR_LCDDATAM (0xFC0A4022)
+#define MCFGPIO_PDDR_LCDDATAL (0xFC0A4023)
+#define MCFGPIO_PDDR_LCDCTLH (0xFC0A4024)
+#define MCFGPIO_PDDR_LCDCTLL (0xFC0A4025)
+#define MCFGPIO_PPDSDR_FECH (0xFC0A4028)
+#define MCFGPIO_PPDSDR_FECL (0xFC0A4029)
+#define MCFGPIO_PPDSDR_SSI (0xFC0A402A)
+#define MCFGPIO_PPDSDR_BUSCTL (0xFC0A402B)
+#define MCFGPIO_PPDSDR_BE (0xFC0A402C)
+#define MCFGPIO_PPDSDR_CS (0xFC0A402D)
+#define MCFGPIO_PPDSDR_PWM (0xFC0A402E)
+#define MCFGPIO_PPDSDR_FECI2C (0xFC0A402F)
+#define MCFGPIO_PPDSDR_UART (0xFC0A4031)
+#define MCFGPIO_PPDSDR_QSPI (0xFC0A4032)
+#define MCFGPIO_PPDSDR_TIMER (0xFC0A4033)
+#define MCFGPIO_PPDSDR_LCDDATAH (0xFC0A4035)
+#define MCFGPIO_PPDSDR_LCDDATAM (0xFC0A4036)
+#define MCFGPIO_PPDSDR_LCDDATAL (0xFC0A4037)
+#define MCFGPIO_PPDSDR_LCDCTLH (0xFC0A4038)
+#define MCFGPIO_PPDSDR_LCDCTLL (0xFC0A4039)
+#define MCFGPIO_PCLRR_FECH (0xFC0A403C)
+#define MCFGPIO_PCLRR_FECL (0xFC0A403D)
+#define MCFGPIO_PCLRR_SSI (0xFC0A403E)
+#define MCFGPIO_PCLRR_BUSCTL (0xFC0A403F)
+#define MCFGPIO_PCLRR_BE (0xFC0A4040)
+#define MCFGPIO_PCLRR_CS (0xFC0A4041)
+#define MCFGPIO_PCLRR_PWM (0xFC0A4042)
+#define MCFGPIO_PCLRR_FECI2C (0xFC0A4043)
+#define MCFGPIO_PCLRR_UART (0xFC0A4045)
+#define MCFGPIO_PCLRR_QSPI (0xFC0A4046)
+#define MCFGPIO_PCLRR_TIMER (0xFC0A4047)
+#define MCFGPIO_PCLRR_LCDDATAH (0xFC0A4049)
+#define MCFGPIO_PCLRR_LCDDATAM (0xFC0A404A)
+#define MCFGPIO_PCLRR_LCDDATAL (0xFC0A404B)
+#define MCFGPIO_PCLRR_LCDCTLH (0xFC0A404C)
+#define MCFGPIO_PCLRR_LCDCTLL (0xFC0A404D)
#define MCF_GPIO_PAR_FEC MCF_REG08(0xFC0A4050)
#define MCF_GPIO_PAR_PWM MCF_REG08(0xFC0A4051)
#define MCF_GPIO_PAR_BUSCTL MCF_REG08(0xFC0A4052)
@@ -1187,6 +1161,20 @@
/* Bit definitions and macros for MCF_GPIO_DSCR_IRQ */
#define MCF_GPIO_DSCR_IRQ_IRQ_DSE(x) (((x)&0x03)<<0)
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PODR MCFGPIO_PODR_FECH
+#define MCFGPIO_PDDR MCFGPIO_PDDR_FECH
+#define MCFGPIO_PPDR MCFGPIO_PPDSDR_FECH
+#define MCFGPIO_SETR MCFGPIO_PPDSDR_FECH
+#define MCFGPIO_CLRR MCFGPIO_PCLRR_FECH
+
+#define MCFGPIO_PIN_MAX 136
+#define MCFGPIO_IRQ_MAX 8
+#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+
+
/*********************************************************************
*
* Interrupt Controller (INTC)
@@ -2154,12 +2142,12 @@
*********************************************************************/
/* Register read/write macros */
-#define MCF_EPORT_EPPAR MCF_REG16(0xFC094000)
-#define MCF_EPORT_EPDDR MCF_REG08(0xFC094002)
-#define MCF_EPORT_EPIER MCF_REG08(0xFC094003)
-#define MCF_EPORT_EPDR MCF_REG08(0xFC094004)
-#define MCF_EPORT_EPPDR MCF_REG08(0xFC094005)
-#define MCF_EPORT_EPFR MCF_REG08(0xFC094006)
+#define MCFEPORT_EPPAR (0xFC094000)
+#define MCFEPORT_EPDDR (0xFC094002)
+#define MCFEPORT_EPIER (0xFC094003)
+#define MCFEPORT_EPDR (0xFC094004)
+#define MCFEPORT_EPPDR (0xFC094005)
+#define MCFEPORT_EPFR (0xFC094006)
/* Bit definitions and macros for MCF_EPORT_EPPAR */
#define MCF_EPORT_EPPAR_EPPA1(x) (((x)&0x0003)<<2)
diff --git a/arch/m68k/include/asm/m5407sim.h b/arch/m68k/include/asm/m5407sim.h
index cc22c4a53005..c399abbf953c 100644
--- a/arch/m68k/include/asm/m5407sim.h
+++ b/arch/m68k/include/asm/m5407sim.h
@@ -73,9 +73,15 @@
#define MCFSIM_DACR1 0x110 /* DRAM 1 Addr and Ctrl (r/w) */
#define MCFSIM_DMR1 0x114 /* DRAM 1 Mask reg (r/w) */
-#define MCFSIM_PADDR 0x244 /* Parallel Direction (r/w) */
-#define MCFSIM_PADAT 0x248 /* Parallel Data (r/w) */
+#define MCFSIM_PADDR (MCF_MBAR + 0x244)
+#define MCFSIM_PADAT (MCF_MBAR + 0x248)
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PIN_MAX 16
+#define MCFGPIO_IRQ_MAX -1
+#define MCFGPIO_IRQ_VECBASE -1
/*
* Some symbol defines for the above...
@@ -91,19 +97,6 @@
#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
/*
- * Macro to set IMR register. It is 32 bits on the 5407.
- */
-#define mcf_getimr() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR))
-
-#define mcf_setimr(imr) \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IMR)) = (imr);
-
-#define mcf_getipr() \
- *((volatile unsigned long *) (MCF_MBAR + MCFSIM_IPR))
-
-
-/*
* Some symbol defines for the Parallel Port Pin Assignment Register
*/
#define MCFSIM_PAR_DREQ0 0x40 /* Set to select DREQ0 input */
@@ -118,6 +111,11 @@
#define IRQ3_LEVEL6 0x40
#define IRQ1_LEVEL2 0x20
+/*
+ * Define system peripheral IRQ usage.
+ */
+#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
+#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
/*
* Define the Cache register flags.
diff --git a/arch/m68k/include/asm/mcfgpio.h b/arch/m68k/include/asm/mcfgpio.h
new file mode 100644
index 000000000000..ee5e4ccce89e
--- /dev/null
+++ b/arch/m68k/include/asm/mcfgpio.h
@@ -0,0 +1,40 @@
+/*
+ * Coldfire generic GPIO support.
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef mcfgpio_h
+#define mcfgpio_h
+
+#include <linux/io.h>
+#include <asm-generic/gpio.h>
+
+struct mcf_gpio_chip {
+ struct gpio_chip gpio_chip;
+ void __iomem *pddr;
+ void __iomem *podr;
+ void __iomem *ppdr;
+ void __iomem *setr;
+ void __iomem *clrr;
+ const u8 *gpio_to_pinmux;
+};
+
+int mcf_gpio_direction_input(struct gpio_chip *, unsigned);
+int mcf_gpio_get_value(struct gpio_chip *, unsigned);
+int mcf_gpio_direction_output(struct gpio_chip *, unsigned, int);
+void mcf_gpio_set_value(struct gpio_chip *, unsigned, int);
+void mcf_gpio_set_value_fast(struct gpio_chip *, unsigned, int);
+int mcf_gpio_request(struct gpio_chip *, unsigned);
+void mcf_gpio_free(struct gpio_chip *, unsigned);
+
+#endif
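A hedged sketch (not part of this patch) of how per-SoC platform code would presumably register one GPIO bank with gpiolib using the helpers declared in mcfgpio.h; the bank name, base and ngpio values are assumptions:

/* Hypothetical bank registration sketch. */
#include <linux/init.h>
#include <linux/gpio.h>
#include <asm/mcfgpio.h>
#include <asm/mcfsim.h>

static struct mcf_gpio_chip mcf_gpio_bank0 = {
	.gpio_chip = {
		.label			= "mcfgpio0",
		.request		= mcf_gpio_request,
		.free			= mcf_gpio_free,
		.direction_input	= mcf_gpio_direction_input,
		.direction_output	= mcf_gpio_direction_output,
		.get			= mcf_gpio_get_value,
		.set			= mcf_gpio_set_value,
		.base			= 0,
		.ngpio			= 8,
	},
	.pddr = (void __iomem *) MCFGPIO_PDDR,
	.podr = (void __iomem *) MCFGPIO_PODR,
	.ppdr = (void __iomem *) MCFGPIO_PPDR,
};

static int __init mcf_gpio_bank0_init(void)
{
	return gpiochip_add(&mcf_gpio_bank0.gpio_chip);
}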
diff --git a/arch/m68k/include/asm/mcfintc.h b/arch/m68k/include/asm/mcfintc.h
new file mode 100644
index 000000000000..4183320a3813
--- /dev/null
+++ b/arch/m68k/include/asm/mcfintc.h
@@ -0,0 +1,89 @@
+/****************************************************************************/
+
+/*
+ * mcfintc.h -- support definitions for the simple ColdFire
+ * Interrupt Controller
+ *
+ * (C) Copyright 2009, Greg Ungerer <gerg@uclinux.org>
+ */
+
+/****************************************************************************/
+#ifndef mcfintc_h
+#define mcfintc_h
+/****************************************************************************/
+
+/*
+ * Most of the older ColdFire parts use the same simple interrupt
+ * controller. This is currently used on the 5206, 5206e, 5249, 5307
+ * and 5407 parts.
+ *
+ * The builtin peripherals are masked through dedicated bits in the
+ * Interrupt Mask register (IMR) - and this is not indexed (or in any way
+ * related to) the actual interrupt number they use. So knowing the IRQ
+ * number doesn't explicitly map to a certain internal device for
+ * interrupt control purposes.
+ */
+
+/*
+ * Bit definitions for the ICR family of registers.
+ */
+#define MCFSIM_ICR_AUTOVEC 0x80 /* Auto-vectored intr */
+#define MCFSIM_ICR_LEVEL0 0x00 /* Level 0 intr */
+#define MCFSIM_ICR_LEVEL1 0x04 /* Level 1 intr */
+#define MCFSIM_ICR_LEVEL2 0x08 /* Level 2 intr */
+#define MCFSIM_ICR_LEVEL3 0x0c /* Level 3 intr */
+#define MCFSIM_ICR_LEVEL4 0x10 /* Level 4 intr */
+#define MCFSIM_ICR_LEVEL5 0x14 /* Level 5 intr */
+#define MCFSIM_ICR_LEVEL6 0x18 /* Level 6 intr */
+#define MCFSIM_ICR_LEVEL7 0x1c /* Level 7 intr */
+
+#define MCFSIM_ICR_PRI0 0x00 /* Priority 0 intr */
+#define MCFSIM_ICR_PRI1 0x01 /* Priority 1 intr */
+#define MCFSIM_ICR_PRI2 0x02 /* Priority 2 intr */
+#define MCFSIM_ICR_PRI3 0x03 /* Priority 3 intr */
+
+/*
+ * IMR bit position definitions. Not all ColdFire parts with this interrupt
+ * controller actually support all of these interrupt sources. But the bit
+ * numbers are the same in all cores.
+ */
+#define MCFINTC_EINT1 1 /* External int #1 */
+#define MCFINTC_EINT2 2 /* External int #2 */
+#define MCFINTC_EINT3 3 /* External int #3 */
+#define MCFINTC_EINT4 4 /* External int #4 */
+#define MCFINTC_EINT5 5 /* External int #5 */
+#define MCFINTC_EINT6 6 /* External int #6 */
+#define MCFINTC_EINT7 7 /* External int #7 */
+#define MCFINTC_SWT 8 /* Software Watchdog */
+#define MCFINTC_TIMER1 9
+#define MCFINTC_TIMER2 10
+#define MCFINTC_I2C 11 /* I2C / MBUS */
+#define MCFINTC_UART0 12
+#define MCFINTC_UART1 13
+#define MCFINTC_DMA0 14
+#define MCFINTC_DMA1 15
+#define MCFINTC_DMA2 16
+#define MCFINTC_DMA3 17
+#define MCFINTC_QSPI 18
+
+#ifndef __ASSEMBLER__
+
+/*
+ * There is no one-to-one correspondence between the interrupt number (irq)
+ * and the bit fields on the mask register. So we create a per-cpu type
+ * mapping of irq to mask bit. The CPU platform code needs to register
+ * its supported irqs at init time, using this function.
+ */
+extern unsigned char mcf_irq2imr[];
+static inline void mcf_mapirq2imr(int irq, int imr)
+{
+ mcf_irq2imr[irq] = imr;
+}
+
+void mcf_autovector(int irq);
+void mcf_setimr(int index);
+void mcf_clrimr(int index);
+#endif
+
+/****************************************************************************/
+#endif /* mcfintc_h */
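A brief sketch (an assumption, not taken from the patch) of how ColdFire platform code is expected to use these helpers: map each IRQ to its IMR bit at init, then mask and unmask through mcf_setimr()/mcf_clrimr(); the function name is hypothetical:

/* Hypothetical init sequence for a 5206-class part. */
#include <linux/init.h>
#include <asm/mcfsim.h>

void __init my_mcf_intc_init(void)
{
	/* tell the common code which IMR bit controls each timer IRQ */
	mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
	mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);

	mcf_autovector(MCF_IRQ_TIMER);	/* timer is auto-vectored */
	mcf_clrimr(MCFINTC_TIMER1);	/* clear the mask bit: unmask Timer0 */
}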
diff --git a/arch/m68k/include/asm/mcfsim.h b/arch/m68k/include/asm/mcfsim.h
index da3f2ceff3a4..bf621467f35d 100644
--- a/arch/m68k/include/asm/mcfsim.h
+++ b/arch/m68k/include/asm/mcfsim.h
@@ -12,19 +12,21 @@
#define mcfsim_h
/****************************************************************************/
-
/*
- * Include 5204, 5206/e, 5235, 5249, 5270/5271, 5272, 5280/5282,
- * 5307 or 5407 specific addresses.
+ * Include the appropriate ColdFire CPU specific System Integration Module
+ * (SIM) definitions.
*/
#if defined(CONFIG_M5206) || defined(CONFIG_M5206e)
#include <asm/m5206sim.h>
+#include <asm/mcfintc.h>
#elif defined(CONFIG_M520x)
#include <asm/m520xsim.h>
#elif defined(CONFIG_M523x)
#include <asm/m523xsim.h>
+#include <asm/mcfintc.h>
#elif defined(CONFIG_M5249)
#include <asm/m5249sim.h>
+#include <asm/mcfintc.h>
#elif defined(CONFIG_M527x)
#include <asm/m527xsim.h>
#elif defined(CONFIG_M5272)
@@ -33,94 +35,13 @@
#include <asm/m528xsim.h>
#elif defined(CONFIG_M5307)
#include <asm/m5307sim.h>
+#include <asm/mcfintc.h>
#elif defined(CONFIG_M532x)
#include <asm/m532xsim.h>
#elif defined(CONFIG_M5407)
#include <asm/m5407sim.h>
+#include <asm/mcfintc.h>
#endif
-
-/*
- * Define the base address of the SIM within the MBAR address space.
- */
-#define MCFSIM_BASE 0x0 /* Base address of SIM */
-
-
-/*
- * Bit definitions for the ICR family of registers.
- */
-#define MCFSIM_ICR_AUTOVEC 0x80 /* Auto-vectored intr */
-#define MCFSIM_ICR_LEVEL0 0x00 /* Level 0 intr */
-#define MCFSIM_ICR_LEVEL1 0x04 /* Level 1 intr */
-#define MCFSIM_ICR_LEVEL2 0x08 /* Level 2 intr */
-#define MCFSIM_ICR_LEVEL3 0x0c /* Level 3 intr */
-#define MCFSIM_ICR_LEVEL4 0x10 /* Level 4 intr */
-#define MCFSIM_ICR_LEVEL5 0x14 /* Level 5 intr */
-#define MCFSIM_ICR_LEVEL6 0x18 /* Level 6 intr */
-#define MCFSIM_ICR_LEVEL7 0x1c /* Level 7 intr */
-
-#define MCFSIM_ICR_PRI0 0x00 /* Priority 0 intr */
-#define MCFSIM_ICR_PRI1 0x01 /* Priority 1 intr */
-#define MCFSIM_ICR_PRI2 0x02 /* Priority 2 intr */
-#define MCFSIM_ICR_PRI3 0x03 /* Priority 3 intr */
-
-/*
- * Bit definitions for the Interrupt Mask register (IMR).
- */
-#define MCFSIM_IMR_EINT1 0x0002 /* External intr # 1 */
-#define MCFSIM_IMR_EINT2 0x0004 /* External intr # 2 */
-#define MCFSIM_IMR_EINT3 0x0008 /* External intr # 3 */
-#define MCFSIM_IMR_EINT4 0x0010 /* External intr # 4 */
-#define MCFSIM_IMR_EINT5 0x0020 /* External intr # 5 */
-#define MCFSIM_IMR_EINT6 0x0040 /* External intr # 6 */
-#define MCFSIM_IMR_EINT7 0x0080 /* External intr # 7 */
-
-#define MCFSIM_IMR_SWD 0x0100 /* Software Watchdog intr */
-#define MCFSIM_IMR_TIMER1 0x0200 /* TIMER 1 intr */
-#define MCFSIM_IMR_TIMER2 0x0400 /* TIMER 2 intr */
-#define MCFSIM_IMR_MBUS 0x0800 /* MBUS intr */
-#define MCFSIM_IMR_UART1 0x1000 /* UART 1 intr */
-#define MCFSIM_IMR_UART2 0x2000 /* UART 2 intr */
-
-#if defined(CONFIG_M5206e)
-#define MCFSIM_IMR_DMA1 0x4000 /* DMA 1 intr */
-#define MCFSIM_IMR_DMA2 0x8000 /* DMA 2 intr */
-#elif defined(CONFIG_M5249) || defined(CONFIG_M5307)
-#define MCFSIM_IMR_DMA0 0x4000 /* DMA 0 intr */
-#define MCFSIM_IMR_DMA1 0x8000 /* DMA 1 intr */
-#define MCFSIM_IMR_DMA2 0x10000 /* DMA 2 intr */
-#define MCFSIM_IMR_DMA3 0x20000 /* DMA 3 intr */
-#endif
-
-/*
- * Mask for all of the SIM devices. Some parts have more or less
- * SIM devices. This is a catchall for the sandard set.
- */
-#ifndef MCFSIM_IMR_MASKALL
-#define MCFSIM_IMR_MASKALL 0x3ffe /* All intr sources */
-#endif
-
-
-/*
- * PIT interrupt settings, if not found in mXXXXsim.h file.
- */
-#ifndef ICR_INTRCONF
-#define ICR_INTRCONF 0x2b /* PIT1 level 5, priority 3 */
-#endif
-#ifndef MCFPIT_IMR
-#define MCFPIT_IMR MCFINTC_IMRH
-#endif
-#ifndef MCFPIT_IMR_IBIT
-#define MCFPIT_IMR_IBIT (1 << (MCFINT_PIT1 - 32))
-#endif
-
-
-#ifndef __ASSEMBLY__
-/*
- * Definition for the interrupt auto-vectoring support.
- */
-extern void mcf_autovector(unsigned int vec);
-#endif /* __ASSEMBLY__ */
-
/****************************************************************************/
#endif /* mcfsim_h */
diff --git a/arch/m68k/include/asm/mcfsmc.h b/arch/m68k/include/asm/mcfsmc.h
index 2d7a4dbd9683..527bea5d6788 100644
--- a/arch/m68k/include/asm/mcfsmc.h
+++ b/arch/m68k/include/asm/mcfsmc.h
@@ -167,15 +167,15 @@ void smc_remap(unsigned int ioaddr)
static int once = 0;
extern unsigned short ppdata;
if (once++ == 0) {
- *((volatile unsigned short *)(MCF_MBAR+MCFSIM_PADDR)) = 0x00ec;
+ *((volatile unsigned short *)MCFSIM_PADDR) = 0x00ec;
ppdata |= 0x0080;
- *((volatile unsigned short *)(MCF_MBAR+MCFSIM_PADAT)) = ppdata;
+ *((volatile unsigned short *)MCFSIM_PADAT) = ppdata;
outw(0x0001, ioaddr + BANK_SELECT);
outw(0x0001, ioaddr + BANK_SELECT);
outw(0x0067, ioaddr + BASE);
ppdata &= ~0x0080;
- *((volatile unsigned short *)(MCF_MBAR+MCFSIM_PADAT)) = ppdata;
+ *((volatile unsigned short *)MCFSIM_PADAT) = ppdata;
}
*((volatile unsigned short *)(MCF_MBAR+MCFSIM_CSCR3)) = 0x1180;
diff --git a/arch/m68k/include/asm/nettel.h b/arch/m68k/include/asm/nettel.h
index 0299f6a2deeb..4dec2d9fb994 100644
--- a/arch/m68k/include/asm/nettel.h
+++ b/arch/m68k/include/asm/nettel.h
@@ -48,14 +48,14 @@ extern volatile unsigned short ppdata;
static __inline__ unsigned int mcf_getppdata(void)
{
volatile unsigned short *pp;
- pp = (volatile unsigned short *) (MCF_MBAR + MCFSIM_PADAT);
+ pp = (volatile unsigned short *) MCFSIM_PADAT;
return((unsigned int) *pp);
}
static __inline__ void mcf_setppdata(unsigned int mask, unsigned int bits)
{
volatile unsigned short *pp;
- pp = (volatile unsigned short *) (MCF_MBAR + MCFSIM_PADAT);
+ pp = (volatile unsigned short *) MCFSIM_PADAT;
ppdata = (ppdata & ~mask) | bits;
*pp = ppdata;
}
diff --git a/arch/m68k/include/asm/pinmux.h b/arch/m68k/include/asm/pinmux.h
new file mode 100644
index 000000000000..119ee686dbd1
--- /dev/null
+++ b/arch/m68k/include/asm/pinmux.h
@@ -0,0 +1,30 @@
+/*
+ * Coldfire generic GPIO pinmux support.
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef pinmux_h
+#define pinmux_h
+
+#define MCFPINMUX_NONE -1
+
+extern int mcf_pinmux_request(unsigned, unsigned);
+extern void mcf_pinmux_release(unsigned, unsigned);
+
+static inline int mcf_pinmux_is_valid(unsigned pinmux)
+{
+ return pinmux != MCFPINMUX_NONE;
+}
+
+#endif
+
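A board driver would claim its pin function before programming the peripheral and release it afterwards. A rough usage sketch under the assumption that mcf_pinmux_request() returns 0 on success; that convention is not spelled out by this header, and the pinmux/function numbers are hypothetical board values:

	/* Sketch only: pinmux and func are placeholder board values, and the
	 * 0-on-success return convention is an assumption, not defined here. */
	static int example_claim_pins(unsigned pinmux, unsigned func)
	{
		if (!mcf_pinmux_is_valid(pinmux))
			return 0;	/* nothing to set up on this board */
		return mcf_pinmux_request(pinmux, func);
	}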
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index fc3f2c22f2b8..74fd674b15ad 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -1,5 +1,170 @@
-#ifdef __uClinux__
-#include "processor_no.h"
+/*
+ * include/asm-m68k/processor.h
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ */
+
+#ifndef __ASM_M68K_PROCESSOR_H
+#define __ASM_M68K_PROCESSOR_H
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+#include <linux/thread_info.h>
+#include <asm/segment.h>
+#include <asm/fpu.h>
+#include <asm/ptrace.h>
+
+static inline unsigned long rdusp(void)
+{
+#ifdef CONFIG_COLDFIRE
+ extern unsigned int sw_usp;
+ return sw_usp;
#else
-#include "processor_mm.h"
+ unsigned long usp;
+ __asm__ __volatile__("move %/usp,%0" : "=a" (usp));
+ return usp;
+#endif
+}
+
+static inline void wrusp(unsigned long usp)
+{
+#ifdef CONFIG_COLDFIRE
+ extern unsigned int sw_usp;
+ sw_usp = usp;
+#else
+ __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
+#endif
+}
+
+/*
+ * User space process size: 3.75GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+#ifndef CONFIG_SUN3
+#define TASK_SIZE (0xF0000000UL)
+#else
+#define TASK_SIZE (0x0E000000UL)
+#endif
+
+#ifdef __KERNEL__
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+#endif
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#ifdef CONFIG_MMU
+#ifndef CONFIG_SUN3
+#define TASK_UNMAPPED_BASE 0xC0000000UL
+#else
+#define TASK_UNMAPPED_BASE 0x0A000000UL
+#endif
+#define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
+#else
+#define TASK_UNMAPPED_BASE 0
+#endif
+
+struct thread_struct {
+ unsigned long ksp; /* kernel stack pointer */
+ unsigned long usp; /* user stack pointer */
+ unsigned short sr; /* saved status register */
+ unsigned short fs; /* saved fs (sfc, dfc) */
+ unsigned long crp[2]; /* cpu root pointer */
+ unsigned long esp0; /* points to SR of stack frame */
+ unsigned long faddr; /* info about last fault */
+ int signo, code;
+ unsigned long fp[8*3];
+ unsigned long fpcntl[3]; /* fp control regs */
+ unsigned char fpstate[FPSTATESIZE]; /* floating point state */
+ struct thread_info info;
+};
+
+#define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+ .sr = PS_S, \
+ .fs = __KERNEL_DS, \
+ .info = INIT_THREAD_INFO(init_task), \
+}
+
+#ifdef CONFIG_MMU
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+static inline void start_thread(struct pt_regs * regs, unsigned long pc,
+ unsigned long usp)
+{
+ /* reads from user space */
+ set_fs(USER_DS);
+
+ regs->pc = pc;
+ regs->sr &= ~0x2000;
+ wrusp(usp);
+}
+
+#else
+
+/*
+ * Coldfire stacks need to be re-aligned on trap exit; conventional
+ * 68k can handle this case cleanly.
+ */
+#ifdef CONFIG_COLDFIRE
+#define reformat(_regs) do { (_regs)->format = 0x4; } while(0)
+#else
+#define reformat(_regs) do { } while (0)
+#endif
+
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ set_fs(USER_DS); /* reads from user space */ \
+ (_regs)->pc = (_pc); \
+ ((struct switch_stack *)(_regs))[-1].a6 = 0; \
+ reformat(_regs); \
+ if (current->mm) \
+ (_regs)->d5 = current->mm->start_data; \
+ (_regs)->sr &= ~0x2000; \
+ wrusp(_usp); \
+} while(0)
+
+#endif
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/*
+ * Free current thread data structures etc..
+ */
+static inline void exit_thread(void)
+{
+}
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) \
+ ({ \
+ unsigned long eip = 0; \
+ if ((tsk)->thread.esp0 > PAGE_SIZE && \
+ (virt_addr_valid((tsk)->thread.esp0))) \
+ eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
+ eip; })
+#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
+
+#define cpu_relax() barrier()
+
#endif
diff --git a/arch/m68k/include/asm/processor_mm.h b/arch/m68k/include/asm/processor_mm.h
deleted file mode 100644
index 1f61ef53f0e0..000000000000
--- a/arch/m68k/include/asm/processor_mm.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * include/asm-m68k/processor.h
- *
- * Copyright (C) 1995 Hamish Macdonald
- */
-
-#ifndef __ASM_M68K_PROCESSOR_H
-#define __ASM_M68K_PROCESSOR_H
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-#include <linux/thread_info.h>
-#include <asm/segment.h>
-#include <asm/fpu.h>
-#include <asm/ptrace.h>
-
-static inline unsigned long rdusp(void)
-{
- unsigned long usp;
-
- __asm__ __volatile__("move %/usp,%0" : "=a" (usp));
- return usp;
-}
-
-static inline void wrusp(unsigned long usp)
-{
- __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
-}
-
-/*
- * User space process size: 3.75GB. This is hardcoded into a few places,
- * so don't change it unless you know what you are doing.
- */
-#ifndef CONFIG_SUN3
-#define TASK_SIZE (0xF0000000UL)
-#else
-#define TASK_SIZE (0x0E000000UL)
-#endif
-
-#ifdef __KERNEL__
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX STACK_TOP
-#endif
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#ifndef CONFIG_SUN3
-#define TASK_UNMAPPED_BASE 0xC0000000UL
-#else
-#define TASK_UNMAPPED_BASE 0x0A000000UL
-#endif
-#define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
-
-struct thread_struct {
- unsigned long ksp; /* kernel stack pointer */
- unsigned long usp; /* user stack pointer */
- unsigned short sr; /* saved status register */
- unsigned short fs; /* saved fs (sfc, dfc) */
- unsigned long crp[2]; /* cpu root pointer */
- unsigned long esp0; /* points to SR of stack frame */
- unsigned long faddr; /* info about last fault */
- int signo, code;
- unsigned long fp[8*3];
- unsigned long fpcntl[3]; /* fp control regs */
- unsigned char fpstate[FPSTATESIZE]; /* floating point state */
- struct thread_info info;
-};
-
-#define INIT_THREAD { \
- .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
- .sr = PS_S, \
- .fs = __KERNEL_DS, \
- .info = INIT_THREAD_INFO(init_task), \
-}
-
-/*
- * Do necessary setup to start up a newly executed thread.
- */
-static inline void start_thread(struct pt_regs * regs, unsigned long pc,
- unsigned long usp)
-{
- /* reads from user space */
- set_fs(USER_DS);
-
- regs->pc = pc;
- regs->sr &= ~0x2000;
- wrusp(usp);
-}
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
-}
-
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
-
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-/*
- * Free current thread data structures etc..
- */
-static inline void exit_thread(void)
-{
-}
-
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
-unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk) \
- ({ \
- unsigned long eip = 0; \
- if ((tsk)->thread.esp0 > PAGE_SIZE && \
- (virt_addr_valid((tsk)->thread.esp0))) \
- eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
- eip; })
-#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
-
-#define cpu_relax() barrier()
-
-#endif
diff --git a/arch/m68k/include/asm/processor_no.h b/arch/m68k/include/asm/processor_no.h
deleted file mode 100644
index 7a1e0ba35f5a..000000000000
--- a/arch/m68k/include/asm/processor_no.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * include/asm-m68knommu/processor.h
- *
- * Copyright (C) 1995 Hamish Macdonald
- */
-
-#ifndef __ASM_M68K_PROCESSOR_H
-#define __ASM_M68K_PROCESSOR_H
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-#include <linux/compiler.h>
-#include <linux/threads.h>
-#include <asm/types.h>
-#include <asm/segment.h>
-#include <asm/fpu.h>
-#include <asm/ptrace.h>
-#include <asm/current.h>
-
-static inline unsigned long rdusp(void)
-{
-#ifdef CONFIG_COLDFIRE
- extern unsigned int sw_usp;
- return(sw_usp);
-#else
- unsigned long usp;
- __asm__ __volatile__("move %/usp,%0" : "=a" (usp));
- return usp;
-#endif
-}
-
-static inline void wrusp(unsigned long usp)
-{
-#ifdef CONFIG_COLDFIRE
- extern unsigned int sw_usp;
- sw_usp = usp;
-#else
- __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
-#endif
-}
-
-/*
- * User space process size: 3.75GB. This is hardcoded into a few places,
- * so don't change it unless you know what you are doing.
- */
-#define TASK_SIZE (0xF0000000UL)
-
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's. We won't be using it
- */
-#define TASK_UNMAPPED_BASE 0
-
-/*
- * if you change this structure, you must change the code and offsets
- * in m68k/machasm.S
- */
-
-struct thread_struct {
- unsigned long ksp; /* kernel stack pointer */
- unsigned long usp; /* user stack pointer */
- unsigned short sr; /* saved status register */
- unsigned short fs; /* saved fs (sfc, dfc) */
- unsigned long crp[2]; /* cpu root pointer */
- unsigned long esp0; /* points to SR of stack frame */
- unsigned long fp[8*3];
- unsigned long fpcntl[3]; /* fp control regs */
- unsigned char fpstate[FPSTATESIZE]; /* floating point state */
-};
-
-#define INIT_THREAD { \
- .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
- .sr = PS_S, \
- .fs = __KERNEL_DS, \
-}
-
-/*
- * Coldfire stacks need to be re-aligned on trap exit, conventional
- * 68k can handle this case cleanly.
- */
-#if defined(CONFIG_COLDFIRE)
-#define reformat(_regs) do { (_regs)->format = 0x4; } while(0)
-#else
-#define reformat(_regs) do { } while (0)
-#endif
-
-/*
- * Do necessary setup to start up a newly executed thread.
- *
- * pass the data segment into user programs if it exists,
- * it can't hurt anything as far as I can tell
- */
-#define start_thread(_regs, _pc, _usp) \
-do { \
- set_fs(USER_DS); /* reads from user space */ \
- (_regs)->pc = (_pc); \
- ((struct switch_stack *)(_regs))[-1].a6 = 0; \
- reformat(_regs); \
- if (current->mm) \
- (_regs)->d5 = current->mm->start_data; \
- (_regs)->sr &= ~0x2000; \
- wrusp(_usp); \
-} while(0)
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
-}
-
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
-
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-/*
- * Free current thread data structures etc..
- */
-static inline void exit_thread(void)
-{
-}
-
-unsigned long thread_saved_pc(struct task_struct *tsk);
-unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk) \
- ({ \
- unsigned long eip = 0; \
- if ((tsk)->thread.esp0 > PAGE_SIZE && \
- (virt_addr_valid((tsk)->thread.esp0))) \
- eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
- eip; })
-#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
-
-#define cpu_relax() barrier()
-
-#endif
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 01d212bb05a6..905a797ada93 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -87,6 +87,7 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
/* Stabs debugging sections. */
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index c192f773db96..47d04be322aa 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -82,6 +82,7 @@ __init_begin = .;
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
.crap : {
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 534376299a99..e2201b90aa22 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -47,6 +47,10 @@ config GENERIC_FIND_NEXT_BIT
bool
default y
+config GENERIC_GPIO
+ bool
+ default n
+
config GENERIC_HWEIGHT
bool
default y
@@ -182,6 +186,8 @@ config M527x
config COLDFIRE
bool
depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407)
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
default y
config CLOCK_SET
diff --git a/arch/m68knommu/kernel/irq.c b/arch/m68knommu/kernel/irq.c
index 56e0f4c55a67..3e56f60882b5 100644
--- a/arch/m68knommu/kernel/irq.c
+++ b/arch/m68knommu/kernel/irq.c
@@ -34,27 +34,6 @@ void ack_bad_irq(unsigned int irq)
printk(KERN_ERR "IRQ: unexpected irq=%d\n", irq);
}
-static struct irq_chip m_irq_chip = {
- .name = "M68K-INTC",
- .enable = enable_vector,
- .disable = disable_vector,
- .ack = ack_vector,
-};
-
-void __init init_IRQ(void)
-{
- int irq;
-
- init_vectors();
-
- for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &m_irq_chip;
- }
-}
-
int show_interrupts(struct seq_file *p, void *v)
{
struct irqaction *ap;
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index d182b2f72211..c2aa717de08a 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -69,7 +69,7 @@ static unsigned long read_rtc_mmss(void)
if ((year += 1900) < 1970)
year += 100;
- return mktime(year, mon, day, hour, min, sec);;
+ return mktime(year, mon, day, hour, min, sec);
}
unsigned long read_persistent_clock(void)
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index b7fe505e358d..68111a61a77f 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -188,6 +188,7 @@ SECTIONS {
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
.bss : {
diff --git a/arch/m68knommu/lib/checksum.c b/arch/m68knommu/lib/checksum.c
index 269d83bfbbe1..8bf012aec81c 100644
--- a/arch/m68knommu/lib/checksum.c
+++ b/arch/m68knommu/lib/checksum.c
@@ -127,15 +127,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
EXPORT_SYMBOL(csum_partial);
/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-__sum16 ip_compute_csum(const void *buff, int len)
-{
- return (__force __sum16)~do_csum(buff,len);
-}
-
-/*
* copy from fs while checksumming, otherwise like csum_partial
*/
diff --git a/arch/m68knommu/platform/5206/Makefile b/arch/m68knommu/platform/5206/Makefile
index a439d9ab3f27..113c33390064 100644
--- a/arch/m68knommu/platform/5206/Makefile
+++ b/arch/m68knommu/platform/5206/Makefile
@@ -14,5 +14,5 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y := config.o
+obj-y := config.o gpio.o
diff --git a/arch/m68knommu/platform/5206/config.c b/arch/m68knommu/platform/5206/config.c
index f6f79874e9af..9c335465e66d 100644
--- a/arch/m68knommu/platform/5206/config.c
+++ b/arch/m68knommu/platform/5206/config.c
@@ -49,11 +49,11 @@ static void __init m5206_uart_init_line(int line, int irq)
if (line == 0) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCFUART_BASE1 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART1);
+ mcf_mapirq2imr(irq, MCFINTC_UART0);
} else if (line == 1) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCFUART_BASE2 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART2);
+ mcf_mapirq2imr(irq, MCFINTC_UART1);
}
}
@@ -68,38 +68,19 @@ static void __init m5206_uarts_init(void)
/***************************************************************************/
-void mcf_autovector(unsigned int vec)
+static void __init m5206_timers_init(void)
{
- volatile unsigned char *mbar;
- unsigned char icr;
-
- if ((vec >= 25) && (vec <= 31)) {
- vec -= 25;
- mbar = (volatile unsigned char *) MCF_MBAR;
- icr = MCFSIM_ICR_AUTOVEC | (vec << 3);
- *(mbar + MCFSIM_ICR1 + vec) = icr;
- vec = 0x1 << (vec + 1);
- mcf_setimr(mcf_getimr() & ~vec);
- }
-}
-
-/***************************************************************************/
-
-void mcf_settimericr(unsigned int timer, unsigned int level)
-{
- volatile unsigned char *icrp;
- unsigned int icr, imr;
-
- if (timer <= 2) {
- switch (timer) {
- case 2: icr = MCFSIM_TIMER2ICR; imr = MCFSIM_IMR_TIMER2; break;
- default: icr = MCFSIM_TIMER1ICR; imr = MCFSIM_IMR_TIMER1; break;
- }
-
- icrp = (volatile unsigned char *) (MCF_MBAR + icr);
- *icrp = MCFSIM_ICR_AUTOVEC | (level << 2) | MCFSIM_ICR_PRI3;
- mcf_setimr(mcf_getimr() & ~imr);
- }
+ /* Timer1 is always used as system timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER1ICR);
+ mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
+
+#ifdef CONFIG_HIGHPROFILE
+ /* Timer2 is to be used as a high speed profile timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER2ICR);
+ mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
+#endif
}
/***************************************************************************/
@@ -117,15 +98,20 @@ void m5206_cpu_reset(void)
void __init config_BSP(char *commandp, int size)
{
- mcf_setimr(MCFSIM_IMR_MASKALL);
mach_reset = m5206_cpu_reset;
+ m5206_timers_init();
+ m5206_uarts_init();
+
+ /* Only support the external interrupts on their primary level */
+ mcf_mapirq2imr(25, MCFINTC_EINT1);
+ mcf_mapirq2imr(28, MCFINTC_EINT4);
+ mcf_mapirq2imr(31, MCFINTC_EINT7);
}
/***************************************************************************/
static int __init init_BSP(void)
{
- m5206_uarts_init();
platform_add_devices(m5206_devices, ARRAY_SIZE(m5206_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5206/gpio.c b/arch/m68knommu/platform/5206/gpio.c
new file mode 100644
index 000000000000..60f779ce1651
--- /dev/null
+++ b/arch/m68knommu/platform/5206/gpio.c
@@ -0,0 +1,49 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "PP",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 8,
+ },
+ .pddr = MCFSIM_PADDR,
+ .podr = MCFSIM_PADAT,
+ .ppdr = MCFSIM_PADAT,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
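Once the "PP" chip above is registered, drivers reach its pins through the generic gpiolib calls rather than poking MCFSIM_PADAT directly. A hedged consumer sketch, assuming <linux/gpio.h> and an offset below .ngpio; the pin number and label are arbitrary examples, not values mandated by this patch:

	/* Sketch only: toggle one pin of the "PP" bank registered above. */
	#include <linux/gpio.h>

	static int __init example_gpio_use(void)
	{
		int ret = gpio_request(3, "example");	/* pin 3 is an arbitrary choice */
		if (ret)
			return ret;
		gpio_direction_output(3, 1);		/* drive the line high */
		gpio_set_value(3, 0);			/* ...then low */
		gpio_free(3);
		return 0;
	}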
diff --git a/arch/m68knommu/platform/5206e/Makefile b/arch/m68knommu/platform/5206e/Makefile
index a439d9ab3f27..113c33390064 100644
--- a/arch/m68knommu/platform/5206e/Makefile
+++ b/arch/m68knommu/platform/5206e/Makefile
@@ -14,5 +14,5 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y := config.o
+obj-y := config.o gpio.o
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c
index 65887799db81..0f41ba82a3b5 100644
--- a/arch/m68knommu/platform/5206e/config.c
+++ b/arch/m68knommu/platform/5206e/config.c
@@ -15,6 +15,7 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
+#include <asm/mcfuart.h>
#include <asm/mcfdma.h>
#include <asm/mcfuart.h>
@@ -49,11 +50,11 @@ static void __init m5206e_uart_init_line(int line, int irq)
if (line == 0) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCFUART_BASE1 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART1);
+ mcf_mapirq2imr(irq, MCFINTC_UART0);
} else if (line == 1) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCFUART_BASE2 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART2);
+ mcf_mapirq2imr(irq, MCFINTC_UART1);
}
}
@@ -68,38 +69,19 @@ static void __init m5206e_uarts_init(void)
/***************************************************************************/
-void mcf_autovector(unsigned int vec)
-{
- volatile unsigned char *mbar;
- unsigned char icr;
-
- if ((vec >= 25) && (vec <= 31)) {
- vec -= 25;
- mbar = (volatile unsigned char *) MCF_MBAR;
- icr = MCFSIM_ICR_AUTOVEC | (vec << 3);
- *(mbar + MCFSIM_ICR1 + vec) = icr;
- vec = 0x1 << (vec + 1);
- mcf_setimr(mcf_getimr() & ~vec);
- }
-}
-
-/***************************************************************************/
-
-void mcf_settimericr(unsigned int timer, unsigned int level)
+static void __init m5206e_timers_init(void)
{
- volatile unsigned char *icrp;
- unsigned int icr, imr;
-
- if (timer <= 2) {
- switch (timer) {
- case 2: icr = MCFSIM_TIMER2ICR; imr = MCFSIM_IMR_TIMER2; break;
- default: icr = MCFSIM_TIMER1ICR; imr = MCFSIM_IMR_TIMER1; break;
- }
-
- icrp = (volatile unsigned char *) (MCF_MBAR + icr);
- *icrp = MCFSIM_ICR_AUTOVEC | (level << 2) | MCFSIM_ICR_PRI3;
- mcf_setimr(mcf_getimr() & ~imr);
- }
+ /* Timer1 is always used as system timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER1ICR);
+ mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
+
+#ifdef CONFIG_HIGHPROFILE
+ /* Timer2 is to be used as a high speed profile timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER2ICR);
+ mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
+#endif
}
/***************************************************************************/
@@ -117,8 +99,6 @@ void m5206e_cpu_reset(void)
void __init config_BSP(char *commandp, int size)
{
- mcf_setimr(MCFSIM_IMR_MASKALL);
-
#if defined(CONFIG_NETtel)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0004000, size);
@@ -126,13 +106,19 @@ void __init config_BSP(char *commandp, int size)
#endif /* CONFIG_NETtel */
mach_reset = m5206e_cpu_reset;
+ m5206e_timers_init();
+ m5206e_uarts_init();
+
+ /* Only support the external interrupts on their primary level */
+ mcf_mapirq2imr(25, MCFINTC_EINT1);
+ mcf_mapirq2imr(28, MCFINTC_EINT4);
+ mcf_mapirq2imr(31, MCFINTC_EINT7);
}
/***************************************************************************/
static int __init init_BSP(void)
{
- m5206e_uarts_init();
platform_add_devices(m5206e_devices, ARRAY_SIZE(m5206e_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5206e/gpio.c b/arch/m68knommu/platform/5206e/gpio.c
new file mode 100644
index 000000000000..60f779ce1651
--- /dev/null
+++ b/arch/m68knommu/platform/5206e/gpio.c
@@ -0,0 +1,49 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "PP",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 8,
+ },
+ .pddr = MCFSIM_PADDR,
+ .podr = MCFSIM_PADAT,
+ .ppdr = MCFSIM_PADAT,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/520x/Makefile b/arch/m68knommu/platform/520x/Makefile
index a50e76acc8fd..435ab3483dc1 100644
--- a/arch/m68knommu/platform/520x/Makefile
+++ b/arch/m68knommu/platform/520x/Makefile
@@ -14,4 +14,4 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y := config.o
+obj-y := config.o gpio.o
diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c
index 1c43a8aec69b..92614de42cd3 100644
--- a/arch/m68knommu/platform/520x/config.c
+++ b/arch/m68knommu/platform/520x/config.c
@@ -81,20 +81,11 @@ static struct platform_device *m520x_devices[] __initdata = {
/***************************************************************************/
-#define INTC0 (MCF_MBAR + MCFICM_INTC0)
-
static void __init m520x_uart_init_line(int line, int irq)
{
- u32 imr;
u16 par;
u8 par2;
- writeb(0x03, INTC0 + MCFINTC_ICR0 + MCFINT_UART0 + line);
-
- imr = readl(INTC0 + MCFINTC_IMRL);
- imr &= ~((1 << (irq - MCFINT_VECBASE)) | 1);
- writel(imr, INTC0 + MCFINTC_IMRL);
-
switch (line) {
case 0:
par = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART);
@@ -131,18 +122,8 @@ static void __init m520x_uarts_init(void)
static void __init m520x_fec_init(void)
{
- u32 imr;
u8 v;
- /* Unmask FEC interrupts at ColdFire interrupt controller */
- writeb(0x4, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 36);
- writeb(0x4, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 40);
- writeb(0x4, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 42);
-
- imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH);
- imr &= ~0x0001FFF0;
- writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH);
-
/* Set multi-function pins to ethernet mode */
v = readb(MCF_IPSBAR + MCF_GPIO_PAR_FEC);
writeb(v | 0xf0, MCF_IPSBAR + MCF_GPIO_PAR_FEC);
@@ -153,17 +134,6 @@ static void __init m520x_fec_init(void)
/***************************************************************************/
-/*
- * Program the vector to be an auto-vectored.
- */
-
-void mcf_autovector(unsigned int vec)
-{
- /* Everything is auto-vectored on the 520x devices */
-}
-
-/***************************************************************************/
-
static void m520x_cpu_reset(void)
{
local_irq_disable();
diff --git a/arch/m68knommu/platform/520x/gpio.c b/arch/m68knommu/platform/520x/gpio.c
new file mode 100644
index 000000000000..15b5bb62a698
--- /dev/null
+++ b/arch/m68knommu/platform/520x/gpio.c
@@ -0,0 +1,211 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "PIRQ",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 8,
+ },
+ .pddr = MCFEPORT_EPDDR,
+ .podr = MCFEPORT_EPDR,
+ .ppdr = MCFEPORT_EPPDR,
+ },
+ {
+ .gpio_chip = {
+ .label = "BUSCTL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 8,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_BUSCTL,
+ .podr = MCFGPIO_PODR_BUSCTL,
+ .ppdr = MCFGPIO_PPDSDR_BUSCTL,
+ .setr = MCFGPIO_PPDSDR_BUSCTL,
+ .clrr = MCFGPIO_PCLRR_BUSCTL,
+ },
+ {
+ .gpio_chip = {
+ .label = "BE",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 16,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_BE,
+ .podr = MCFGPIO_PODR_BE,
+ .ppdr = MCFGPIO_PPDSDR_BE,
+ .setr = MCFGPIO_PPDSDR_BE,
+ .clrr = MCFGPIO_PCLRR_BE,
+ },
+ {
+ .gpio_chip = {
+ .label = "CS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 25,
+ .ngpio = 3,
+ },
+ .pddr = MCFGPIO_PDDR_CS,
+ .podr = MCFGPIO_PODR_CS,
+ .ppdr = MCFGPIO_PPDSDR_CS,
+ .setr = MCFGPIO_PPDSDR_CS,
+ .clrr = MCFGPIO_PCLRR_CS,
+ },
+ {
+ .gpio_chip = {
+ .label = "FECI2C",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 32,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_FECI2C,
+ .podr = MCFGPIO_PODR_FECI2C,
+ .ppdr = MCFGPIO_PPDSDR_FECI2C,
+ .setr = MCFGPIO_PPDSDR_FECI2C,
+ .clrr = MCFGPIO_PCLRR_FECI2C,
+ },
+ {
+ .gpio_chip = {
+ .label = "QSPI",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 40,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_QSPI,
+ .podr = MCFGPIO_PODR_QSPI,
+ .ppdr = MCFGPIO_PPDSDR_QSPI,
+ .setr = MCFGPIO_PPDSDR_QSPI,
+ .clrr = MCFGPIO_PCLRR_QSPI,
+ },
+ {
+ .gpio_chip = {
+ .label = "TIMER",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 48,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_TIMER,
+ .podr = MCFGPIO_PODR_TIMER,
+ .ppdr = MCFGPIO_PPDSDR_TIMER,
+ .setr = MCFGPIO_PPDSDR_TIMER,
+ .clrr = MCFGPIO_PCLRR_TIMER,
+ },
+ {
+ .gpio_chip = {
+ .label = "UART",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 56,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_UART,
+ .podr = MCFGPIO_PODR_UART,
+ .ppdr = MCFGPIO_PPDSDR_UART,
+ .setr = MCFGPIO_PPDSDR_UART,
+ .clrr = MCFGPIO_PCLRR_UART,
+ },
+ {
+ .gpio_chip = {
+ .label = "FECH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 64,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_FECH,
+ .podr = MCFGPIO_PODR_FECH,
+ .ppdr = MCFGPIO_PPDSDR_FECH,
+ .setr = MCFGPIO_PPDSDR_FECH,
+ .clrr = MCFGPIO_PCLRR_FECH,
+ },
+ {
+ .gpio_chip = {
+ .label = "FECL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 72,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_FECL,
+ .podr = MCFGPIO_PODR_FECL,
+ .ppdr = MCFGPIO_PPDSDR_FECL,
+ .setr = MCFGPIO_PPDSDR_FECL,
+ .clrr = MCFGPIO_PCLRR_FECL,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/523x/Makefile b/arch/m68knommu/platform/523x/Makefile
index 5694d593f029..b8f9b45440c2 100644
--- a/arch/m68knommu/platform/523x/Makefile
+++ b/arch/m68knommu/platform/523x/Makefile
@@ -14,4 +14,4 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y := config.o
+obj-y := config.o gpio.o
diff --git a/arch/m68knommu/platform/523x/config.c b/arch/m68knommu/platform/523x/config.c
index 961fefebca14..00f08d2e43b0 100644
--- a/arch/m68knommu/platform/523x/config.c
+++ b/arch/m68knommu/platform/523x/config.c
@@ -82,66 +82,6 @@ static struct platform_device *m523x_devices[] __initdata = {
/***************************************************************************/
-#define INTC0 (MCF_MBAR + MCFICM_INTC0)
-
-static void __init m523x_uart_init_line(int line, int irq)
-{
- u32 imr;
-
- if ((line < 0) || (line > 2))
- return;
-
- writeb(0x30+line, (INTC0 + MCFINTC_ICR0 + MCFINT_UART0 + line));
-
- imr = readl(INTC0 + MCFINTC_IMRL);
- imr &= ~((1 << (irq - MCFINT_VECBASE)) | 1);
- writel(imr, INTC0 + MCFINTC_IMRL);
-}
-
-static void __init m523x_uarts_init(void)
-{
- const int nrlines = ARRAY_SIZE(m523x_uart_platform);
- int line;
-
- for (line = 0; (line < nrlines); line++)
- m523x_uart_init_line(line, m523x_uart_platform[line].irq);
-}
-
-/***************************************************************************/
-
-static void __init m523x_fec_init(void)
-{
- u32 imr;
-
- /* Unmask FEC interrupts at ColdFire interrupt controller */
- writeb(0x28, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 23);
- writeb(0x27, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 27);
- writeb(0x26, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 29);
-
- imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH);
- imr &= ~0xf;
- writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH);
- imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL);
- imr &= ~0xff800001;
- writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL);
-}
-
-/***************************************************************************/
-
-void mcf_disableall(void)
-{
- *((volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH)) = 0xffffffff;
- *((volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL)) = 0xffffffff;
-}
-
-/***************************************************************************/
-
-void mcf_autovector(unsigned int vec)
-{
- /* Everything is auto-vectored on the 523x */
-}
-/***************************************************************************/
-
static void m523x_cpu_reset(void)
{
local_irq_disable();
@@ -152,10 +92,7 @@ static void m523x_cpu_reset(void)
void __init config_BSP(char *commandp, int size)
{
- mcf_disableall();
mach_reset = m523x_cpu_reset;
- m523x_uarts_init();
- m523x_fec_init();
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/523x/gpio.c b/arch/m68knommu/platform/523x/gpio.c
new file mode 100644
index 000000000000..f02840d54d3c
--- /dev/null
+++ b/arch/m68knommu/platform/523x/gpio.c
@@ -0,0 +1,283 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "PIRQ",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 8,
+ },
+ .pddr = MCFEPORT_EPDDR,
+ .podr = MCFEPORT_EPDR,
+ .ppdr = MCFEPORT_EPPDR,
+ },
+ {
+ .gpio_chip = {
+ .label = "ADDR",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 13,
+ .ngpio = 3,
+ },
+ .pddr = MCFGPIO_PDDR_ADDR,
+ .podr = MCFGPIO_PODR_ADDR,
+ .ppdr = MCFGPIO_PPDSDR_ADDR,
+ .setr = MCFGPIO_PPDSDR_ADDR,
+ .clrr = MCFGPIO_PCLRR_ADDR,
+ },
+ {
+ .gpio_chip = {
+ .label = "DATAH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 16,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_DATAH,
+ .podr = MCFGPIO_PODR_DATAH,
+ .ppdr = MCFGPIO_PPDSDR_DATAH,
+ .setr = MCFGPIO_PPDSDR_DATAH,
+ .clrr = MCFGPIO_PCLRR_DATAH,
+ },
+ {
+ .gpio_chip = {
+ .label = "DATAL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 24,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_DATAL,
+ .podr = MCFGPIO_PODR_DATAL,
+ .ppdr = MCFGPIO_PPDSDR_DATAL,
+ .setr = MCFGPIO_PPDSDR_DATAL,
+ .clrr = MCFGPIO_PCLRR_DATAL,
+ },
+ {
+ .gpio_chip = {
+ .label = "BUSCTL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 32,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_BUSCTL,
+ .podr = MCFGPIO_PODR_BUSCTL,
+ .ppdr = MCFGPIO_PPDSDR_BUSCTL,
+ .setr = MCFGPIO_PPDSDR_BUSCTL,
+ .clrr = MCFGPIO_PCLRR_BUSCTL,
+ },
+ {
+ .gpio_chip = {
+ .label = "BS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 40,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_BS,
+ .podr = MCFGPIO_PODR_BS,
+ .ppdr = MCFGPIO_PPDSDR_BS,
+ .setr = MCFGPIO_PPDSDR_BS,
+ .clrr = MCFGPIO_PCLRR_BS,
+ },
+ {
+ .gpio_chip = {
+ .label = "CS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 49,
+ .ngpio = 7,
+ },
+ .pddr = MCFGPIO_PDDR_CS,
+ .podr = MCFGPIO_PODR_CS,
+ .ppdr = MCFGPIO_PPDSDR_CS,
+ .setr = MCFGPIO_PPDSDR_CS,
+ .clrr = MCFGPIO_PCLRR_CS,
+ },
+ {
+ .gpio_chip = {
+ .label = "SDRAM",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 56,
+ .ngpio = 6,
+ },
+ .pddr = MCFGPIO_PDDR_SDRAM,
+ .podr = MCFGPIO_PODR_SDRAM,
+ .ppdr = MCFGPIO_PPDSDR_SDRAM,
+ .setr = MCFGPIO_PPDSDR_SDRAM,
+ .clrr = MCFGPIO_PCLRR_SDRAM,
+ },
+ {
+ .gpio_chip = {
+ .label = "FECI2C",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 64,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_FECI2C,
+ .podr = MCFGPIO_PODR_FECI2C,
+ .ppdr = MCFGPIO_PPDSDR_FECI2C,
+ .setr = MCFGPIO_PPDSDR_FECI2C,
+ .clrr = MCFGPIO_PCLRR_FECI2C,
+ },
+ {
+ .gpio_chip = {
+ .label = "UARTH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 72,
+ .ngpio = 2,
+ },
+ .pddr = MCFGPIO_PDDR_UARTH,
+ .podr = MCFGPIO_PODR_UARTH,
+ .ppdr = MCFGPIO_PPDSDR_UARTH,
+ .setr = MCFGPIO_PPDSDR_UARTH,
+ .clrr = MCFGPIO_PCLRR_UARTH,
+ },
+ {
+ .gpio_chip = {
+ .label = "UARTL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 80,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_UARTL,
+ .podr = MCFGPIO_PODR_UARTL,
+ .ppdr = MCFGPIO_PPDSDR_UARTL,
+ .setr = MCFGPIO_PPDSDR_UARTL,
+ .clrr = MCFGPIO_PCLRR_UARTL,
+ },
+ {
+ .gpio_chip = {
+ .label = "QSPI",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 88,
+ .ngpio = 5,
+ },
+ .pddr = MCFGPIO_PDDR_QSPI,
+ .podr = MCFGPIO_PODR_QSPI,
+ .ppdr = MCFGPIO_PPDSDR_QSPI,
+ .setr = MCFGPIO_PPDSDR_QSPI,
+ .clrr = MCFGPIO_PCLRR_QSPI,
+ },
+ {
+ .gpio_chip = {
+ .label = "TIMER",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 96,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_TIMER,
+ .podr = MCFGPIO_PODR_TIMER,
+ .ppdr = MCFGPIO_PPDSDR_TIMER,
+ .setr = MCFGPIO_PPDSDR_TIMER,
+ .clrr = MCFGPIO_PCLRR_TIMER,
+ },
+ {
+ .gpio_chip = {
+ .label = "ETPU",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 104,
+ .ngpio = 3,
+ },
+ .pddr = MCFGPIO_PDDR_ETPU,
+ .podr = MCFGPIO_PODR_ETPU,
+ .ppdr = MCFGPIO_PPDSDR_ETPU,
+ .setr = MCFGPIO_PPDSDR_ETPU,
+ .clrr = MCFGPIO_PCLRR_ETPU,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/5249/Makefile b/arch/m68knommu/platform/5249/Makefile
index a439d9ab3f27..f2bd0b4fd0ab 100644
--- a/arch/m68knommu/platform/5249/Makefile
+++ b/arch/m68knommu/platform/5249/Makefile
@@ -14,5 +14,5 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y := config.o
+obj-y := config.o intc2.o gpio.o
diff --git a/arch/m68knommu/platform/5249/config.c b/arch/m68knommu/platform/5249/config.c
index 93d998825925..646f5ba462fc 100644
--- a/arch/m68knommu/platform/5249/config.c
+++ b/arch/m68knommu/platform/5249/config.c
@@ -48,11 +48,11 @@ static void __init m5249_uart_init_line(int line, int irq)
if (line == 0) {
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE1 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART1);
+ mcf_mapirq2imr(irq, MCFINTC_UART0);
} else if (line == 1) {
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE2 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART2);
+ mcf_mapirq2imr(irq, MCFINTC_UART1);
}
}
@@ -65,38 +65,21 @@ static void __init m5249_uarts_init(void)
m5249_uart_init_line(line, m5249_uart_platform[line].irq);
}
-
/***************************************************************************/
-void mcf_autovector(unsigned int vec)
+static void __init m5249_timers_init(void)
{
- volatile unsigned char *mbar;
-
- if ((vec >= 25) && (vec <= 31)) {
- mbar = (volatile unsigned char *) MCF_MBAR;
- vec = 0x1 << (vec - 24);
- *(mbar + MCFSIM_AVR) |= vec;
- mcf_setimr(mcf_getimr() & ~vec);
- }
-}
-
-/***************************************************************************/
-
-void mcf_settimericr(unsigned int timer, unsigned int level)
-{
- volatile unsigned char *icrp;
- unsigned int icr, imr;
-
- if (timer <= 2) {
- switch (timer) {
- case 2: icr = MCFSIM_TIMER2ICR; imr = MCFSIM_IMR_TIMER2; break;
- default: icr = MCFSIM_TIMER1ICR; imr = MCFSIM_IMR_TIMER1; break;
- }
-
- icrp = (volatile unsigned char *) (MCF_MBAR + icr);
- *icrp = MCFSIM_ICR_AUTOVEC | (level << 2) | MCFSIM_ICR_PRI3;
- mcf_setimr(mcf_getimr() & ~imr);
- }
+ /* Timer1 is always used as system timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER1ICR);
+ mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
+
+#ifdef CONFIG_HIGHPROFILE
+ /* Timer2 is to be used as a high speed profile timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER2ICR);
+ mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
+#endif
}
/***************************************************************************/
@@ -114,15 +97,15 @@ void m5249_cpu_reset(void)
void __init config_BSP(char *commandp, int size)
{
- mcf_setimr(MCFSIM_IMR_MASKALL);
mach_reset = m5249_cpu_reset;
+ m5249_timers_init();
+ m5249_uarts_init();
}
/***************************************************************************/
static int __init init_BSP(void)
{
- m5249_uarts_init();
platform_add_devices(m5249_devices, ARRAY_SIZE(m5249_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5249/gpio.c b/arch/m68knommu/platform/5249/gpio.c
new file mode 100644
index 000000000000..c611eab8b3b6
--- /dev/null
+++ b/arch/m68knommu/platform/5249/gpio.c
@@ -0,0 +1,65 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "GPIO0",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 32,
+ },
+ .pddr = MCFSIM2_GPIOENABLE,
+ .podr = MCFSIM2_GPIOWRITE,
+ .ppdr = MCFSIM2_GPIOREAD,
+ },
+ {
+ .gpio_chip = {
+ .label = "GPIO1",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .base = 32,
+ .ngpio = 32,
+ },
+ .pddr = MCFSIM2_GPIO1ENABLE,
+ .podr = MCFSIM2_GPIO1WRITE,
+ .ppdr = MCFSIM2_GPIO1READ,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/5249/intc2.c b/arch/m68knommu/platform/5249/intc2.c
new file mode 100644
index 000000000000..d09d9da04537
--- /dev/null
+++ b/arch/m68knommu/platform/5249/intc2.c
@@ -0,0 +1,59 @@
+/*
+ * intc2.c -- support for the 2nd INTC controller of the 5249
+ *
+ * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+
+static void intc2_irq_gpio_mask(unsigned int irq)
+{
+ u32 imr;
+ imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
+ imr &= ~(0x1 << (irq - MCFINTC2_GPIOIRQ0));
+ writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
+}
+
+static void intc2_irq_gpio_unmask(unsigned int irq)
+{
+ u32 imr;
+ imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
+ imr |= (0x1 << (irq - MCFINTC2_GPIOIRQ0));
+ writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
+}
+
+static void intc2_irq_gpio_ack(unsigned int irq)
+{
+ writel(0x1 << (irq - MCFINTC2_GPIOIRQ0), MCF_MBAR2 + MCFSIM2_GPIOINTCLEAR);
+}
+
+static struct irq_chip intc2_irq_gpio_chip = {
+ .name = "CF-INTC2",
+ .mask = intc2_irq_gpio_mask,
+ .unmask = intc2_irq_gpio_unmask,
+ .ack = intc2_irq_gpio_ack,
+};
+
+static int __init mcf_intc2_init(void)
+{
+ int irq;
+
+ /* GPIO interrupt sources */
+ for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++)
+ irq_desc[irq].chip = &intc2_irq_gpio_chip;
+
+ return 0;
+}
+
+arch_initcall(mcf_intc2_init);
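With the mask/unmask/ack callbacks above attached to the MCFINTC2_GPIOIRQ0..7 descriptors, a driver claims one of those lines through the ordinary request_irq() path. A hedged sketch; the handler name, flags and dev_id are illustrative, and it assumes <linux/interrupt.h> plus the MCFINTC2_GPIOIRQ0 definition this file already relies on:

	/* Sketch only: hook the first 5249 GPIO interrupt line. */
	static irqreturn_t example_gpio_isr(int irq, void *dev_id)
	{
		/* handle the GPIO event */
		return IRQ_HANDLED;
	}

	static int __init example_gpio_irq_init(void)
	{
		return request_irq(MCFINTC2_GPIOIRQ0, example_gpio_isr, 0,
				   "example-gpio", NULL);
	}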
diff --git a/arch/m68knommu/platform/5272/Makefile b/arch/m68knommu/platform/5272/Makefile
index 26135d92b34d..3d90e6d92459 100644
--- a/arch/m68knommu/platform/5272/Makefile
+++ b/arch/m68knommu/platform/5272/Makefile
@@ -14,5 +14,5 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y := config.o
+obj-y := config.o gpio.o
diff --git a/arch/m68knommu/platform/5272/config.c b/arch/m68knommu/platform/5272/config.c
index 5f95fcde05fd..b16add9aa4e5 100644
--- a/arch/m68knommu/platform/5272/config.c
+++ b/arch/m68knommu/platform/5272/config.c
@@ -20,12 +20,6 @@
/***************************************************************************/
-extern unsigned int mcf_timervector;
-extern unsigned int mcf_profilevector;
-extern unsigned int mcf_timerlevel;
-
-/***************************************************************************/
-
/*
* Some platforms need software versions of the GPIO data registers.
*/
@@ -148,21 +142,15 @@ void mcf_disableall(void)
/***************************************************************************/
-void mcf_autovector(unsigned int vec)
-{
- /* Everything is auto-vectored on the 5272 */
-}
-
-/***************************************************************************/
-
-void mcf_settimericr(int timer, int level)
+static void __init m5272_timers_init(void)
{
- volatile unsigned long *icrp;
+ /* Timer1 @ level6 is always used as system timer */
+ writel((0x8 | 0x6) << ((4 - 1) * 4), MCF_MBAR + MCFSIM_ICR1);
- if ((timer >= 1 ) && (timer <= 4)) {
- icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
- *icrp = (0x8 | level) << ((4 - timer) * 4);
- }
+#ifdef CONFIG_HIGHPROFILE
+ /* Timer2 @ level7 is to be used as a high speed profile timer */
+ writel((0x8 | 0x7) << ((4 - 2) * 4), MCF_MBAR + MCFSIM_ICR1);
+#endif
}
/***************************************************************************/
@@ -202,9 +190,8 @@ void __init config_BSP(char *commandp, int size)
commandp[size-1] = 0;
#endif
- mcf_timervector = 69;
- mcf_profilevector = 70;
mach_reset = m5272_cpu_reset;
+ m5272_timers_init();
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/5272/gpio.c b/arch/m68knommu/platform/5272/gpio.c
new file mode 100644
index 000000000000..459db89a89cc
--- /dev/null
+++ b/arch/m68knommu/platform/5272/gpio.c
@@ -0,0 +1,81 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "PA",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 16,
+ },
+ .pddr = MCFSIM_PADDR,
+ .podr = MCFSIM_PADAT,
+ .ppdr = MCFSIM_PADAT,
+ },
+ {
+ .gpio_chip = {
+ .label = "PB",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .base = 16,
+ .ngpio = 16,
+ },
+ .pddr = MCFSIM_PBDDR,
+ .podr = MCFSIM_PBDAT,
+ .ppdr = MCFSIM_PBDAT,
+ },
+ {
+ .gpio_chip = {
+ .label = "PC",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .base = 32,
+ .ngpio = 16,
+ },
+ .pddr = MCFSIM_PCDDR,
+ .podr = MCFSIM_PCDAT,
+ .ppdr = MCFSIM_PCDAT,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/527x/Makefile b/arch/m68knommu/platform/527x/Makefile
index 26135d92b34d..3d90e6d92459 100644
--- a/arch/m68knommu/platform/527x/Makefile
+++ b/arch/m68knommu/platform/527x/Makefile
@@ -14,5 +14,5 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y := config.o
+obj-y := config.o gpio.o
diff --git a/arch/m68knommu/platform/527x/config.c b/arch/m68knommu/platform/527x/config.c
index f746439cfd3e..fa51be172830 100644
--- a/arch/m68knommu/platform/527x/config.c
+++ b/arch/m68knommu/platform/527x/config.c
@@ -116,23 +116,13 @@ static struct platform_device *m527x_devices[] __initdata = {
/***************************************************************************/
-#define INTC0 (MCF_MBAR + MCFICM_INTC0)
-
static void __init m527x_uart_init_line(int line, int irq)
{
u16 sepmask;
- u32 imr;
if ((line < 0) || (line > 2))
return;
- /* level 6, line based priority */
- writeb(0x30+line, INTC0 + MCFINTC_ICR0 + MCFINT_UART0 + line);
-
- imr = readl(INTC0 + MCFINTC_IMRL);
- imr &= ~((1 << (irq - MCFINT_VECBASE)) | 1);
- writel(imr, INTC0 + MCFINTC_IMRL);
-
/*
* External Pin Mask Setting & Enable External Pin for Interface
*/
@@ -157,32 +147,11 @@ static void __init m527x_uarts_init(void)
/***************************************************************************/
-static void __init m527x_fec_irq_init(int nr)
-{
- unsigned long base;
- u32 imr;
-
- base = MCF_IPSBAR + (nr ? MCFICM_INTC1 : MCFICM_INTC0);
-
- writeb(0x28, base + MCFINTC_ICR0 + 23);
- writeb(0x27, base + MCFINTC_ICR0 + 27);
- writeb(0x26, base + MCFINTC_ICR0 + 29);
-
- imr = readl(base + MCFINTC_IMRH);
- imr &= ~0xf;
- writel(imr, base + MCFINTC_IMRH);
- imr = readl(base + MCFINTC_IMRL);
- imr &= ~0xff800001;
- writel(imr, base + MCFINTC_IMRL);
-}
-
static void __init m527x_fec_init(void)
{
u16 par;
u8 v;
- m527x_fec_irq_init(0);
-
/* Set multi-function pins to ethernet mode for fec0 */
#if defined(CONFIG_M5271)
v = readb(MCF_IPSBAR + 0x100047);
@@ -195,8 +164,6 @@ static void __init m527x_fec_init(void)
#endif
#ifdef CONFIG_FEC2
- m527x_fec_irq_init(1);
-
/* Set multi-function pins to ethernet mode for fec1 */
par = readw(MCF_IPSBAR + 0x100082);
writew(par | 0xa0, MCF_IPSBAR + 0x100082);
@@ -207,21 +174,6 @@ static void __init m527x_fec_init(void)
/***************************************************************************/
-void mcf_disableall(void)
-{
- *((volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH)) = 0xffffffff;
- *((volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL)) = 0xffffffff;
-}
-
-/***************************************************************************/
-
-void mcf_autovector(unsigned int vec)
-{
- /* Everything is auto-vectored on the 5272 */
-}
-
-/***************************************************************************/
-
static void m527x_cpu_reset(void)
{
local_irq_disable();
@@ -232,7 +184,6 @@ static void m527x_cpu_reset(void)
void __init config_BSP(char *commandp, int size)
{
- mcf_disableall();
mach_reset = m527x_cpu_reset;
m527x_uarts_init();
m527x_fec_init();
diff --git a/arch/m68knommu/platform/527x/gpio.c b/arch/m68knommu/platform/527x/gpio.c
new file mode 100644
index 000000000000..1028142851ac
--- /dev/null
+++ b/arch/m68knommu/platform/527x/gpio.c
@@ -0,0 +1,607 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+#if defined(CONFIG_M5271)
+ {
+ .gpio_chip = {
+ .label = "PIRQ",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 8,
+ },
+ .pddr = MCFEPORT_EPDDR,
+ .podr = MCFEPORT_EPDR,
+ .ppdr = MCFEPORT_EPPDR,
+ },
+ {
+ .gpio_chip = {
+ .label = "ADDR",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 13,
+ .ngpio = 3,
+ },
+ .pddr = MCFGPIO_PDDR_ADDR,
+ .podr = MCFGPIO_PODR_ADDR,
+ .ppdr = MCFGPIO_PPDSDR_ADDR,
+ .setr = MCFGPIO_PPDSDR_ADDR,
+ .clrr = MCFGPIO_PCLRR_ADDR,
+ },
+ {
+ .gpio_chip = {
+ .label = "DATAH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 16,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_DATAH,
+ .podr = MCFGPIO_PODR_DATAH,
+ .ppdr = MCFGPIO_PPDSDR_DATAH,
+ .setr = MCFGPIO_PPDSDR_DATAH,
+ .clrr = MCFGPIO_PCLRR_DATAH,
+ },
+ {
+ .gpio_chip = {
+ .label = "DATAL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 24,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_DATAL,
+ .podr = MCFGPIO_PODR_DATAL,
+ .ppdr = MCFGPIO_PPDSDR_DATAL,
+ .setr = MCFGPIO_PPDSDR_DATAL,
+ .clrr = MCFGPIO_PCLRR_DATAL,
+ },
+ {
+ .gpio_chip = {
+ .label = "BUSCTL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 32,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_BUSCTL,
+ .podr = MCFGPIO_PODR_BUSCTL,
+ .ppdr = MCFGPIO_PPDSDR_BUSCTL,
+ .setr = MCFGPIO_PPDSDR_BUSCTL,
+ .clrr = MCFGPIO_PCLRR_BUSCTL,
+ },
+ {
+ .gpio_chip = {
+ .label = "BS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 40,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_BS,
+ .podr = MCFGPIO_PODR_BS,
+ .ppdr = MCFGPIO_PPDSDR_BS,
+ .setr = MCFGPIO_PPDSDR_BS,
+ .clrr = MCFGPIO_PCLRR_BS,
+ },
+ {
+ .gpio_chip = {
+ .label = "CS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 49,
+ .ngpio = 7,
+ },
+ .pddr = MCFGPIO_PDDR_CS,
+ .podr = MCFGPIO_PODR_CS,
+ .ppdr = MCFGPIO_PPDSDR_CS,
+ .setr = MCFGPIO_PPDSDR_CS,
+ .clrr = MCFGPIO_PCLRR_CS,
+ },
+ {
+ .gpio_chip = {
+ .label = "SDRAM",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 56,
+ .ngpio = 6,
+ },
+ .pddr = MCFGPIO_PDDR_SDRAM,
+ .podr = MCFGPIO_PODR_SDRAM,
+ .ppdr = MCFGPIO_PPDSDR_SDRAM,
+ .setr = MCFGPIO_PPDSDR_SDRAM,
+ .clrr = MCFGPIO_PCLRR_SDRAM,
+ },
+ {
+ .gpio_chip = {
+ .label = "FECI2C",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 64,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_FECI2C,
+ .podr = MCFGPIO_PODR_FECI2C,
+ .ppdr = MCFGPIO_PPDSDR_FECI2C,
+ .setr = MCFGPIO_PPDSDR_FECI2C,
+ .clrr = MCFGPIO_PCLRR_FECI2C,
+ },
+ {
+ .gpio_chip = {
+ .label = "UARTH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 72,
+ .ngpio = 2,
+ },
+ .pddr = MCFGPIO_PDDR_UARTH,
+ .podr = MCFGPIO_PODR_UARTH,
+ .ppdr = MCFGPIO_PPDSDR_UARTH,
+ .setr = MCFGPIO_PPDSDR_UARTH,
+ .clrr = MCFGPIO_PCLRR_UARTH,
+ },
+ {
+ .gpio_chip = {
+ .label = "UARTL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 80,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_UARTL,
+ .podr = MCFGPIO_PODR_UARTL,
+ .ppdr = MCFGPIO_PPDSDR_UARTL,
+ .setr = MCFGPIO_PPDSDR_UARTL,
+ .clrr = MCFGPIO_PCLRR_UARTL,
+ },
+ {
+ .gpio_chip = {
+ .label = "QSPI",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 88,
+ .ngpio = 5,
+ },
+ .pddr = MCFGPIO_PDDR_QSPI,
+ .podr = MCFGPIO_PODR_QSPI,
+ .ppdr = MCFGPIO_PPDSDR_QSPI,
+ .setr = MCFGPIO_PPDSDR_QSPI,
+ .clrr = MCFGPIO_PCLRR_QSPI,
+ },
+ {
+ .gpio_chip = {
+ .label = "TIMER",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 96,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_TIMER,
+ .podr = MCFGPIO_PODR_TIMER,
+ .ppdr = MCFGPIO_PPDSDR_TIMER,
+ .setr = MCFGPIO_PPDSDR_TIMER,
+ .clrr = MCFGPIO_PCLRR_TIMER,
+ },
+#elif defined(CONFIG_M5275)
+ {
+ .gpio_chip = {
+ .label = "PIRQ",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 8,
+ },
+ .pddr = MCFEPORT_EPDDR,
+ .podr = MCFEPORT_EPDR,
+ .ppdr = MCFEPORT_EPPDR,
+ },
+ {
+ .gpio_chip = {
+ .label = "BUSCTL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 8,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_BUSCTL,
+ .podr = MCFGPIO_PODR_BUSCTL,
+ .ppdr = MCFGPIO_PPDSDR_BUSCTL,
+ .setr = MCFGPIO_PPDSDR_BUSCTL,
+ .clrr = MCFGPIO_PCLRR_BUSCTL,
+ },
+ {
+ .gpio_chip = {
+ .label = "ADDR",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 21,
+ .ngpio = 3,
+ },
+ .pddr = MCFGPIO_PDDR_ADDR,
+ .podr = MCFGPIO_PODR_ADDR,
+ .ppdr = MCFGPIO_PPDSDR_ADDR,
+ .setr = MCFGPIO_PPDSDR_ADDR,
+ .clrr = MCFGPIO_PCLRR_ADDR,
+ },
+ {
+ .gpio_chip = {
+ .label = "CS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 25,
+ .ngpio = 7,
+ },
+ .pddr = MCFGPIO_PDDR_CS,
+ .podr = MCFGPIO_PODR_CS,
+ .ppdr = MCFGPIO_PPDSDR_CS,
+ .setr = MCFGPIO_PPDSDR_CS,
+ .clrr = MCFGPIO_PCLRR_CS,
+ },
+ {
+ .gpio_chip = {
+ .label = "FEC0H",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 32,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_FEC0H,
+ .podr = MCFGPIO_PODR_FEC0H,
+ .ppdr = MCFGPIO_PPDSDR_FEC0H,
+ .setr = MCFGPIO_PPDSDR_FEC0H,
+ .clrr = MCFGPIO_PCLRR_FEC0H,
+ },
+ {
+ .gpio_chip = {
+ .label = "FEC0L",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 40,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_FEC0L,
+ .podr = MCFGPIO_PODR_FEC0L,
+ .ppdr = MCFGPIO_PPDSDR_FEC0L,
+ .setr = MCFGPIO_PPDSDR_FEC0L,
+ .clrr = MCFGPIO_PCLRR_FEC0L,
+ },
+ {
+ .gpio_chip = {
+ .label = "FECI2C",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 48,
+ .ngpio = 6,
+ },
+ .pddr = MCFGPIO_PDDR_FECI2C,
+ .podr = MCFGPIO_PODR_FECI2C,
+ .ppdr = MCFGPIO_PPDSDR_FECI2C,
+ .setr = MCFGPIO_PPDSDR_FECI2C,
+ .clrr = MCFGPIO_PCLRR_FECI2C,
+ },
+ {
+ .gpio_chip = {
+ .label = "QSPI",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 56,
+ .ngpio = 7,
+ },
+ .pddr = MCFGPIO_PDDR_QSPI,
+ .podr = MCFGPIO_PODR_QSPI,
+ .ppdr = MCFGPIO_PPDSDR_QSPI,
+ .setr = MCFGPIO_PPDSDR_QSPI,
+ .clrr = MCFGPIO_PCLRR_QSPI,
+ },
+ {
+ .gpio_chip = {
+ .label = "SDRAM",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 64,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_SDRAM,
+ .podr = MCFGPIO_PODR_SDRAM,
+ .ppdr = MCFGPIO_PPDSDR_SDRAM,
+ .setr = MCFGPIO_PPDSDR_SDRAM,
+ .clrr = MCFGPIO_PCLRR_SDRAM,
+ },
+ {
+ .gpio_chip = {
+ .label = "TIMERH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 72,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_TIMERH,
+ .podr = MCFGPIO_PODR_TIMERH,
+ .ppdr = MCFGPIO_PPDSDR_TIMERH,
+ .setr = MCFGPIO_PPDSDR_TIMERH,
+ .clrr = MCFGPIO_PCLRR_TIMERH,
+ },
+ {
+ .gpio_chip = {
+ .label = "TIMERL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 80,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_TIMERL,
+ .podr = MCFGPIO_PODR_TIMERL,
+ .ppdr = MCFGPIO_PPDSDR_TIMERL,
+ .setr = MCFGPIO_PPDSDR_TIMERL,
+ .clrr = MCFGPIO_PCLRR_TIMERL,
+ },
+ {
+ .gpio_chip = {
+ .label = "UARTL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 88,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_UARTL,
+ .podr = MCFGPIO_PODR_UARTL,
+ .ppdr = MCFGPIO_PPDSDR_UARTL,
+ .setr = MCFGPIO_PPDSDR_UARTL,
+ .clrr = MCFGPIO_PCLRR_UARTL,
+ },
+ {
+ .gpio_chip = {
+ .label = "FEC1H",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 96,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_FEC1H,
+ .podr = MCFGPIO_PODR_FEC1H,
+ .ppdr = MCFGPIO_PPDSDR_FEC1H,
+ .setr = MCFGPIO_PPDSDR_FEC1H,
+ .clrr = MCFGPIO_PCLRR_FEC1H,
+ },
+ {
+ .gpio_chip = {
+ .label = "FEC1L",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 104,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_FEC1L,
+ .podr = MCFGPIO_PODR_FEC1L,
+ .ppdr = MCFGPIO_PPDSDR_FEC1L,
+ .setr = MCFGPIO_PPDSDR_FEC1L,
+ .clrr = MCFGPIO_PCLRR_FEC1L,
+ },
+ {
+ .gpio_chip = {
+ .label = "BS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 114,
+ .ngpio = 2,
+ },
+ .pddr = MCFGPIO_PDDR_BS,
+ .podr = MCFGPIO_PODR_BS,
+ .ppdr = MCFGPIO_PPDSDR_BS,
+ .setr = MCFGPIO_PPDSDR_BS,
+ .clrr = MCFGPIO_PCLRR_BS,
+ },
+ {
+ .gpio_chip = {
+ .label = "IRQ",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 121,
+ .ngpio = 7,
+ },
+ .pddr = MCFGPIO_PDDR_IRQ,
+ .podr = MCFGPIO_PODR_IRQ,
+ .ppdr = MCFGPIO_PPDSDR_IRQ,
+ .setr = MCFGPIO_PPDSDR_IRQ,
+ .clrr = MCFGPIO_PCLRR_IRQ,
+ },
+ {
+ .gpio_chip = {
+ .label = "USBH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 128,
+ .ngpio = 1,
+ },
+ .pddr = MCFGPIO_PDDR_USBH,
+ .podr = MCFGPIO_PODR_USBH,
+ .ppdr = MCFGPIO_PPDSDR_USBH,
+ .setr = MCFGPIO_PPDSDR_USBH,
+ .clrr = MCFGPIO_PCLRR_USBH,
+ },
+ {
+ .gpio_chip = {
+ .label = "USBL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 136,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_USBL,
+ .podr = MCFGPIO_PODR_USBL,
+ .ppdr = MCFGPIO_PPDSDR_USBL,
+ .setr = MCFGPIO_PPDSDR_USBL,
+ .clrr = MCFGPIO_PCLRR_USBL,
+ },
+ {
+ .gpio_chip = {
+ .label = "UARTH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 144,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_UARTH,
+ .podr = MCFGPIO_PODR_UARTH,
+ .ppdr = MCFGPIO_PPDSDR_UARTH,
+ .setr = MCFGPIO_PPDSDR_UARTH,
+ .clrr = MCFGPIO_PCLRR_UARTH,
+ },
+#endif
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/528x/Makefile b/arch/m68knommu/platform/528x/Makefile
index 26135d92b34d..3d90e6d92459 100644
--- a/arch/m68knommu/platform/528x/Makefile
+++ b/arch/m68knommu/platform/528x/Makefile
@@ -14,5 +14,5 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y := config.o
+obj-y := config.o gpio.o
diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c
index a1d1a61c4fe6..6e608d1836f1 100644
--- a/arch/m68knommu/platform/528x/config.c
+++ b/arch/m68knommu/platform/528x/config.c
@@ -3,8 +3,8 @@
/*
* linux/arch/m68knommu/platform/528x/config.c
*
- * Sub-architcture dependant initialization code for the Motorola
- * 5280 and 5282 CPUs.
+ * Sub-architecture dependent initialization code for the Freescale
+ * 5280, 5281 and 5282 CPUs.
*
* Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 2001-2003, SnapGear Inc. (www.snapgear.com)
@@ -15,20 +15,13 @@
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/flash.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
-#ifdef CONFIG_MTD_PARTITIONS
-#include <linux/mtd/partitions.h>
-#endif
-
/***************************************************************************/
static struct mcf_platform_uart m528x_uart_platform[] = {
@@ -91,23 +84,13 @@ static struct platform_device *m528x_devices[] __initdata = {
/***************************************************************************/
-#define INTC0 (MCF_MBAR + MCFICM_INTC0)
-
static void __init m528x_uart_init_line(int line, int irq)
{
u8 port;
- u32 imr;
if ((line < 0) || (line > 2))
return;
- /* level 6, line based priority */
- writeb(0x30+line, INTC0 + MCFINTC_ICR0 + MCFINT_UART0 + line);
-
- imr = readl(INTC0 + MCFINTC_IMRL);
- imr &= ~((1 << (irq - MCFINT_VECBASE)) | 1);
- writel(imr, INTC0 + MCFINTC_IMRL);
-
/* make sure PUAPAR is set for UART0 and UART1 */
if (line < 2) {
port = readb(MCF_MBAR + MCF5282_GPIO_PUAPAR);
@@ -129,21 +112,8 @@ static void __init m528x_uarts_init(void)
static void __init m528x_fec_init(void)
{
- u32 imr;
u16 v16;
- /* Unmask FEC interrupts at ColdFire interrupt controller */
- writeb(0x28, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 23);
- writeb(0x27, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 27);
- writeb(0x26, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0 + 29);
-
- imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH);
- imr &= ~0xf;
- writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH);
- imr = readl(MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL);
- imr &= ~0xff800001;
- writel(imr, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL);
-
/* Set multi-function pins to ethernet mode for fec0 */
v16 = readw(MCF_IPSBAR + 0x100056);
writew(v16 | 0xf00, MCF_IPSBAR + 0x100056);
@@ -152,21 +122,6 @@ static void __init m528x_fec_init(void)
/***************************************************************************/
-void mcf_disableall(void)
-{
- *((volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH)) = 0xffffffff;
- *((volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL)) = 0xffffffff;
-}
-
-/***************************************************************************/
-
-void mcf_autovector(unsigned int vec)
-{
- /* Everything is auto-vectored on the 5272 */
-}
-
-/***************************************************************************/
-
static void m528x_cpu_reset(void)
{
local_irq_disable();
@@ -204,8 +159,6 @@ void wildfiremod_halt(void)
void __init config_BSP(char *commandp, int size)
{
- mcf_disableall();
-
#ifdef CONFIG_WILDFIRE
mach_halt = wildfire_halt;
#endif
diff --git a/arch/m68knommu/platform/528x/gpio.c b/arch/m68knommu/platform/528x/gpio.c
new file mode 100644
index 000000000000..ec593950696a
--- /dev/null
+++ b/arch/m68knommu/platform/528x/gpio.c
@@ -0,0 +1,438 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "NQ",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .base = 1,
+ .ngpio = 8,
+ },
+ .pddr = MCFEPORT_EPDDR,
+ .podr = MCFEPORT_EPDR,
+ .ppdr = MCFEPORT_EPPDR,
+ },
+ {
+ .gpio_chip = {
+ .label = "TA",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 8,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPTA_GPTDDR,
+ .podr = MCFGPTA_GPTPORT,
+ .ppdr = MCFGPTB_GPTPORT,
+ },
+ {
+ .gpio_chip = {
+ .label = "TB",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 16,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPTB_GPTDDR,
+ .podr = MCFGPTB_GPTPORT,
+ .ppdr = MCFGPTB_GPTPORT,
+ },
+ {
+ .gpio_chip = {
+ .label = "QA",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 24,
+ .ngpio = 4,
+ },
+ .pddr = MCFQADC_DDRQA,
+ .podr = MCFQADC_PORTQA,
+ .ppdr = MCFQADC_PORTQA,
+ },
+ {
+ .gpio_chip = {
+ .label = "QB",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 32,
+ .ngpio = 4,
+ },
+ .pddr = MCFQADC_DDRQB,
+ .podr = MCFQADC_PORTQB,
+ .ppdr = MCFQADC_PORTQB,
+ },
+ {
+ .gpio_chip = {
+ .label = "A",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 40,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRA,
+ .podr = MCFGPIO_PORTA,
+ .ppdr = MCFGPIO_PORTAP,
+ .setr = MCFGPIO_SETA,
+ .clrr = MCFGPIO_CLRA,
+ },
+ {
+ .gpio_chip = {
+ .label = "B",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 48,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRB,
+ .podr = MCFGPIO_PORTB,
+ .ppdr = MCFGPIO_PORTBP,
+ .setr = MCFGPIO_SETB,
+ .clrr = MCFGPIO_CLRB,
+ },
+ {
+ .gpio_chip = {
+ .label = "C",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 56,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRC,
+ .podr = MCFGPIO_PORTC,
+ .ppdr = MCFGPIO_PORTCP,
+ .setr = MCFGPIO_SETC,
+ .clrr = MCFGPIO_CLRC,
+ },
+ {
+ .gpio_chip = {
+ .label = "D",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 64,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRD,
+ .podr = MCFGPIO_PORTD,
+ .ppdr = MCFGPIO_PORTDP,
+ .setr = MCFGPIO_SETD,
+ .clrr = MCFGPIO_CLRD,
+ },
+ {
+ .gpio_chip = {
+ .label = "E",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 72,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRE,
+ .podr = MCFGPIO_PORTE,
+ .ppdr = MCFGPIO_PORTEP,
+ .setr = MCFGPIO_SETE,
+ .clrr = MCFGPIO_CLRE,
+ },
+ {
+ .gpio_chip = {
+ .label = "F",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 80,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRF,
+ .podr = MCFGPIO_PORTF,
+ .ppdr = MCFGPIO_PORTFP,
+ .setr = MCFGPIO_SETF,
+ .clrr = MCFGPIO_CLRF,
+ },
+ {
+ .gpio_chip = {
+ .label = "G",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 88,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRG,
+ .podr = MCFGPIO_PORTG,
+ .ppdr = MCFGPIO_PORTGP,
+ .setr = MCFGPIO_SETG,
+ .clrr = MCFGPIO_CLRG,
+ },
+ {
+ .gpio_chip = {
+ .label = "H",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 96,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRH,
+ .podr = MCFGPIO_PORTH,
+ .ppdr = MCFGPIO_PORTHP,
+ .setr = MCFGPIO_SETH,
+ .clrr = MCFGPIO_CLRH,
+ },
+ {
+ .gpio_chip = {
+ .label = "J",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 104,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRJ,
+ .podr = MCFGPIO_PORTJ,
+ .ppdr = MCFGPIO_PORTJP,
+ .setr = MCFGPIO_SETJ,
+ .clrr = MCFGPIO_CLRJ,
+ },
+ {
+ .gpio_chip = {
+ .label = "DD",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 112,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDRDD,
+ .podr = MCFGPIO_PORTDD,
+ .ppdr = MCFGPIO_PORTDDP,
+ .setr = MCFGPIO_SETDD,
+ .clrr = MCFGPIO_CLRDD,
+ },
+ {
+ .gpio_chip = {
+ .label = "EH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 120,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDREH,
+ .podr = MCFGPIO_PORTEH,
+ .ppdr = MCFGPIO_PORTEHP,
+ .setr = MCFGPIO_SETEH,
+ .clrr = MCFGPIO_CLREH,
+ },
+ {
+ .gpio_chip = {
+ .label = "EL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 128,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_DDREL,
+ .podr = MCFGPIO_PORTEL,
+ .ppdr = MCFGPIO_PORTELP,
+ .setr = MCFGPIO_SETEL,
+ .clrr = MCFGPIO_CLREL,
+ },
+ {
+ .gpio_chip = {
+ .label = "AS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 136,
+ .ngpio = 6,
+ },
+ .pddr = MCFGPIO_DDRAS,
+ .podr = MCFGPIO_PORTAS,
+ .ppdr = MCFGPIO_PORTASP,
+ .setr = MCFGPIO_SETAS,
+ .clrr = MCFGPIO_CLRAS,
+ },
+ {
+ .gpio_chip = {
+ .label = "QS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 144,
+ .ngpio = 7,
+ },
+ .pddr = MCFGPIO_DDRQS,
+ .podr = MCFGPIO_PORTQS,
+ .ppdr = MCFGPIO_PORTQSP,
+ .setr = MCFGPIO_SETQS,
+ .clrr = MCFGPIO_CLRQS,
+ },
+ {
+ .gpio_chip = {
+ .label = "SD",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 152,
+ .ngpio = 6,
+ },
+ .pddr = MCFGPIO_DDRSD,
+ .podr = MCFGPIO_PORTSD,
+ .ppdr = MCFGPIO_PORTSDP,
+ .setr = MCFGPIO_SETSD,
+ .clrr = MCFGPIO_CLRSD,
+ },
+ {
+ .gpio_chip = {
+ .label = "TC",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 160,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_DDRTC,
+ .podr = MCFGPIO_PORTTC,
+ .ppdr = MCFGPIO_PORTTCP,
+ .setr = MCFGPIO_SETTC,
+ .clrr = MCFGPIO_CLRTC,
+ },
+ {
+ .gpio_chip = {
+ .label = "TD",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 168,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_DDRTD,
+ .podr = MCFGPIO_PORTTD,
+ .ppdr = MCFGPIO_PORTTDP,
+ .setr = MCFGPIO_SETTD,
+ .clrr = MCFGPIO_CLRTD,
+ },
+ {
+ .gpio_chip = {
+ .label = "UA",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 176,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_DDRUA,
+ .podr = MCFGPIO_PORTUA,
+ .ppdr = MCFGPIO_PORTUAP,
+ .setr = MCFGPIO_SETUA,
+ .clrr = MCFGPIO_CLRUA,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/5307/Makefile b/arch/m68knommu/platform/5307/Makefile
index cfd586860fd8..667db6598451 100644
--- a/arch/m68knommu/platform/5307/Makefile
+++ b/arch/m68knommu/platform/5307/Makefile
@@ -14,5 +14,5 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y += config.o
+obj-y += config.o gpio.o
diff --git a/arch/m68knommu/platform/5307/config.c b/arch/m68knommu/platform/5307/config.c
index 39da9e9ff674..00900ac06a9c 100644
--- a/arch/m68knommu/platform/5307/config.c
+++ b/arch/m68knommu/platform/5307/config.c
@@ -21,12 +21,6 @@
/***************************************************************************/
-extern unsigned int mcf_timervector;
-extern unsigned int mcf_profilevector;
-extern unsigned int mcf_timerlevel;
-
-/***************************************************************************/
-
/*
* Some platforms need software versions of the GPIO data registers.
*/
@@ -64,11 +58,11 @@ static void __init m5307_uart_init_line(int line, int irq)
if (line == 0) {
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE1 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART1);
+ mcf_mapirq2imr(irq, MCFINTC_UART0);
} else if (line == 1) {
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE2 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART2);
+ mcf_mapirq2imr(irq, MCFINTC_UART1);
}
}
@@ -83,35 +77,19 @@ static void __init m5307_uarts_init(void)
/***************************************************************************/
-void mcf_autovector(unsigned int vec)
-{
- volatile unsigned char *mbar;
-
- if ((vec >= 25) && (vec <= 31)) {
- mbar = (volatile unsigned char *) MCF_MBAR;
- vec = 0x1 << (vec - 24);
- *(mbar + MCFSIM_AVR) |= vec;
- mcf_setimr(mcf_getimr() & ~vec);
- }
-}
-
-/***************************************************************************/
-
-void mcf_settimericr(unsigned int timer, unsigned int level)
+static void __init m5307_timers_init(void)
{
- volatile unsigned char *icrp;
- unsigned int icr, imr;
-
- if (timer <= 2) {
- switch (timer) {
- case 2: icr = MCFSIM_TIMER2ICR; imr = MCFSIM_IMR_TIMER2; break;
- default: icr = MCFSIM_TIMER1ICR; imr = MCFSIM_IMR_TIMER1; break;
- }
-
- icrp = (volatile unsigned char *) (MCF_MBAR + icr);
- *icrp = MCFSIM_ICR_AUTOVEC | (level << 2) | MCFSIM_ICR_PRI3;
- mcf_setimr(mcf_getimr() & ~imr);
- }
+ /* Timer1 is always used as system timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER1ICR);
+ mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
+
+#ifdef CONFIG_HIGHPROFILE
+ /* Timer2 is to be used as a high speed profile timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER2ICR);
+ mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
+#endif
}
/***************************************************************************/
@@ -129,20 +107,22 @@ void m5307_cpu_reset(void)
void __init config_BSP(char *commandp, int size)
{
- mcf_setimr(MCFSIM_IMR_MASKALL);
-
#if defined(CONFIG_NETtel) || \
defined(CONFIG_SECUREEDGEMP3) || defined(CONFIG_CLEOPATRA)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0004000, size);
commandp[size-1] = 0;
- /* Different timer setup - to prevent device clash */
- mcf_timervector = 30;
- mcf_profilevector = 31;
- mcf_timerlevel = 6;
#endif
mach_reset = m5307_cpu_reset;
+ m5307_timers_init();
+ m5307_uarts_init();
+
+ /* Only support the external interrupts on their primary level */
+ mcf_mapirq2imr(25, MCFINTC_EINT1);
+ mcf_mapirq2imr(27, MCFINTC_EINT3);
+ mcf_mapirq2imr(29, MCFINTC_EINT5);
+ mcf_mapirq2imr(31, MCFINTC_EINT7);
#ifdef CONFIG_BDM_DISABLE
/*
@@ -158,7 +138,6 @@ void __init config_BSP(char *commandp, int size)
static int __init init_BSP(void)
{
- m5307_uarts_init();
platform_add_devices(m5307_devices, ARRAY_SIZE(m5307_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5307/gpio.c b/arch/m68knommu/platform/5307/gpio.c
new file mode 100644
index 000000000000..8da5880e4066
--- /dev/null
+++ b/arch/m68knommu/platform/5307/gpio.c
@@ -0,0 +1,49 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "PP",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 16,
+ },
+ .pddr = MCFSIM_PADDR,
+ .podr = MCFSIM_PADAT,
+ .ppdr = MCFSIM_PADAT,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/532x/Makefile b/arch/m68knommu/platform/532x/Makefile
index e431912f5628..4cc23245bcd1 100644
--- a/arch/m68knommu/platform/532x/Makefile
+++ b/arch/m68knommu/platform/532x/Makefile
@@ -15,4 +15,4 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
#obj-y := config.o usb-mcf532x.o spi-mcf532x.o
-obj-y := config.o
+obj-y := config.o gpio.o
diff --git a/arch/m68knommu/platform/532x/config.c b/arch/m68knommu/platform/532x/config.c
index cdb761971f7a..d632948e64e5 100644
--- a/arch/m68knommu/platform/532x/config.c
+++ b/arch/m68knommu/platform/532x/config.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
@@ -31,12 +30,6 @@
/***************************************************************************/
-extern unsigned int mcf_timervector;
-extern unsigned int mcf_profilevector;
-extern unsigned int mcf_timerlevel;
-
-/***************************************************************************/
-
static struct mcf_platform_uart m532x_uart_platform[] = {
{
.mapbase = MCFUART_BASE1,
@@ -88,6 +81,7 @@ static struct platform_device m532x_fec = {
.num_resources = ARRAY_SIZE(m532x_fec_resources),
.resource = m532x_fec_resources,
};
+
static struct platform_device *m532x_devices[] __initdata = {
&m532x_uart,
&m532x_fec,
@@ -98,18 +92,11 @@ static struct platform_device *m532x_devices[] __initdata = {
static void __init m532x_uart_init_line(int line, int irq)
{
if (line == 0) {
- MCF_INTC0_ICR26 = 0x3;
- MCF_INTC0_CIMR = 26;
/* GPIO initialization */
MCF_GPIO_PAR_UART |= 0x000F;
} else if (line == 1) {
- MCF_INTC0_ICR27 = 0x3;
- MCF_INTC0_CIMR = 27;
/* GPIO initialization */
MCF_GPIO_PAR_UART |= 0x0FF0;
- } else if (line == 2) {
- MCF_INTC0_ICR28 = 0x3;
- MCF_INTC0_CIMR = 28;
}
}
@@ -125,14 +112,6 @@ static void __init m532x_uarts_init(void)
static void __init m532x_fec_init(void)
{
- /* Unmask FEC interrupts at ColdFire interrupt controller */
- MCF_INTC0_ICR36 = 0x2;
- MCF_INTC0_ICR40 = 0x2;
- MCF_INTC0_ICR42 = 0x2;
-
- MCF_INTC0_IMRH &= ~(MCF_INTC_IMRH_INT_MASK36 |
- MCF_INTC_IMRH_INT_MASK40 | MCF_INTC_IMRH_INT_MASK42);
-
/* Set multi-function pins to ethernet mode for fec0 */
MCF_GPIO_PAR_FECI2C |= (MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC |
MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO);
@@ -142,26 +121,6 @@ static void __init m532x_fec_init(void)
/***************************************************************************/
-void mcf_settimericr(unsigned int timer, unsigned int level)
-{
- volatile unsigned char *icrp;
- unsigned int icr;
- unsigned char irq;
-
- if (timer <= 2) {
- switch (timer) {
- case 2: irq = 33; icr = MCFSIM_ICR_TIMER2; break;
- default: irq = 32; icr = MCFSIM_ICR_TIMER1; break;
- }
-
- icrp = (volatile unsigned char *) (icr);
- *icrp = level;
- mcf_enable_irq0(irq);
- }
-}
-
-/***************************************************************************/
-
static void m532x_cpu_reset(void)
{
local_irq_disable();
@@ -172,8 +131,6 @@ static void m532x_cpu_reset(void)
void __init config_BSP(char *commandp, int size)
{
- mcf_setimr(MCFSIM_IMR_MASKALL);
-
#if !defined(CONFIG_BOOTPARAM)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0x4000, 4);
@@ -185,10 +142,6 @@ void __init config_BSP(char *commandp, int size)
}
#endif
- mcf_timervector = 64+32;
- mcf_profilevector = 64+33;
- mach_reset = m532x_cpu_reset;
-
#ifdef CONFIG_BDM_DISABLE
/*
* Disable the BDM clocking. This also turns off most of the rest of
@@ -438,8 +391,8 @@ void gpio_init(void)
/* Initialize TIN3 as a GPIO output to enable the write
half of the latch */
MCF_GPIO_PAR_TIMER = 0x00;
- MCF_GPIO_PDDR_TIMER = 0x08;
- MCF_GPIO_PCLRR_TIMER = 0x0;
+ __raw_writeb(0x08, MCFGPIO_PDDR_TIMER);
+ __raw_writeb(0x00, MCFGPIO_PCLRR_TIMER);
}
diff --git a/arch/m68knommu/platform/532x/gpio.c b/arch/m68knommu/platform/532x/gpio.c
new file mode 100644
index 000000000000..184b77382c3d
--- /dev/null
+++ b/arch/m68knommu/platform/532x/gpio.c
@@ -0,0 +1,337 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "PIRQ",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 8,
+ },
+ .pddr = MCFEPORT_EPDDR,
+ .podr = MCFEPORT_EPDR,
+ .ppdr = MCFEPORT_EPPDR,
+ },
+ {
+ .gpio_chip = {
+ .label = "FECH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 8,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_FECH,
+ .podr = MCFGPIO_PODR_FECH,
+ .ppdr = MCFGPIO_PPDSDR_FECH,
+ .setr = MCFGPIO_PPDSDR_FECH,
+ .clrr = MCFGPIO_PCLRR_FECH,
+ },
+ {
+ .gpio_chip = {
+ .label = "FECL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 16,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_FECL,
+ .podr = MCFGPIO_PODR_FECL,
+ .ppdr = MCFGPIO_PPDSDR_FECL,
+ .setr = MCFGPIO_PPDSDR_FECL,
+ .clrr = MCFGPIO_PCLRR_FECL,
+ },
+ {
+ .gpio_chip = {
+ .label = "SSI",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 24,
+ .ngpio = 5,
+ },
+ .pddr = MCFGPIO_PDDR_SSI,
+ .podr = MCFGPIO_PODR_SSI,
+ .ppdr = MCFGPIO_PPDSDR_SSI,
+ .setr = MCFGPIO_PPDSDR_SSI,
+ .clrr = MCFGPIO_PCLRR_SSI,
+ },
+ {
+ .gpio_chip = {
+ .label = "BUSCTL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 32,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_BUSCTL,
+ .podr = MCFGPIO_PODR_BUSCTL,
+ .ppdr = MCFGPIO_PPDSDR_BUSCTL,
+ .setr = MCFGPIO_PPDSDR_BUSCTL,
+ .clrr = MCFGPIO_PCLRR_BUSCTL,
+ },
+ {
+ .gpio_chip = {
+ .label = "BE",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 40,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_BE,
+ .podr = MCFGPIO_PODR_BE,
+ .ppdr = MCFGPIO_PPDSDR_BE,
+ .setr = MCFGPIO_PPDSDR_BE,
+ .clrr = MCFGPIO_PCLRR_BE,
+ },
+ {
+ .gpio_chip = {
+ .label = "CS",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 49,
+ .ngpio = 5,
+ },
+ .pddr = MCFGPIO_PDDR_CS,
+ .podr = MCFGPIO_PODR_CS,
+ .ppdr = MCFGPIO_PPDSDR_CS,
+ .setr = MCFGPIO_PPDSDR_CS,
+ .clrr = MCFGPIO_PCLRR_CS,
+ },
+ {
+ .gpio_chip = {
+ .label = "PWM",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 58,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_PWM,
+ .podr = MCFGPIO_PODR_PWM,
+ .ppdr = MCFGPIO_PPDSDR_PWM,
+ .setr = MCFGPIO_PPDSDR_PWM,
+ .clrr = MCFGPIO_PCLRR_PWM,
+ },
+ {
+ .gpio_chip = {
+ .label = "FECI2C",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 64,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_FECI2C,
+ .podr = MCFGPIO_PODR_FECI2C,
+ .ppdr = MCFGPIO_PPDSDR_FECI2C,
+ .setr = MCFGPIO_PPDSDR_FECI2C,
+ .clrr = MCFGPIO_PCLRR_FECI2C,
+ },
+ {
+ .gpio_chip = {
+ .label = "UART",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 72,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_UART,
+ .podr = MCFGPIO_PODR_UART,
+ .ppdr = MCFGPIO_PPDSDR_UART,
+ .setr = MCFGPIO_PPDSDR_UART,
+ .clrr = MCFGPIO_PCLRR_UART,
+ },
+ {
+ .gpio_chip = {
+ .label = "QSPI",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 80,
+ .ngpio = 6,
+ },
+ .pddr = MCFGPIO_PDDR_QSPI,
+ .podr = MCFGPIO_PODR_QSPI,
+ .ppdr = MCFGPIO_PPDSDR_QSPI,
+ .setr = MCFGPIO_PPDSDR_QSPI,
+ .clrr = MCFGPIO_PCLRR_QSPI,
+ },
+ {
+ .gpio_chip = {
+ .label = "TIMER",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 88,
+ .ngpio = 4,
+ },
+ .pddr = MCFGPIO_PDDR_TIMER,
+ .podr = MCFGPIO_PODR_TIMER,
+ .ppdr = MCFGPIO_PPDSDR_TIMER,
+ .setr = MCFGPIO_PPDSDR_TIMER,
+ .clrr = MCFGPIO_PCLRR_TIMER,
+ },
+ {
+ .gpio_chip = {
+ .label = "LCDDATAH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 96,
+ .ngpio = 2,
+ },
+ .pddr = MCFGPIO_PDDR_LCDDATAH,
+ .podr = MCFGPIO_PODR_LCDDATAH,
+ .ppdr = MCFGPIO_PPDSDR_LCDDATAH,
+ .setr = MCFGPIO_PPDSDR_LCDDATAH,
+ .clrr = MCFGPIO_PCLRR_LCDDATAH,
+ },
+ {
+ .gpio_chip = {
+ .label = "LCDDATAM",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 104,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_LCDDATAM,
+ .podr = MCFGPIO_PODR_LCDDATAM,
+ .ppdr = MCFGPIO_PPDSDR_LCDDATAM,
+ .setr = MCFGPIO_PPDSDR_LCDDATAM,
+ .clrr = MCFGPIO_PCLRR_LCDDATAM,
+ },
+ {
+ .gpio_chip = {
+ .label = "LCDDATAL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 112,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_LCDDATAL,
+ .podr = MCFGPIO_PODR_LCDDATAL,
+ .ppdr = MCFGPIO_PPDSDR_LCDDATAL,
+ .setr = MCFGPIO_PPDSDR_LCDDATAL,
+ .clrr = MCFGPIO_PCLRR_LCDDATAL,
+ },
+ {
+ .gpio_chip = {
+ .label = "LCDCTLH",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 120,
+ .ngpio = 1,
+ },
+ .pddr = MCFGPIO_PDDR_LCDCTLH,
+ .podr = MCFGPIO_PODR_LCDCTLH,
+ .ppdr = MCFGPIO_PPDSDR_LCDCTLH,
+ .setr = MCFGPIO_PPDSDR_LCDCTLH,
+ .clrr = MCFGPIO_PCLRR_LCDCTLH,
+ },
+ {
+ .gpio_chip = {
+ .label = "LCDCTLL",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value_fast,
+ .base = 128,
+ .ngpio = 8,
+ },
+ .pddr = MCFGPIO_PDDR_LCDCTLL,
+ .podr = MCFGPIO_PODR_LCDCTLL,
+ .ppdr = MCFGPIO_PPDSDR_LCDCTLL,
+ .setr = MCFGPIO_PPDSDR_LCDCTLL,
+ .clrr = MCFGPIO_PCLRR_LCDCTLL,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/5407/Makefile b/arch/m68knommu/platform/5407/Makefile
index e6035e7a2d3f..dee62c5dbaa6 100644
--- a/arch/m68knommu/platform/5407/Makefile
+++ b/arch/m68knommu/platform/5407/Makefile
@@ -14,5 +14,5 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y := config.o
+obj-y := config.o gpio.o
diff --git a/arch/m68knommu/platform/5407/config.c b/arch/m68knommu/platform/5407/config.c
index b41d942bf8d0..70ea789a400c 100644
--- a/arch/m68knommu/platform/5407/config.c
+++ b/arch/m68knommu/platform/5407/config.c
@@ -20,12 +20,6 @@
/***************************************************************************/
-extern unsigned int mcf_timervector;
-extern unsigned int mcf_profilevector;
-extern unsigned int mcf_timerlevel;
-
-/***************************************************************************/
-
static struct mcf_platform_uart m5407_uart_platform[] = {
{
.mapbase = MCF_MBAR + MCFUART_BASE1,
@@ -55,11 +49,11 @@ static void __init m5407_uart_init_line(int line, int irq)
if (line == 0) {
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE1 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART1);
+ mcf_mapirq2imr(irq, MCFINTC_UART0);
} else if (line == 1) {
writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCF_MBAR + MCFUART_BASE2 + MCFUART_UIVR);
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_UART2);
+ mcf_mapirq2imr(irq, MCFINTC_UART1);
}
}
@@ -74,35 +68,19 @@ static void __init m5407_uarts_init(void)
/***************************************************************************/
-void mcf_autovector(unsigned int vec)
-{
- volatile unsigned char *mbar;
-
- if ((vec >= 25) && (vec <= 31)) {
- mbar = (volatile unsigned char *) MCF_MBAR;
- vec = 0x1 << (vec - 24);
- *(mbar + MCFSIM_AVR) |= vec;
- mcf_setimr(mcf_getimr() & ~vec);
- }
-}
-
-/***************************************************************************/
-
-void mcf_settimericr(unsigned int timer, unsigned int level)
+static void __init m5407_timers_init(void)
{
- volatile unsigned char *icrp;
- unsigned int icr, imr;
-
- if (timer <= 2) {
- switch (timer) {
- case 2: icr = MCFSIM_TIMER2ICR; imr = MCFSIM_IMR_TIMER2; break;
- default: icr = MCFSIM_TIMER1ICR; imr = MCFSIM_IMR_TIMER1; break;
- }
-
- icrp = (volatile unsigned char *) (MCF_MBAR + icr);
- *icrp = MCFSIM_ICR_AUTOVEC | (level << 2) | MCFSIM_ICR_PRI3;
- mcf_setimr(mcf_getimr() & ~imr);
- }
+ /* Timer1 is always used as system timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER1ICR);
+ mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
+
+#ifdef CONFIG_HIGHPROFILE
+ /* Timer2 is to be used as a high speed profile timer */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
+ MCF_MBAR + MCFSIM_TIMER2ICR);
+ mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
+#endif
}
/***************************************************************************/
@@ -120,23 +98,21 @@ void m5407_cpu_reset(void)
void __init config_BSP(char *commandp, int size)
{
- mcf_setimr(MCFSIM_IMR_MASKALL);
-
-#if defined(CONFIG_CLEOPATRA)
- /* Different timer setup - to prevent device clash */
- mcf_timervector = 30;
- mcf_profilevector = 31;
- mcf_timerlevel = 6;
-#endif
-
mach_reset = m5407_cpu_reset;
+ m5407_timers_init();
+ m5407_uarts_init();
+
+ /* Only support the external interrupts on their primary level */
+ mcf_mapirq2imr(25, MCFINTC_EINT1);
+ mcf_mapirq2imr(27, MCFINTC_EINT3);
+ mcf_mapirq2imr(29, MCFINTC_EINT5);
+ mcf_mapirq2imr(31, MCFINTC_EINT7);
}
/***************************************************************************/
static int __init init_BSP(void)
{
- m5407_uarts_init();
platform_add_devices(m5407_devices, ARRAY_SIZE(m5407_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5407/gpio.c b/arch/m68knommu/platform/5407/gpio.c
new file mode 100644
index 000000000000..8da5880e4066
--- /dev/null
+++ b/arch/m68knommu/platform/5407/gpio.c
@@ -0,0 +1,49 @@
+/*
+ * Coldfire generic GPIO support
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+static struct mcf_gpio_chip mcf_gpio_chips[] = {
+ {
+ .gpio_chip = {
+ .label = "PP",
+ .request = mcf_gpio_request,
+ .free = mcf_gpio_free,
+ .direction_input = mcf_gpio_direction_input,
+ .direction_output = mcf_gpio_direction_output,
+ .get = mcf_gpio_get_value,
+ .set = mcf_gpio_set_value,
+ .ngpio = 16,
+ },
+ .pddr = MCFSIM_PADDR,
+ .podr = MCFSIM_PADAT,
+ .ppdr = MCFSIM_PADAT,
+ },
+};
+
+static int __init mcf_gpio_init(void)
+{
+ unsigned i = 0;
+ while (i < ARRAY_SIZE(mcf_gpio_chips))
+ (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
+ return 0;
+}
+
+core_initcall(mcf_gpio_init);
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index 72e56d554f4f..dab623389a01 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -73,34 +73,6 @@ extern e_vector *_ramvec;
/* The number of spurious interrupts */
volatile unsigned int num_spurious;
-/*
- * This function should be called during kernel startup to initialize
- * the machine vector table.
- */
-void __init init_vectors(void)
-{
- int i;
-
- /* set up the vectors */
- for (i = 72; i < 256; ++i)
- _ramvec[i] = (e_vector) bad_interrupt;
-
- _ramvec[32] = system_call;
-
- _ramvec[65] = (e_vector) inthandler1;
- _ramvec[66] = (e_vector) inthandler2;
- _ramvec[67] = (e_vector) inthandler3;
- _ramvec[68] = (e_vector) inthandler4;
- _ramvec[69] = (e_vector) inthandler5;
- _ramvec[70] = (e_vector) inthandler6;
- _ramvec[71] = (e_vector) inthandler7;
-
- IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */
-
- /* turn off all interrupts */
- IMR = ~0;
-}
-
/* The 68k family did not have a good way to determine the source
* of interrupts until later in the family. The EC000 core does
* not provide the vector number on the stack, we vector everything
@@ -163,18 +135,54 @@ void process_int(int vec, struct pt_regs *fp)
}
}
-void enable_vector(unsigned int irq)
+static void intc_irq_unmask(unsigned int irq)
{
IMR &= ~(1<<irq);
}
-void disable_vector(unsigned int irq)
+static void intc_irq_mask(unsigned int irq)
{
IMR |= (1<<irq);
}
-void ack_vector(unsigned int irq)
+static struct irq_chip intc_irq_chip = {
+ .name = "M68K-INTC",
+ .mask = intc_irq_mask,
+ .unmask = intc_irq_unmask,
+};
+
+/*
+ * This function should be called during kernel startup to initialize
+ * the machine vector table.
+ */
+void __init init_IRQ(void)
{
- /* Nothing needed */
+ int i;
+
+ /* set up the vectors */
+ for (i = 72; i < 256; ++i)
+ _ramvec[i] = (e_vector) bad_interrupt;
+
+ _ramvec[32] = system_call;
+
+ _ramvec[65] = (e_vector) inthandler1;
+ _ramvec[66] = (e_vector) inthandler2;
+ _ramvec[67] = (e_vector) inthandler3;
+ _ramvec[68] = (e_vector) inthandler4;
+ _ramvec[69] = (e_vector) inthandler5;
+ _ramvec[70] = (e_vector) inthandler6;
+ _ramvec[71] = (e_vector) inthandler7;
+
+ IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */
+
+ /* turn off all interrupts */
+ IMR = ~0;
+
+ for (i = 0; (i < NR_IRQS); i++) {
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].action = NULL;
+ irq_desc[i].depth = 1;
+ irq_desc[i].chip = &intc_irq_chip;
+ }
}
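
With the enable/disable/ack hooks gone, the 68328 controller is now driven through the generic irq_chip above, so device code uses the standard request_irq() path and the IMR bit handling happens in intc_irq_unmask()/intc_irq_mask(). A minimal sketch of that flow; the IRQ number and handler name are made up for illustration only:

	/* Hypothetical device on DragonBall interrupt line 8 (illustrative only). */
	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		/* handle and acknowledge the device here */
		return IRQ_HANDLED;
	}

	static int __init demo_init(void)
	{
		/* genirq ends up in intc_irq_unmask(), clearing bit 8 in IMR */
		return request_irq(8, demo_isr, 0, "demo", NULL);
	}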
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index c36781157e09..1143f77caca4 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -37,11 +37,33 @@ extern void *_ramvec[];
/* The number of spurious interrupts */
volatile unsigned int num_spurious;
+static void intc_irq_unmask(unsigned int irq)
+{
+ pquicc->intr_cimr |= (1 << irq);
+}
+
+static void intc_irq_mask(unsigned int irq)
+{
+ pquicc->intr_cimr &= ~(1 << irq);
+}
+
+static void intc_irq_ack(unsigned int irq)
+{
+ pquicc->intr_cisr = (1 << irq);
+}
+
+static struct irq_chip intc_irq_chip = {
+ .name = "M68K-INTC",
+ .mask = intc_irq_mask,
+ .unmask = intc_irq_unmask,
+ .ack = intc_irq_ack,
+};
+
/*
* This function should be called during kernel startup to initialize
* the vector table.
*/
-void init_vectors(void)
+void init_IRQ(void)
{
int i;
int vba = (CPM_VECTOR_BASE<<4);
@@ -109,20 +131,12 @@ void init_vectors(void)
/* turn off all CPM interrupts */
pquicc->intr_cimr = 0x00000000;
-}
-
-void enable_vector(unsigned int irq)
-{
- pquicc->intr_cimr |= (1 << irq);
-}
-void disable_vector(unsigned int irq)
-{
- pquicc->intr_cimr &= ~(1 << irq);
-}
-
-void ack_vector(unsigned int irq)
-{
- pquicc->intr_cisr = (1 << irq);
+ for (i = 0; (i < NR_IRQS); i++) {
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].action = NULL;
+ irq_desc[i].depth = 1;
+ irq_desc[i].chip = &intc_irq_chip;
+ }
}
diff --git a/arch/m68knommu/platform/coldfire/Makefile b/arch/m68knommu/platform/coldfire/Makefile
index 1bcb9372353f..24ea95a23128 100644
--- a/arch/m68knommu/platform/coldfire/Makefile
+++ b/arch/m68knommu/platform/coldfire/Makefile
@@ -15,16 +15,17 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-$(CONFIG_COLDFIRE) += clk.o dma.o entry.o vectors.o
-obj-$(CONFIG_M5206) += timers.o
-obj-$(CONFIG_M5206e) += timers.o
-obj-$(CONFIG_M520x) += pit.o
-obj-$(CONFIG_M523x) += pit.o dma_timer.o
-obj-$(CONFIG_M5249) += timers.o
-obj-$(CONFIG_M527x) += pit.o
-obj-$(CONFIG_M5272) += timers.o
-obj-$(CONFIG_M528x) += pit.o
-obj-$(CONFIG_M5307) += timers.o
-obj-$(CONFIG_M532x) += timers.o
-obj-$(CONFIG_M5407) += timers.o
+obj-$(CONFIG_M5206) += timers.o intc.o
+obj-$(CONFIG_M5206e) += timers.o intc.o
+obj-$(CONFIG_M520x) += pit.o intc-simr.o
+obj-$(CONFIG_M523x) += pit.o dma_timer.o intc-2.o
+obj-$(CONFIG_M5249) += timers.o intc.o
+obj-$(CONFIG_M527x) += pit.o intc-2.o
+obj-$(CONFIG_M5272) += timers.o intc.o
+obj-$(CONFIG_M528x) += pit.o intc-2.o
+obj-$(CONFIG_M5307) += timers.o intc.o
+obj-$(CONFIG_M532x) += timers.o intc-simr.o
+obj-$(CONFIG_M5407) += timers.o intc.o
+obj-y += pinmux.o gpio.o
extra-y := head.o
diff --git a/arch/m68knommu/platform/coldfire/gpio.c b/arch/m68knommu/platform/coldfire/gpio.c
new file mode 100644
index 000000000000..ff0045793450
--- /dev/null
+++ b/arch/m68knommu/platform/coldfire/gpio.c
@@ -0,0 +1,127 @@
+/*
+ * Coldfire generic GPIO support.
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sysdev.h>
+
+#include <asm/gpio.h>
+#include <asm/pinmux.h>
+#include <asm/mcfgpio.h>
+
+#define MCF_CHIP(chip) container_of(chip, struct mcf_gpio_chip, gpio_chip)
+
+int mcf_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned long flags;
+ MCFGPIO_PORTTYPE dir;
+ struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
+
+ local_irq_save(flags);
+ dir = mcfgpio_read(mcf_chip->pddr);
+ dir &= ~mcfgpio_bit(chip->base + offset);
+ mcfgpio_write(dir, mcf_chip->pddr);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+int mcf_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+ struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
+
+ return mcfgpio_read(mcf_chip->ppdr) & mcfgpio_bit(chip->base + offset);
+}
+
+int mcf_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ unsigned long flags;
+ MCFGPIO_PORTTYPE data;
+ struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
+
+ local_irq_save(flags);
+ /* write the value to the output latch */
+ data = mcfgpio_read(mcf_chip->podr);
+ if (value)
+ data |= mcfgpio_bit(chip->base + offset);
+ else
+ data &= ~mcfgpio_bit(chip->base + offset);
+ mcfgpio_write(data, mcf_chip->podr);
+
+ /* now set the direction to output */
+ data = mcfgpio_read(mcf_chip->pddr);
+ data |= mcfgpio_bit(chip->base + offset);
+ mcfgpio_write(data, mcf_chip->pddr);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+void mcf_gpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
+
+ unsigned long flags;
+ MCFGPIO_PORTTYPE data;
+
+ local_irq_save(flags);
+ data = mcfgpio_read(mcf_chip->podr);
+ if (value)
+ data |= mcfgpio_bit(chip->base + offset);
+ else
+ data &= ~mcfgpio_bit(chip->base + offset);
+ mcfgpio_write(data, mcf_chip->podr);
+ local_irq_restore(flags);
+}
+
+void mcf_gpio_set_value_fast(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
+
+ if (value)
+ mcfgpio_write(mcfgpio_bit(chip->base + offset), mcf_chip->setr);
+ else
+ mcfgpio_write(~mcfgpio_bit(chip->base + offset), mcf_chip->clrr);
+}
+
+int mcf_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
+
+ return mcf_chip->gpio_to_pinmux ?
+ mcf_pinmux_request(mcf_chip->gpio_to_pinmux[offset], 0) : 0;
+}
+
+void mcf_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
+
+ mcf_gpio_direction_input(chip, offset);
+
+ if (mcf_chip->gpio_to_pinmux)
+ mcf_pinmux_release(mcf_chip->gpio_to_pinmux[offset], 0);
+}
+
+struct sysdev_class mcf_gpio_sysclass = {
+ .name = "gpio",
+};
+
+static int __init mcf_gpio_sysinit(void)
+{
+ return sysdev_class_register(&mcf_gpio_sysclass);
+}
+
+core_initcall(mcf_gpio_sysinit);
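
These chip callbacks are registered with gpiolib by the per-platform gpio.c files (such as the m5407 one earlier in this series), so consumers go through the ordinary GPIO API rather than poking PADDR/PADAT directly. A short usage sketch; the GPIO number and label are assumptions for illustration:

	/* GPIO 3 on the "PP" bank is a made-up example pin. */
	if (gpio_request(3, "demo-led") == 0) {
		gpio_direction_output(3, 0);	/* routed to mcf_gpio_direction_output() */
		gpio_set_value(3, 1);		/* routed to mcf_gpio_set_value() */
		gpio_free(3);			/* routed to mcf_gpio_free() */
	}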
diff --git a/arch/m68knommu/platform/coldfire/intc-2.c b/arch/m68knommu/platform/coldfire/intc-2.c
new file mode 100644
index 000000000000..5598c8b8661f
--- /dev/null
+++ b/arch/m68knommu/platform/coldfire/intc-2.c
@@ -0,0 +1,93 @@
+/*
+ * intc-2.c
+ *
+ * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/traps.h>
+
+/*
+ * Each vector needs a unique priority and level associated with it.
+ * We don't really care so much what they are, since we don't rely on
+ * the traditional priority interrupt scheme of the m68k/ColdFire.
+ */
+static u8 intc_intpri = 0x36;
+
+static void intc_irq_mask(unsigned int irq)
+{
+ if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + 128)) {
+ unsigned long imraddr;
+ u32 val, imrbit;
+
+ irq -= MCFINT_VECBASE;
+ imraddr = MCF_IPSBAR;
+ imraddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
+ imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL;
+ imrbit = 0x1 << (irq & 0x1f);
+
+ val = __raw_readl(imraddr);
+ __raw_writel(val | imrbit, imraddr);
+ }
+}
+
+static void intc_irq_unmask(unsigned int irq)
+{
+ if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + 128)) {
+ unsigned long intaddr, imraddr, icraddr;
+ u32 val, imrbit;
+
+ irq -= MCFINT_VECBASE;
+ intaddr = MCF_IPSBAR;
+ intaddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
+ imraddr = intaddr + ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL);
+ icraddr = intaddr + MCFINTC_ICR0 + (irq & 0x3f);
+ imrbit = 0x1 << (irq & 0x1f);
+
+ /* Don't set the "maskall" bit! */
+ if ((irq & 0x20) == 0)
+ imrbit |= 0x1;
+
+ if (__raw_readb(icraddr) == 0)
+ __raw_writeb(intc_intpri--, icraddr);
+
+ val = __raw_readl(imraddr);
+ __raw_writel(val & ~imrbit, imraddr);
+ }
+}
+
+static struct irq_chip intc_irq_chip = {
+ .name = "CF-INTC",
+ .mask = intc_irq_mask,
+ .unmask = intc_irq_unmask,
+};
+
+void __init init_IRQ(void)
+{
+ int irq;
+
+ init_vectors();
+
+ /* Mask all interrupt sources */
+ __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL);
+ __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC1 + MCFINTC_IMRL);
+
+ for (irq = 0; (irq < NR_IRQS); irq++) {
+ irq_desc[irq].status = IRQ_DISABLED;
+ irq_desc[irq].action = NULL;
+ irq_desc[irq].depth = 1;
+ irq_desc[irq].chip = &intc_irq_chip;
+ }
+}
+
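
The mask/unmask arithmetic above splits the vector offset into a controller select (bit 6), a register half select (bit 5) and a bit position (low five bits). Worked through for one hypothetical vector, MCFINT_VECBASE + 0x23:

	/* offset 0x23: bit 6 clear -> INTC0, bit 5 set -> IMRH, bit = 0x23 & 0x1f = 3 */
	imraddr = MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH;
	imrbit  = 0x1 << 3;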
diff --git a/arch/m68knommu/platform/coldfire/intc-simr.c b/arch/m68knommu/platform/coldfire/intc-simr.c
new file mode 100644
index 000000000000..1b01e79c2f63
--- /dev/null
+++ b/arch/m68knommu/platform/coldfire/intc-simr.c
@@ -0,0 +1,78 @@
+/*
+ * intc-simr.c
+ *
+ * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/traps.h>
+
+static void intc_irq_mask(unsigned int irq)
+{
+ if (irq >= MCFINT_VECBASE) {
+ if (irq < MCFINT_VECBASE + 64)
+ __raw_writeb(irq - MCFINT_VECBASE, MCFINTC0_SIMR);
+ else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_SIMR)
+ __raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_SIMR);
+ }
+}
+
+static void intc_irq_unmask(unsigned int irq)
+{
+ if (irq >= MCFINT_VECBASE) {
+ if (irq < MCFINT_VECBASE + 64)
+ __raw_writeb(irq - MCFINT_VECBASE, MCFINTC0_CIMR);
+ else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_CIMR)
+ __raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_CIMR);
+ }
+}
+
+static int intc_irq_set_type(unsigned int irq, unsigned int type)
+{
+ if (irq >= MCFINT_VECBASE) {
+ if (irq < MCFINT_VECBASE + 64)
+ __raw_writeb(5, MCFINTC0_ICR0 + irq - MCFINT_VECBASE);
+ else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_ICR0)
+ __raw_writeb(5, MCFINTC1_ICR0 + irq - MCFINT_VECBASE - 64);
+ }
+ return 0;
+}
+
+static struct irq_chip intc_irq_chip = {
+ .name = "CF-INTC",
+ .mask = intc_irq_mask,
+ .unmask = intc_irq_unmask,
+ .set_type = intc_irq_set_type,
+};
+
+void __init init_IRQ(void)
+{
+ int irq;
+
+ init_vectors();
+
+ /* Mask all interrupt sources */
+ __raw_writeb(0xff, MCFINTC0_SIMR);
+ if (MCFINTC1_SIMR)
+ __raw_writeb(0xff, MCFINTC1_SIMR);
+
+ for (irq = 0; (irq < NR_IRQS); irq++) {
+ irq_desc[irq].status = IRQ_DISABLED;
+ irq_desc[irq].action = NULL;
+ irq_desc[irq].depth = 1;
+ irq_desc[irq].chip = &intc_irq_chip;
+ intc_irq_set_type(irq, 0);
+ }
+}
+
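
Unlike the IMR-style controllers, the SIMR/CIMR scheme masks and unmasks by writing the source number to a set or clear register instead of read-modify-writing a mask word. For a hypothetical vector MCFINT_VECBASE + 70, on a part that actually has the second controller, the unmask above reduces to:

	/* source 70 lives in the second controller: 70 - 64 = 6 */
	__raw_writeb(70 - 64, MCFINTC1_CIMR);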
diff --git a/arch/m68knommu/platform/coldfire/intc.c b/arch/m68knommu/platform/coldfire/intc.c
new file mode 100644
index 000000000000..506df2c18fb6
--- /dev/null
+++ b/arch/m68knommu/platform/coldfire/intc.c
@@ -0,0 +1,153 @@
+/*
+ * intc.c -- support for the old ColdFire interrupt controller
+ *
+ * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/traps.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+
+/*
+ * The mapping of irq number to a mask register bit is not one-to-one.
+ * The irq numbers are either based on "level" of interrupt or fixed
+ * for an autovector-able interrupt. So we keep a local data structure
+ * that maps from irq to mask register. Not all interrupts will have
+ * an IMR bit.
+ */
+unsigned char mcf_irq2imr[NR_IRQS];
+
+/*
+ * Define the minimum and maximum external interrupt numbers.
+ * These are also used as the "level" interrupt numbers.
+ */
+#define EIRQ1 25
+#define EIRQ7 31
+
+/*
+ * In the early version 2 core ColdFire parts the IMR register was 16 bits
+ * in size. Version 3 (and later version 2) core parts have a 32-bit
+ * IMR register. Provide some size-independent methods to access the
+ * IMR register.
+ */
+#ifdef MCFSIM_IMR_IS_16BITS
+
+void mcf_setimr(int index)
+{
+ u16 imr;
+ imr = __raw_readw(MCF_MBAR + MCFSIM_IMR);
+ __raw_writew(imr | (0x1 << index), MCF_MBAR + MCFSIM_IMR);
+}
+
+void mcf_clrimr(int index)
+{
+ u16 imr;
+ imr = __raw_readw(MCF_MBAR + MCFSIM_IMR);
+ __raw_writew(imr & ~(0x1 << index), MCF_MBAR + MCFSIM_IMR);
+}
+
+void mcf_maskimr(unsigned int mask)
+{
+ u16 imr;
+ imr = __raw_readw(MCF_MBAR + MCFSIM_IMR);
+ imr |= mask;
+ __raw_writew(imr, MCF_MBAR + MCFSIM_IMR);
+}
+
+#else
+
+void mcf_setimr(int index)
+{
+ u32 imr;
+ imr = __raw_readl(MCF_MBAR + MCFSIM_IMR);
+ __raw_writel(imr | (0x1 << index), MCF_MBAR + MCFSIM_IMR);
+}
+
+void mcf_clrimr(int index)
+{
+ u32 imr;
+ imr = __raw_readl(MCF_MBAR + MCFSIM_IMR);
+ __raw_writel(imr & ~(0x1 << index), MCF_MBAR + MCFSIM_IMR);
+}
+
+void mcf_maskimr(unsigned int mask)
+{
+ u32 imr;
+ imr = __raw_readl(MCF_MBAR + MCFSIM_IMR);
+ imr |= mask;
+ __raw_writel(imr, MCF_MBAR + MCFSIM_IMR);
+}
+
+#endif
+
+/*
+ * Interrupts can be "vectored" on the ColdFire cores that support this old
+ * interrupt controller. That is, the device raising the interrupt can also
+ * supply the vector number to interrupt through. The AVR register of the
+ * interrupt controller enables or disables this for each external interrupt,
+ * so provide generic support for this. Setting this up is out-of-band for
+ * the interrupt system APIs, and needs to be done by the driver that
+ * supports this device. Very few devices actually use this.
+ */
+void mcf_autovector(int irq)
+{
+#ifdef MCFSIM_AVR
+ if ((irq >= EIRQ1) && (irq <= EIRQ7)) {
+ u8 avec;
+ avec = __raw_readb(MCF_MBAR + MCFSIM_AVR);
+ avec |= (0x1 << (irq - EIRQ1 + 1));
+ __raw_writeb(avec, MCF_MBAR + MCFSIM_AVR);
+ }
+#endif
+}
+
+static void intc_irq_mask(unsigned int irq)
+{
+ if (mcf_irq2imr[irq])
+ mcf_setimr(mcf_irq2imr[irq]);
+}
+
+static void intc_irq_unmask(unsigned int irq)
+{
+ if (mcf_irq2imr[irq])
+ mcf_clrimr(mcf_irq2imr[irq]);
+}
+
+static int intc_irq_set_type(unsigned int irq, unsigned int type)
+{
+ return 0;
+}
+
+static struct irq_chip intc_irq_chip = {
+ .name = "CF-INTC",
+ .mask = intc_irq_mask,
+ .unmask = intc_irq_unmask,
+ .set_type = intc_irq_set_type,
+};
+
+void __init init_IRQ(void)
+{
+ int irq;
+
+ init_vectors();
+ mcf_maskimr(0xffffffff);
+
+ for (irq = 0; (irq < NR_IRQS); irq++) {
+ irq_desc[irq].status = IRQ_DISABLED;
+ irq_desc[irq].action = NULL;
+ irq_desc[irq].depth = 1;
+ irq_desc[irq].chip = &intc_irq_chip;
+ intc_irq_set_type(irq, 0);
+ }
+}
+
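
The per-platform setup code seen earlier (for example the m5407 config_BSP()) fills mcf_irq2imr[] through mcf_mapirq2imr(). That helper is not part of this hunk; presumably it is little more than an array store in the mcfsim headers, roughly:

	/* Sketch only; the real helper lives outside this patch. */
	static inline void mcf_mapirq2imr(int irq, int imr)
	{
		mcf_irq2imr[irq] = imr;
	}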
diff --git a/arch/m68knommu/platform/coldfire/pinmux.c b/arch/m68knommu/platform/coldfire/pinmux.c
new file mode 100644
index 000000000000..8c62b825939f
--- /dev/null
+++ b/arch/m68knommu/platform/coldfire/pinmux.c
@@ -0,0 +1,28 @@
+/*
+ * Coldfire generic GPIO pinmux support.
+ *
+ * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/pinmux.h>
+
+int mcf_pinmux_request(unsigned pinmux, unsigned func)
+{
+ return 0;
+}
+
+void mcf_pinmux_release(unsigned pinmux, unsigned func)
+{
+}
diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c
index 61b96211f8ff..d8720ee34510 100644
--- a/arch/m68knommu/platform/coldfire/pit.c
+++ b/arch/m68knommu/platform/coldfire/pit.c
@@ -32,7 +32,6 @@
*/
#define FREQ ((MCF_CLK / 2) / 64)
#define TA(a) (MCF_IPSBAR + MCFPIT_BASE1 + (a))
-#define INTC0 (MCF_IPSBAR + MCFICM_INTC0)
#define PIT_CYCLES_PER_JIFFY (FREQ / HZ)
static u32 pit_cnt;
@@ -154,8 +153,6 @@ static struct clocksource pit_clk = {
void hw_timer_init(void)
{
- u32 imr;
-
cf_pit_clockevent.cpumask = cpumask_of(smp_processor_id());
cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32);
cf_pit_clockevent.max_delta_ns =
@@ -166,11 +163,6 @@ void hw_timer_init(void)
setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &pit_irq);
- __raw_writeb(ICR_INTRCONF, INTC0 + MCFINTC_ICR0 + MCFINT_PIT1);
- imr = __raw_readl(INTC0 + MCFPIT_IMR);
- imr &= ~MCFPIT_IMR_IBIT;
- __raw_writel(imr, INTC0 + MCFPIT_IMR);
-
pit_clk.mult = clocksource_hz2mult(FREQ, pit_clk.shift);
clocksource_register(&pit_clk);
}
diff --git a/arch/m68knommu/platform/coldfire/timers.c b/arch/m68knommu/platform/coldfire/timers.c
index 1ba8a3731653..2304d736c701 100644
--- a/arch/m68knommu/platform/coldfire/timers.c
+++ b/arch/m68knommu/platform/coldfire/timers.c
@@ -31,19 +31,9 @@
#define TA(a) (MCF_MBAR + MCFTIMER_BASE1 + (a))
/*
- * Default the timer and vector to use for ColdFire. Some ColdFire
- * CPU's and some boards may want different. Their sub-architecture
- * startup code (in config.c) can change these if they want.
- */
-unsigned int mcf_timervector = 29;
-unsigned int mcf_profilevector = 31;
-unsigned int mcf_timerlevel = 5;
-
-/*
* These provide the underlying interrupt vector support.
* Unfortunately it is a little different on each ColdFire.
*/
-extern void mcf_settimericr(int timer, int level);
void coldfire_profile_init(void);
#if defined(CONFIG_M532x)
@@ -107,8 +97,6 @@ static struct clocksource mcftmr_clk = {
void hw_timer_init(void)
{
- setup_irq(mcf_timervector, &mcftmr_timer_irq);
-
__raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
mcftmr_cycles_per_jiffy = FREQ / HZ;
/*
@@ -124,7 +112,7 @@ void hw_timer_init(void)
mcftmr_clk.mult = clocksource_hz2mult(FREQ, mcftmr_clk.shift);
clocksource_register(&mcftmr_clk);
- mcf_settimericr(1, mcf_timerlevel);
+ setup_irq(MCF_IRQ_TIMER, &mcftmr_timer_irq);
#ifdef CONFIG_HIGHPROFILE
coldfire_profile_init();
@@ -171,8 +159,6 @@ void coldfire_profile_init(void)
printk(KERN_INFO "PROFILE: lodging TIMER2 @ %dHz as profile timer\n",
PROFILEHZ);
- setup_irq(mcf_profilevector, &coldfire_profile_irq);
-
/* Set up TIMER 2 as high speed profile clock */
__raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
@@ -180,7 +166,7 @@ void coldfire_profile_init(void)
__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
- mcf_settimericr(2, 7);
+ setup_irq(MCF_IRQ_PROFILER, &coldfire_profile_irq);
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/coldfire/vectors.c b/arch/m68knommu/platform/coldfire/vectors.c
index bdca0297fa9a..a21d3f870b7a 100644
--- a/arch/m68knommu/platform/coldfire/vectors.c
+++ b/arch/m68knommu/platform/coldfire/vectors.c
@@ -1,7 +1,7 @@
/***************************************************************************/
/*
- * linux/arch/m68knommu/platform/5307/vectors.c
+ * linux/arch/m68knommu/platform/coldfire/vectors.c
*
* Copyright (C) 1999-2007, Greg Ungerer <gerg@snapgear.com>
*/
@@ -15,7 +15,6 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
-#include <asm/mcfdma.h>
#include <asm/mcfwdebug.h>
/***************************************************************************/
@@ -79,20 +78,3 @@ void __init init_vectors(void)
}
/***************************************************************************/
-
-void enable_vector(unsigned int irq)
-{
- /* Currently no action on ColdFire */
-}
-
-void disable_vector(unsigned int irq)
-{
- /* Currently no action on ColdFire */
-}
-
-void ack_vector(unsigned int irq)
-{
- /* Currently no action on ColdFire */
-}
-
-/***************************************************************************/
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index b50b845fdd50..2db722d80d4d 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -53,6 +53,9 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
config GENERIC_GPIO
def_bool y
+config GENERIC_CSUM
+ def_bool y
+
config PCI
def_bool n
diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h
index 0de612ad7cb2..6d2e1d418be7 100644
--- a/arch/microblaze/include/asm/atomic.h
+++ b/arch/microblaze/include/asm/atomic.h
@@ -1,95 +1,7 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
#ifndef _ASM_MICROBLAZE_ATOMIC_H
#define _ASM_MICROBLAZE_ATOMIC_H
-#include <linux/types.h>
-#include <linux/compiler.h> /* likely */
-#include <asm/system.h> /* local_irq_XXX and friends */
-
-#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = (i))
-
-#define atomic_inc(v) (atomic_add_return(1, (v)))
-#define atomic_dec(v) (atomic_sub_return(1, (v)))
-
-#define atomic_add(i, v) (atomic_add_return(i, (v)))
-#define atomic_sub(i, v) (atomic_sub_return(i, (v)))
-
-#define atomic_inc_return(v) (atomic_add_return(1, (v)))
-#define atomic_dec_return(v) (atomic_sub_return(1, (v)))
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
-#define atomic_inc_not_zero(v) (atomic_add_unless((v), 1, 0))
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- unsigned long flags;
-
- local_irq_save(flags);
- ret = v->counter;
- if (likely(ret == old))
- v->counter = new;
- local_irq_restore(flags);
-
- return ret;
-}
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
-
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
- c = old;
- return c != u;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- *addr &= ~mask;
- local_irq_restore(flags);
-}
-
-/**
- * atomic_add_return - add and return
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int val;
-
- local_irq_save(flags);
- val = v->counter;
- v->counter = val += i;
- local_irq_restore(flags);
-
- return val;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- return atomic_add_return(-i, v);
-}
+#include <asm-generic/atomic.h>
/*
* Atomically test *v and decrement if it is greater than 0.
@@ -109,15 +21,4 @@ static inline int atomic_dec_if_positive(atomic_t *v)
return res;
}
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#include <asm-generic/atomic-long.h>
-
#endif /* _ASM_MICROBLAZE_ATOMIC_H */
diff --git a/arch/microblaze/include/asm/bitops.h b/arch/microblaze/include/asm/bitops.h
index d6df1fd4e1e8..a72468f15c8b 100644
--- a/arch/microblaze/include/asm/bitops.h
+++ b/arch/microblaze/include/asm/bitops.h
@@ -1,27 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_BITOPS_H
-#define _ASM_MICROBLAZE_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-#include <asm/byteorder.h> /* swab32 */
-#include <asm/system.h> /* save_flags */
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
#include <asm-generic/bitops.h>
-#include <asm-generic/bitops/__fls.h>
-
-#endif /* _ASM_MICROBLAZE_BITOPS_H */
diff --git a/arch/microblaze/include/asm/bug.h b/arch/microblaze/include/asm/bug.h
index 8eb2cdde11d7..b12fd89e42e9 100644
--- a/arch/microblaze/include/asm/bug.h
+++ b/arch/microblaze/include/asm/bug.h
@@ -1,15 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_BUG_H
-#define _ASM_MICROBLAZE_BUG_H
-
-#include <linux/kernel.h>
#include <asm-generic/bug.h>
-
-#endif /* _ASM_MICROBLAZE_BUG_H */
diff --git a/arch/microblaze/include/asm/bugs.h b/arch/microblaze/include/asm/bugs.h
index f2c6593653fb..61791e1ad9f5 100644
--- a/arch/microblaze/include/asm/bugs.h
+++ b/arch/microblaze/include/asm/bugs.h
@@ -1,17 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_BUGS_H
-#define _ASM_MICROBLAZE_BUGS_H
-
-static inline void check_bugs(void)
-{
- /* nothing to do */
-}
-
-#endif /* _ASM_MICROBLAZE_BUGS_H */
+#include <asm-generic/bugs.h>
diff --git a/arch/microblaze/include/asm/checksum.h b/arch/microblaze/include/asm/checksum.h
index 97ea46b5cf80..128bf03b54b7 100644
--- a/arch/microblaze/include/asm/checksum.h
+++ b/arch/microblaze/include/asm/checksum.h
@@ -10,12 +10,11 @@
#ifndef _ASM_MICROBLAZE_CHECKSUM_H
#define _ASM_MICROBLAZE_CHECKSUM_H
-#include <linux/in6.h>
-
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
+#define csum_tcpudp_nofold csum_tcpudp_nofold
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
@@ -30,71 +29,6 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
return sum;
}
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-extern __wsum csum_partial_copy(const void *src, void *dst, int len,
- __wsum sum);
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err);
-
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy((src), (dst), (len), (sum))
-
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- *
- */
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-/*
- * Fold a partial checksum
- */
-static inline __sum16 csum_fold(__wsum csum)
-{
- u32 sum = (__force u32)csum;
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return (__force __sum16)~sum;
-}
-
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-{
- return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-extern __sum16 ip_compute_csum(const void *buff, int len);
+#include <asm-generic/checksum.h>
#endif /* _ASM_MICROBLAZE_CHECKSUM_H */
diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
index c042830793ed..b8f654dd62d1 100644
--- a/arch/microblaze/include/asm/device.h
+++ b/arch/microblaze/include/asm/device.h
@@ -16,6 +16,21 @@ struct dev_archdata {
struct device_node *of_node;
};
+static inline void dev_archdata_set_node(struct dev_archdata *ad,
+ struct device_node *np)
+{
+ ad->of_node = np;
+}
+
+static inline struct device_node *
+dev_archdata_get_node(const struct dev_archdata *ad)
+{
+ return ad->of_node;
+}
+
+struct pdev_archdata {
+};
+
#endif /* _ASM_MICROBLAZE_DEVICE_H */
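
The accessors added above give bus and OF glue code a stable way to attach a device-tree node to a device without reaching into dev_archdata directly. A hypothetical caller, assuming np was obtained from an OF lookup elsewhere:

	dev_archdata_set_node(&dev->archdata, np);
	if (dev_archdata_get_node(&dev->archdata) != np)
		return -EINVAL;	/* illustrative consistency check only */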
diff --git a/arch/microblaze/include/asm/fb.h b/arch/microblaze/include/asm/fb.h
new file mode 100644
index 000000000000..3a4988e8df45
--- /dev/null
+++ b/arch/microblaze/include/asm/fb.h
@@ -0,0 +1 @@
+#include <asm-generic/fb.h>
diff --git a/arch/microblaze/include/asm/hardirq.h b/arch/microblaze/include/asm/hardirq.h
index 0f2d6b013e11..41e1e1aa36ac 100644
--- a/arch/microblaze/include/asm/hardirq.h
+++ b/arch/microblaze/include/asm/hardirq.h
@@ -9,21 +9,11 @@
#ifndef _ASM_MICROBLAZE_HARDIRQ_H
#define _ASM_MICROBLAZE_HARDIRQ_H
-#include <linux/cache.h>
-#include <linux/irq.h>
-#include <asm/irq.h>
-#include <asm/current.h>
-#include <linux/ptrace.h>
-
/* should be defined in each interrupt controller driver */
extern unsigned int get_irq(struct pt_regs *regs);
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
+#define ack_bad_irq ack_bad_irq
void ack_bad_irq(unsigned int irq);
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#include <asm-generic/hardirq.h>
#endif /* _ASM_MICROBLAZE_HARDIRQ_H */
diff --git a/arch/microblaze/include/asm/ioctls.h b/arch/microblaze/include/asm/ioctls.h
index 03582b249204..ec34c760665e 100644
--- a/arch/microblaze/include/asm/ioctls.h
+++ b/arch/microblaze/include/asm/ioctls.h
@@ -1,91 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_IOCTLS_H
-#define _ASM_MICROBLAZE_IOCTLS_H
-
-#include <linux/ioctl.h>
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS 0x5401
-#define TCSETS 0x5402
-#define TCSETSW 0x5403
-#define TCSETSF 0x5404
-#define TCGETA 0x5405
-#define TCSETA 0x5406
-#define TCSETAW 0x5407
-#define TCSETAF 0x5408
-#define TCSBRK 0x5409
-#define TCXONC 0x540A
-#define TCFLSH 0x540B
-#define TIOCEXCL 0x540C
-#define TIOCNXCL 0x540D
-#define TIOCSCTTY 0x540E
-#define TIOCGPGRP 0x540F
-#define TIOCSPGRP 0x5410
-#define TIOCOUTQ 0x5411
-#define TIOCSTI 0x5412
-#define TIOCGWINSZ 0x5413
-#define TIOCSWINSZ 0x5414
-#define TIOCMGET 0x5415
-#define TIOCMBIS 0x5416
-#define TIOCMBIC 0x5417
-#define TIOCMSET 0x5418
-#define TIOCGSOFTCAR 0x5419
-#define TIOCSSOFTCAR 0x541A
-#define FIONREAD 0x541B
-#define TIOCINQ FIONREAD
-#define TIOCLINUX 0x541C
-#define TIOCCONS 0x541D
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TIOCPKT 0x5420
-#define FIONBIO 0x5421
-#define TIOCNOTTY 0x5422
-#define TIOCSETD 0x5423
-#define TIOCGETD 0x5424
-#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
-#define TIOCSBRK 0x5427 /* BSD compatibility */
-#define TIOCCBRK 0x5428 /* BSD compatibility */
-#define TIOCGSID 0x5429 /* Return the session ID of FD */
-/* Get Pty Number (of pty-mux device) */
-#define TIOCGPTN _IOR('T', 0x30, unsigned int)
-#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
-
-#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
-#define FIOCLEX 0x5451
-#define FIOASYNC 0x5452
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-
-#define FIOQSIZE 0x545E
-
-/* Used for packet mode */
-#define TIOCPKT_DATA 0
-#define TIOCPKT_FLUSHREAD 1
-#define TIOCPKT_FLUSHWRITE 2
-#define TIOCPKT_STOP 4
-#define TIOCPKT_START 8
-#define TIOCPKT_NOSTOP 16
-#define TIOCPKT_DOSTOP 32
-
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
-#endif /* _ASM_MICROBLAZE_IOCTLS_H */
+#include <asm-generic/ioctls.h>
diff --git a/arch/microblaze/include/asm/ipcbuf.h b/arch/microblaze/include/asm/ipcbuf.h
index b056fa420654..84c7e51cb6d0 100644
--- a/arch/microblaze/include/asm/ipcbuf.h
+++ b/arch/microblaze/include/asm/ipcbuf.h
@@ -1,36 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_IPCBUF_H
-#define _ASM_MICROBLAZE_IPCBUF_H
-
-/*
- * The user_ipc_perm structure for microblaze architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm {
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* _ASM_MICROBLAZE_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index db515deaa720..90f050535ebe 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -10,6 +10,7 @@
#define _ASM_MICROBLAZE_IRQ_H
#define NR_IRQS 32
+#include <asm-generic/irq.h>
#include <linux/interrupt.h>
@@ -17,11 +18,6 @@ extern unsigned int nr_irq;
#define NO_IRQ (-1)
-static inline int irq_canonicalize(int irq)
-{
- return irq;
-}
-
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
diff --git a/arch/microblaze/include/asm/mman.h b/arch/microblaze/include/asm/mman.h
index 4914b1329445..8eebf89f5ab1 100644
--- a/arch/microblaze/include/asm/mman.h
+++ b/arch/microblaze/include/asm/mman.h
@@ -1,25 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_MMAN_H
-#define _ASM_MICROBLAZE_MMAN_H
-
#include <asm-generic/mman.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* _ASM_MICROBLAZE_MMAN_H */
diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h
index 66cad6a99d77..8d6a654ceffb 100644
--- a/arch/microblaze/include/asm/mmu.h
+++ b/arch/microblaze/include/asm/mmu.h
@@ -12,12 +12,7 @@
#define _ASM_MICROBLAZE_MMU_H
# ifndef CONFIG_MMU
-# ifndef __ASSEMBLY__
-typedef struct {
- struct vm_list_struct *vmlist;
- unsigned long end_brk;
-} mm_context_t;
-# endif /* __ASSEMBLY__ */
+# include <asm-generic/mmu.h>
# else /* CONFIG_MMU */
# ifdef __KERNEL__
# ifndef __ASSEMBLY__
diff --git a/arch/microblaze/include/asm/mmu_context.h b/arch/microblaze/include/asm/mmu_context.h
index 385fed16bbfb..24eab1674d3e 100644
--- a/arch/microblaze/include/asm/mmu_context.h
+++ b/arch/microblaze/include/asm/mmu_context.h
@@ -1,5 +1,5 @@
#ifdef CONFIG_MMU
# include "mmu_context_mm.h"
#else
-# include "mmu_context_no.h"
+# include <asm-generic/mmu_context.h>
#endif
diff --git a/arch/microblaze/include/asm/mmu_context_no.h b/arch/microblaze/include/asm/mmu_context_no.h
deleted file mode 100644
index ba5567190154..000000000000
--- a/arch/microblaze/include/asm/mmu_context_no.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2008-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
-#define _ASM_MICROBLAZE_MMU_CONTEXT_H
-
-# define init_new_context(tsk, mm) ({ 0; })
-
-# define enter_lazy_tlb(mm, tsk) do {} while (0)
-# define change_mm_context(old, ctx, _pml4) do {} while (0)
-# define destroy_context(mm) do {} while (0)
-# define deactivate_mm(tsk, mm) do {} while (0)
-# define switch_mm(prev, next, tsk) do {} while (0)
-# define activate_mm(prev, next) do {} while (0)
-
-#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
diff --git a/arch/microblaze/include/asm/module.h b/arch/microblaze/include/asm/module.h
index 914565a90315..7be1347fce42 100644
--- a/arch/microblaze/include/asm/module.h
+++ b/arch/microblaze/include/asm/module.h
@@ -9,6 +9,8 @@
#ifndef _ASM_MICROBLAZE_MODULE_H
#define _ASM_MICROBLAZE_MODULE_H
+#include <asm-generic/module.h>
+
/* Microblaze Relocations */
#define R_MICROBLAZE_NONE 0
#define R_MICROBLAZE_32 1
@@ -24,14 +26,6 @@
/* Keep this the last entry. */
#define R_MICROBLAZE_NUM 11
-struct mod_arch_specific {
- int foo;
-};
-
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
-
typedef struct { volatile int counter; } module_t;
#endif /* _ASM_MICROBLAZE_MODULE_H */
diff --git a/arch/microblaze/include/asm/msgbuf.h b/arch/microblaze/include/asm/msgbuf.h
index 09dd97097211..809134c644a6 100644
--- a/arch/microblaze/include/asm/msgbuf.h
+++ b/arch/microblaze/include/asm/msgbuf.h
@@ -1,31 +1 @@
-#ifndef _ASM_MICROBLAZE_MSGBUF_H
-#define _ASM_MICROBLAZE_MSGBUF_H
-
-/*
- * The msqid64_ds structure for microblaze architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct msqid64_ds {
- struct ipc64_perm msg_perm;
- __kernel_time_t msg_stime; /* last msgsnd time */
- unsigned long __unused1;
- __kernel_time_t msg_rtime; /* last msgrcv time */
- unsigned long __unused2;
- __kernel_time_t msg_ctime; /* last change time */
- unsigned long __unused3;
- unsigned long msg_cbytes; /* current number of bytes on queue */
- unsigned long msg_qnum; /* number of messages in queue */
- unsigned long msg_qbytes; /* max number of bytes on queue */
- __kernel_pid_t msg_lspid; /* pid of last msgsnd */
- __kernel_pid_t msg_lrpid; /* last receive pid */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-#endif /* _ASM_MICROBLAZE_MSGBUF_H */
+#include <asm-generic/msgbuf.h>
diff --git a/arch/microblaze/include/asm/param.h b/arch/microblaze/include/asm/param.h
index 8c538a49616d..965d45427975 100644
--- a/arch/microblaze/include/asm/param.h
+++ b/arch/microblaze/include/asm/param.h
@@ -1,30 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_PARAM_H
-#define _ASM_MICROBLAZE_PARAM_H
-
-#ifdef __KERNEL__
-#define HZ CONFIG_HZ /* internal kernel timer frequency */
-#define USER_HZ 100 /* for user interfaces in "ticks" */
-#define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
-#endif /* __KERNEL__ */
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#endif /* _ASM_MICROBLAZE_PARAM_H */
+#include <asm-generic/param.h>
diff --git a/arch/microblaze/include/asm/parport.h b/arch/microblaze/include/asm/parport.h
new file mode 100644
index 000000000000..cf252af64590
--- /dev/null
+++ b/arch/microblaze/include/asm/parport.h
@@ -0,0 +1 @@
+#include <asm-generic/parport.h>
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index ca03794cf3f0..9f0df5faf2c8 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -1 +1 @@
-#include <linux/io.h>
+#include <asm-generic/pci.h>
diff --git a/arch/microblaze/include/asm/posix_types.h b/arch/microblaze/include/asm/posix_types.h
index 8c758b231f37..0e15039673e3 100644
--- a/arch/microblaze/include/asm/posix_types.h
+++ b/arch/microblaze/include/asm/posix_types.h
@@ -1,73 +1,9 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
#ifndef _ASM_MICROBLAZE_POSIX_TYPES_H
#define _ASM_MICROBLAZE_POSIX_TYPES_H
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc. Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
-typedef unsigned int __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef unsigned int __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char *__kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-
-typedef unsigned int __kernel_old_uid_t;
-typedef unsigned int __kernel_old_gid_t;
-typedef unsigned int __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
-#if defined(__KERNEL__) || defined(__USE_ALL)
- int val[2];
-#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
- int __val[2];
-#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
-
-#undef __FD_SET
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-
-#undef __FD_CLR
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-
-#undef __FD_ISSET
-#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
-
-#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) (memset(fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
+#define __kernel_mode_t __kernel_mode_t
-#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+#include <asm-generic/posix_types.h>
#endif /* _ASM_MICROBLAZE_POSIX_TYPES_H */
diff --git a/arch/microblaze/include/asm/scatterlist.h b/arch/microblaze/include/asm/scatterlist.h
index 08ff1d049b42..35d786fe93ae 100644
--- a/arch/microblaze/include/asm/scatterlist.h
+++ b/arch/microblaze/include/asm/scatterlist.h
@@ -1,28 +1 @@
-/*
- * Copyright (C) 2008 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SCATTERLIST_H
-#define _ASM_MICROBLAZE_SCATTERLIST_H
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- dma_addr_t dma_address;
- unsigned int offset;
- unsigned int length;
-};
-
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
-
-#define ISA_DMA_THRESHOLD (~0UL)
-
-#endif /* _ASM_MICROBLAZE_SCATTERLIST_H */
+#include <asm-generic/scatterlist.h>
diff --git a/arch/microblaze/include/asm/sembuf.h b/arch/microblaze/include/asm/sembuf.h
index b804ed71a57e..7673b83cfef7 100644
--- a/arch/microblaze/include/asm/sembuf.h
+++ b/arch/microblaze/include/asm/sembuf.h
@@ -1,34 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SEMBUF_H
-#define _ASM_MICROBLAZE_SEMBUF_H
-
-/*
- * The semid64_ds structure for microblaze architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct semid64_ds {
- struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- unsigned long __unused1;
- __kernel_time_t sem_ctime; /* last change time */
- unsigned long __unused2;
- unsigned long sem_nsems; /* no. of semaphores in array */
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-
-#endif /* _ASM_MICROBLAZE_SEMBUF_H */
+#include <asm-generic/sembuf.h>
diff --git a/arch/microblaze/include/asm/serial.h b/arch/microblaze/include/asm/serial.h
index 39bfc8ce6af5..a0cb0caff152 100644
--- a/arch/microblaze/include/asm/serial.h
+++ b/arch/microblaze/include/asm/serial.h
@@ -1,14 +1 @@
-/*
- * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SERIAL_H
-#define _ASM_MICROBLAZE_SERIAL_H
-
-# define BASE_BAUD (1843200 / 16)
-
-#endif /* _ASM_MICROBLAZE_SERIAL_H */
+#include <asm-generic/serial.h>
diff --git a/arch/microblaze/include/asm/shmbuf.h b/arch/microblaze/include/asm/shmbuf.h
index f829c5843618..83c05fc2de38 100644
--- a/arch/microblaze/include/asm/shmbuf.h
+++ b/arch/microblaze/include/asm/shmbuf.h
@@ -1,42 +1 @@
-#ifndef _ASM_MICROBLAZE_SHMBUF_H
-#define _ASM_MICROBLAZE_SHMBUF_H
-
-/*
- * The shmid64_ds structure for microblaze architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct shmid64_ds {
- struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
- unsigned long __unused1;
- __kernel_time_t shm_dtime; /* last detach time */
- unsigned long __unused2;
- __kernel_time_t shm_ctime; /* last change time */
- unsigned long __unused3;
- __kernel_pid_t shm_cpid; /* pid of creator */
- __kernel_pid_t shm_lpid; /* pid of last operator */
- unsigned long shm_nattch; /* no. of current attaches */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-struct shminfo64 {
- unsigned long shmmax;
- unsigned long shmmin;
- unsigned long shmmni;
- unsigned long shmseg;
- unsigned long shmall;
- unsigned long __unused1;
- unsigned long __unused2;
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _ASM_MICROBLAZE_SHMBUF_H */
+#include <asm-generic/shmbuf.h>
diff --git a/arch/microblaze/include/asm/shmparam.h b/arch/microblaze/include/asm/shmparam.h
index 9f5fc2b3b6a3..93f30deb95d0 100644
--- a/arch/microblaze/include/asm/shmparam.h
+++ b/arch/microblaze/include/asm/shmparam.h
@@ -1,6 +1 @@
-#ifndef _ASM_MICROBLAZE_SHMPARAM_H
-#define _ASM_MICROBLAZE_SHMPARAM_H
-
-#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
-
-#endif /* _ASM_MICROBLAZE_SHMPARAM_H */
+#include <asm-generic/shmparam.h>
diff --git a/arch/microblaze/include/asm/siginfo.h b/arch/microblaze/include/asm/siginfo.h
index f162911a8f50..0815d29d82e5 100644
--- a/arch/microblaze/include/asm/siginfo.h
+++ b/arch/microblaze/include/asm/siginfo.h
@@ -1,15 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SIGINFO_H
-#define _ASM_MICROBLAZE_SIGINFO_H
-
-#include <linux/types.h>
#include <asm-generic/siginfo.h>
-
-#endif /* _ASM_MICROBLAZE_SIGINFO_H */
diff --git a/arch/microblaze/include/asm/signal.h b/arch/microblaze/include/asm/signal.h
index 46bc2267d949..7b1573ce19de 100644
--- a/arch/microblaze/include/asm/signal.h
+++ b/arch/microblaze/include/asm/signal.h
@@ -1,165 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- * Yasushi SHOJI <yashi@atmark-techno.com>
- * Tetsuya OHKAWA <tetsuya@atmark-techno.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SIGNAL_H
-#define _ASM_MICROBLAZE_SIGNAL_H
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-# ifndef __ASSEMBLY__
-# include <linux/types.h>
-# include <asm-generic/signal-defs.h>
-
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-# ifdef __KERNEL__
-/*
- * Most things should be clean enough to redefine this at will, if care
- * is taken to make libc match.
- */
-# define _NSIG 64
-# define _NSIG_BPW 32
-# define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-
-typedef struct {
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-struct old_sigaction {
- __sighandler_t sa_handler;
- old_sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
-# include <asm/sigcontext.h>
-# undef __HAVE_ARCH_SIG_BITOPS
-
-# define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
-# else /* !__KERNEL__ */
-
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-# define NSIG 32
-typedef unsigned long sigset_t;
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-# define sa_handler _u._sa_handler
-# define sa_sigaction _u._sa_sigaction
-
-# endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
- void *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-# endif /* __ASSEMBLY__ */
-#endif /* _ASM_MICROBLAZE_SIGNAL_H */
+#include <asm-generic/signal.h>
diff --git a/arch/microblaze/include/asm/socket.h b/arch/microblaze/include/asm/socket.h
index 825936860314..6b71384b9d8b 100644
--- a/arch/microblaze/include/asm/socket.h
+++ b/arch/microblaze/include/asm/socket.h
@@ -1,69 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SOCKET_H
-#define _ASM_MICROBLAZE_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockoptions(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#endif /* _ASM_MICROBLAZE_SOCKET_H */
+#include <asm-generic/socket.h>
diff --git a/arch/microblaze/include/asm/sockios.h b/arch/microblaze/include/asm/sockios.h
index 9fff57a701e1..def6d4746ee7 100644
--- a/arch/microblaze/include/asm/sockios.h
+++ b/arch/microblaze/include/asm/sockios.h
@@ -1,23 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SOCKIOS_H
-#define _ASM_MICROBLAZE_SOCKIOS_H
-
-#include <linux/ioctl.h>
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* _ASM_MICROBLAZE_SOCKIOS_H */
+#include <asm-generic/sockios.h>
diff --git a/arch/microblaze/include/asm/stat.h b/arch/microblaze/include/asm/stat.h
index a15f77520bfd..3dc90fa92c70 100644
--- a/arch/microblaze/include/asm/stat.h
+++ b/arch/microblaze/include/asm/stat.h
@@ -1,68 +1 @@
-/*
- * Microblaze stat structure
- *
- * Copyright (C) 2001,02,03 NEC Electronics Corporation
- * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file COPYING in the main directory of this
- * archive for more details.
- *
- * Written by Miles Bader <miles@gnu.org>
- */
-
-#ifndef _ASM_MICROBLAZE_STAT_H
-#define _ASM_MICROBLAZE_STAT_H
-
-#include <linux/posix_types.h>
-
-#define STAT_HAVE_NSEC 1
-
-struct stat {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned long st_rdev;
- unsigned long __pad1;
- long st_size;
- int st_blksize;
- int __pad2;
- long st_blocks;
- int st_atime;
- unsigned int st_atime_nsec;
- int st_mtime;
- unsigned int st_mtime_nsec;
- int st_ctime;
- unsigned int st_ctime_nsec;
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-struct stat64 {
- unsigned long long st_dev; /* Device. */
- unsigned long long st_ino; /* File serial number. */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- unsigned long long st_rdev; /* Device number, if device. */
- unsigned long long __pad1;
- long long st_size; /* Size of file, in bytes. */
- int st_blksize; /* Optimal block size for I/O. */
- int __pad2;
- long long st_blocks; /* Number 512-byte blocks allocated. */
- int st_atime; /* Time of last access. */
- unsigned int st_atime_nsec;
- int st_mtime; /* Time of last modification. */
- unsigned int st_mtime_nsec;
- int st_ctime; /* Time of last status change. */
- unsigned int st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
-};
-
-#endif /* _ASM_MICROBLAZE_STAT_H */
-
+#include <asm-generic/stat.h>
diff --git a/arch/microblaze/include/asm/swab.h b/arch/microblaze/include/asm/swab.h
index b375d7b65ad7..7847e563ab66 100644
--- a/arch/microblaze/include/asm/swab.h
+++ b/arch/microblaze/include/asm/swab.h
@@ -1,8 +1 @@
-#ifndef _ASM_MICROBLAZE_SWAB_H
-#define _ASM_MICROBLAZE_SWAB_H
-
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#define __SWAB_64_THRU_32__
-#endif
-
-#endif /* _ASM_MICROBLAZE_SWAB_H */
+#include <asm-generic/swab.h>
diff --git a/arch/microblaze/include/asm/syscalls.h b/arch/microblaze/include/asm/syscalls.h
index ddea9eb31f8d..720761cc741f 100644
--- a/arch/microblaze/include/asm/syscalls.h
+++ b/arch/microblaze/include/asm/syscalls.h
@@ -1,48 +1,8 @@
#ifndef __ASM_MICROBLAZE_SYSCALLS_H
-#define __ASM_MICROBLAZE_SYSCALLS_H
-#ifdef __KERNEL__
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <linux/types.h>
-#include <linux/signal.h>
+asmlinkage long sys_clone(int flags, unsigned long stack, struct pt_regs *regs);
+#define sys_clone sys_clone
-/* FIXME will be removed */
-asmlinkage int sys_ipc(uint call, int first, int second,
- int third, void *ptr, long fifth);
+#include <asm-generic/syscalls.h>
-struct pt_regs;
-asmlinkage int sys_vfork(struct pt_regs *regs);
-asmlinkage int sys_clone(int flags, unsigned long stack, struct pt_regs *regs);
-asmlinkage int sys_execve(char __user *filenamei, char __user *__user *argv,
- char __user *__user *envp, struct pt_regs *regs);
-
-asmlinkage unsigned long sys_mmap2(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
-
-asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t offset);
-
-/* from signal.c */
-asmlinkage int sys_sigsuspend(old_sigset_t mask, struct pt_regs *regs);
-
-asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
- struct pt_regs *regs);
-
-asmlinkage int sys_sigaction(int sig, const struct old_sigaction *act,
- struct old_sigaction *oact);
-
-asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
- struct sigaction __user *oact, size_t sigsetsize);
-
-asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
- struct pt_regs *regs);
-
-asmlinkage int sys_sigreturn(struct pt_regs *regs);
-
-asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
-
-#endif /* __KERNEL__ */
#endif /* __ASM_MICROBLAZE_SYSCALLS_H */
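The syscalls.h hunk keeps one arch-specific prototype and then defers to the generic list. The "#define sys_clone sys_clone" line is the override idiom this relies on: asm-generic/syscalls.h is expected to guard each declaration with "#ifndef sys_xxx", so a prior definition of the macro suppresses the generic prototype. A compilable toy analogue of that idiom (all names below are made up, nothing here is kernel API):

#include <stdio.h>

/* "Arch header": provides its own implementation and claims the name. */
static long arch_example_call(void) { return 42; }
#define example_call arch_example_call

/* "Generic header": only supplies a default if the name is still free. */
#ifndef example_call
static long example_call(void) { return 0; }
#endif

int main(void)
{
	printf("%ld\n", (long)example_call());	/* prints 42: the arch version won */
	return 0;
}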
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index c4e308850b5d..b1ed61590660 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -13,6 +13,9 @@
#include <asm/setup.h>
#include <asm/irqflags.h>
+#include <asm-generic/cmpxchg.h>
+#include <asm-generic/cmpxchg-local.h>
+
struct task_struct;
struct thread_info;
diff --git a/arch/microblaze/include/asm/termbits.h b/arch/microblaze/include/asm/termbits.h
index a1b64bc4724a..3935b106de79 100644
--- a/arch/microblaze/include/asm/termbits.h
+++ b/arch/microblaze/include/asm/termbits.h
@@ -1,203 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_TERMBITS_H
-#define _ASM_MICROBLAZE_TERMBITS_H
-
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-/* c_iflag bits */
-
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
-
-/* c_cflag bit meaning */
-
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define BOTHER 0010000
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate (not used) */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-
-/* tcflow() and TCXONC use these */
-
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-#endif /* _ASM_MICROBLAZE_TERMBITS_H */
+#include <asm-generic/termbits.h>
diff --git a/arch/microblaze/include/asm/termios.h b/arch/microblaze/include/asm/termios.h
index 47a46d1fbe26..280d78a9d966 100644
--- a/arch/microblaze/include/asm/termios.h
+++ b/arch/microblaze/include/asm/termios.h
@@ -1,88 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_TERMIOS_H
-#define _ASM_MICROBLAZE_TERMIOS_H
-
-#include <linux/string.h>
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-#ifdef __KERNEL__
-/* intr=^C quit=^| erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-#endif
-
-/* Modem lines */
-
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-/* Line disciplines */
-
-#define N_TTY 0
-#define N_SLIP 1
-#define N_MOUSE 2
-#define N_PPP 3
-#define N_STRIP 4
-#define N_AX25 5
-#define N_X25 6 /* X.25 async */
-#define N_6PACK 7
-#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
-#define N_R3964 9 /* Reserved for Simatic R3964 module */
-#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
-#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */
-#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards
- about SMS messages */
-#define N_HDLC 13 /* synchronous HDLC */
-#define N_SYNC_PPP 14
-#define N_HCI 15 /* Bluetooth HCI UART */
-
-#ifdef __KERNEL__
-
-#include <asm-generic/termios-base.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_MICROBLAZE_TERMIOS_H */
+#include <asm-generic/termios.h>
diff --git a/arch/microblaze/include/asm/timex.h b/arch/microblaze/include/asm/timex.h
index 678525dc6d0b..befcf3de5532 100644
--- a/arch/microblaze/include/asm/timex.h
+++ b/arch/microblaze/include/asm/timex.h
@@ -9,10 +9,8 @@
#ifndef _ASM_MICROBLAZE_TIMEX_H
#define _ASM_MICROBLAZE_TIMEX_H
-#define CLOCK_TICK_RATE 1000 /* Timer input freq. */
-
-typedef unsigned long cycles_t;
+#include <asm-generic/timex.h>
-#define get_cycles() (0)
+#define CLOCK_TICK_RATE 1000 /* Timer input freq. */
#endif /* _ASM_TIMEX_H */
diff --git a/arch/microblaze/include/asm/types.h b/arch/microblaze/include/asm/types.h
index bebc018318f5..b9e79bc580dd 100644
--- a/arch/microblaze/include/asm/types.h
+++ b/arch/microblaze/include/asm/types.h
@@ -1,38 +1 @@
-/*
- * Copyright (C) Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_TYPES_H
-#define _ASM_MICROBLAZE_TYPES_H
-
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-
-#include <asm-generic/int-ll64.h>
-
-# ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-# ifdef __KERNEL__
-# define BITS_PER_LONG 32
-
-/* Dma addresses are 32-bits wide. */
-
-typedef u32 dma_addr_t;
-
-# endif/* __KERNEL__ */
-# endif /* __ASSEMBLY__ */
-#endif /* _ASM_MICROBLAZE_TYPES_H */
+#include <asm-generic/types.h>
diff --git a/arch/microblaze/include/asm/ucontext.h b/arch/microblaze/include/asm/ucontext.h
index 11f6bb3ae3a4..9bc07b9f30fb 100644
--- a/arch/microblaze/include/asm/ucontext.h
+++ b/arch/microblaze/include/asm/ucontext.h
@@ -1,22 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_UCONTEXT_H
-#define _ASM_MICROBLAZE_UCONTEXT_H
-
-#include <asm/sigcontext.h>
-
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- struct sigcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-#endif /* _ASM_MICROBLAZE_UCONTEXT_H */
+#include <asm-generic/ucontext.h>
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index b5e2f5fa5c53..0b852327c0e7 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -380,8 +380,10 @@
#define __NR_accept04 362 /* new */
#define __NR_preadv 363 /* new */
#define __NR_pwritev 364 /* new */
+#define __NR_rt_tgsigqueueinfo 365 /* new */
+#define __NR_perf_counter_open 366 /* new */
-#define __NR_syscalls 365
+#define __NR_syscalls 367
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@@ -408,7 +410,7 @@
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
-/* #define __ARCH_WANT_SYS_RT_SIGSUSPEND */
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
/*
* "Conditional" syscalls
diff --git a/arch/microblaze/include/asm/vga.h b/arch/microblaze/include/asm/vga.h
index 8b137891791f..89d82fd8fcf1 100644
--- a/arch/microblaze/include/asm/vga.h
+++ b/arch/microblaze/include/asm/vga.h
@@ -1 +1 @@
-
+#include <asm-generic/vga.h>
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 1fce6b803f54..9083d85376a4 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -551,30 +551,22 @@ no_work_pending:
rtid r14, 0
nop
-sys_vfork_wrapper:
- brid sys_vfork
+sys_vfork:
+ brid microblaze_vfork
addk r5, r1, r0
-sys_clone_wrapper:
- brid sys_clone
+sys_clone:
+ brid microblaze_clone
addk r7, r1, r0
-sys_execve_wrapper:
- brid sys_execve
+sys_execve:
+ brid microblaze_execve
addk r8, r1, r0
-sys_sigreturn_wrapper:
- brid sys_sigreturn
- addk r5, r1, r0
-
sys_rt_sigreturn_wrapper:
brid sys_rt_sigreturn
addk r5, r1, r0
-sys_sigsuspend_wrapper:
- brid sys_rt_sigsuspend
- addk r6, r1, r0
-
sys_rt_sigsuspend_wrapper:
brid sys_rt_sigsuspend
addk r7, r1, r0
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 91a0e7b185dd..c7353e79f4a2 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -429,12 +429,11 @@ C_ENTRY(ret_from_fork):
brid ret_from_trap; /* Do normal trap return */
nop;
-C_ENTRY(sys_vfork_wrapper):
+C_ENTRY(sys_vfork):
+ brid microblaze_vfork /* Do real work (tail-call) */
la r5, r1, PTO
- brid sys_vfork /* Do real work (tail-call) */
- nop
-C_ENTRY(sys_clone_wrapper):
+C_ENTRY(sys_clone):
bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
lwi r6, r1, PTO+PT_R1; /* If so, use parent's stack ptr */
1: la r7, r1, PTO; /* Arg 2: parent context */
@@ -444,20 +443,9 @@ C_ENTRY(sys_clone_wrapper):
brid do_fork /* Do real work (tail-call) */
nop;
-C_ENTRY(sys_execve_wrapper):
+C_ENTRY(sys_execve):
la r8, r1, PTO; /* add user context as 4th arg */
- brid sys_execve; /* Do real work (tail-call).*/
- nop;
-
-C_ENTRY(sys_sigsuspend_wrapper):
- swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- swi r4, r1, PTO+PT_R4;
- la r6, r1, PTO; /* add user context as 2nd arg */
- bralid r15, sys_sigsuspend; /* Do real work.*/
- nop;
- lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- lwi r4, r1, PTO+PT_R4;
- bri ret_from_trap /* fall through will not work here due to align */
+ brid microblaze_execve; /* Do real work (tail-call).*/
nop;
C_ENTRY(sys_rt_sigsuspend_wrapper):
@@ -471,18 +459,6 @@ C_ENTRY(sys_rt_sigsuspend_wrapper):
bri ret_from_trap /* fall through will not work here due to align */
nop;
-
-C_ENTRY(sys_sigreturn_wrapper):
- swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- swi r4, r1, PTO+PT_R4;
- la r5, r1, PTO; /* add user context as 1st arg */
- brlid r15, sys_sigreturn; /* Do real work.*/
- nop;
- lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- lwi r4, r1, PTO+PT_R4;
- bri ret_from_trap /* fall through will not work here due to align */
- nop;
-
C_ENTRY(sys_rt_sigreturn_wrapper):
swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
swi r4, r1, PTO+PT_R4;
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 4c0e6521b114..493819c25fba 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -45,91 +45,8 @@
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_sycall);
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-asmlinkage int
-sys_sigsuspend(old_sigset_t mask, struct pt_regs *regs)
-{
- sigset_t saveset;
-
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->r3 = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(regs, &saveset, 1))
- return -EINTR;
- }
-}
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
- struct pt_regs *regs)
-{
- sigset_t saveset, newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->r3 = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(regs, &saveset, 1))
- return -EINTR;
- }
-}
-
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction *act,
- struct old_sigaction *oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
- if (act) {
- old_sigset_t mask;
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
- return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
- siginitset(&new_ka.sa.sa_mask, mask);
- }
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
- return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
- }
-
- return ret;
-}
-
-asmlinkage int
+asmlinkage long
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
struct pt_regs *regs)
{
@@ -139,7 +56,6 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
/*
* Do a signal return; undo the signal stack.
*/
-
struct sigframe {
struct sigcontext sc;
unsigned long extramask[_NSIG_WORDS-1];
@@ -176,40 +92,7 @@ static int restore_sigcontext(struct pt_regs *regs,
return err;
}
-asmlinkage int sys_sigreturn(struct pt_regs *regs)
-{
- struct sigframe *frame =
- (struct sigframe *)(regs->r1 + STATE_SAVE_ARG_SPACE);
-
- sigset_t set;
- int rval;
-
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
-
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_NSIG_WORDS > 1
- && __copy_from_user(&set.sig[1], &frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
-
- sigdelsetmask(&set, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- if (restore_sigcontext(regs, &frame->sc, &rval))
- goto badframe;
- return rval;
-
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
{
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)(regs->r1 + STATE_SAVE_ARG_SPACE);
@@ -324,21 +207,17 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
/* minus 8 is offset to cater for "rtsd r15,8" */
- if (ka->sa.sa_flags & SA_RESTORER) {
- regs->r15 = ((unsigned long)ka->sa.sa_restorer)-8;
- } else {
- /* addi r12, r0, __NR_sigreturn */
- err |= __put_user(0x31800000 | __NR_rt_sigreturn ,
- frame->tramp + 0);
- /* brki r14, 0x8 */
- err |= __put_user(0xb9cc0008, frame->tramp + 1);
-
- /* Return from sighandler will jump to the tramp.
- Negative 8 offset because return is rtsd r15, 8 */
- regs->r15 = ((unsigned long)frame->tramp)-8;
-
- __invalidate_cache_sigtramp((unsigned long)frame->tramp);
- }
+ /* addi r12, r0, __NR_sigreturn */
+ err |= __put_user(0x31800000 | __NR_rt_sigreturn ,
+ frame->tramp + 0);
+ /* brki r14, 0x8 */
+ err |= __put_user(0xb9cc0008, frame->tramp + 1);
+
+ /* Return from sighandler will jump to the tramp.
+ Negative 8 offset because return is rtsd r15, 8 */
+ regs->r15 = ((unsigned long)frame->tramp)-8;
+
+ __invalidate_cache_sigtramp((unsigned long)frame->tramp);
if (err)
goto give_sigsegv;
@@ -405,7 +284,7 @@ do_restart:
* OK, we're invoking a handler
*/
-static void
+static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
@@ -426,6 +305,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
+ return 1;
}
/*
@@ -456,7 +336,9 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_syscall)
if (kernel_mode(regs))
return 1;
- if (!oldset)
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+ oldset = &current->saved_sigmask;
+ else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -464,13 +346,31 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_syscall)
/* Whee! Actually deliver the signal. */
if (in_syscall)
handle_restart(regs, &ka, 1);
- handle_signal(signr, &ka, &info, oldset, regs);
+ if (handle_signal(signr, &ka, &info, oldset, regs)) {
+ /*
+ * A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &=
+ ~TS_RESTORE_SIGMASK;
+ }
return 1;
}
if (in_syscall)
handle_restart(regs, NULL, 0);
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+ * back.
+ */
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+
/* Did we come from a system call? */
return 0;
}
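The signal.c rework drops the hand-rolled sys_sigsuspend/sys_rt_sigsuspend/sys_sigaction/sys_sigreturn in favour of the generic paths (enabled by __ARCH_WANT_SYS_RT_SIGSUSPEND in the unistd.h hunk above) and teaches do_signal() the TS_RESTORE_SIGMASK protocol: when the flag is set, current->saved_sigmask is the mask to report to the handler, the flag is cleared once a signal frame has been set up, and the saved mask is reinstated with sigprocmask() if no signal was delivered. The userspace-visible contract this implements is the familiar one from sigsuspend(); the following is a small, runnable POSIX analogue (nothing microblaze-specific), just to show the atomic mask swap and restore that saved_sigmask bookkeeping provides:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	(void)sig;
	got_usr1 = 1;
}

int main(void)
{
	struct sigaction sa;
	sigset_t block, old, wait_mask;

	sa.sa_handler = on_usr1;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	/* Block SIGUSR1, remembering the old mask. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	kill(getpid(), SIGUSR1);	/* now pending, but blocked */

	/* Atomically install 'old' and sleep; when sigsuspend() returns, the
	 * previous mask is back in place - the bookkeeping the patch moves
	 * into do_signal() via TS_RESTORE_SIGMASK and saved_sigmask. */
	wait_mask = old;
	sigsuspend(&wait_mask);

	printf("handler ran: %d\n", (int)got_usr1);
	return 0;
}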
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 31905ff590b7..8c9ebac5da10 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -39,7 +39,7 @@
*
* This is really horribly ugly. This will be removed with the new toolchain.
*/
-asmlinkage int
+asmlinkage long
sys_ipc(uint call, int first, int second, int third, void *ptr, long fifth)
{
int version, ret;
@@ -134,20 +134,20 @@ sys_ipc(uint call, int first, int second, int third, void *ptr, long fifth)
return ret;
}
-asmlinkage int sys_vfork(struct pt_regs *regs)
+asmlinkage long microblaze_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->r1,
regs, 0, NULL, NULL);
}
-asmlinkage int sys_clone(int flags, unsigned long stack, struct pt_regs *regs)
+asmlinkage long microblaze_clone(int flags, unsigned long stack, struct pt_regs *regs)
{
if (!stack)
stack = regs->r1;
return do_fork(flags, stack, regs, 0, NULL, NULL);
}
-asmlinkage int sys_execve(char __user *filenamei, char __user *__user *argv,
+asmlinkage long microblaze_execve(char __user *filenamei, char __user *__user *argv,
char __user *__user *envp, struct pt_regs *regs)
{
int error;
@@ -163,8 +163,8 @@ out:
return error;
}
-asmlinkage unsigned long
-sys_mmap2(unsigned long addr, size_t len,
+asmlinkage long
+sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
@@ -189,18 +189,18 @@ out:
return ret;
}
-asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
- unsigned long fd, off_t offset)
+ unsigned long fd, off_t pgoff)
{
int err = -EINVAL;
- if (offset & ~PAGE_MASK) {
+ if (pgoff & ~PAGE_MASK) {
printk(KERN_INFO "no pagemask in mmap\r\n");
goto out;
}
- err = sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
out:
return err;
}
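In the sys_microblaze.c hunk, sys_mmap() still takes a byte offset even though the parameter is now named pgoff: it rejects anything that is not page aligned and converts the byte offset to the page index expected by sys_mmap2(). A quick standalone illustration of that check and conversion, assuming 4 KiB pages (PAGE_SHIFT of 12) purely for the example:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages for this sketch */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long offset = 0x3000;		/* byte offset as passed to the old mmap() */

	if (offset & ~PAGE_MASK) {
		puts("rejected: offset not page aligned");
		return 1;
	}
	printf("pgoff handed to sys_mmap2: %lu\n", offset >> PAGE_SHIFT);	/* 3 */
	return 0;
}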
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 376d1789f7c0..31b32a6c5f4e 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -15,7 +15,7 @@ ENTRY(sys_call_table)
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
- .long sys_execve_wrapper
+ .long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
@@ -71,12 +71,12 @@ ENTRY(sys_call_table)
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
- .long sys_sigaction
+ .long sys_ni_syscall /* sys_sigaction */
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid /* 70 */
.long sys_setregid
- .long sys_sigsuspend_wrapper
+ .long sys_ni_syscall /* sys_sigsuspend_wrapper */
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
@@ -123,8 +123,8 @@ ENTRY(sys_call_table)
.long sys_sysinfo
.long sys_ipc
.long sys_fsync
- .long sys_sigreturn_wrapper
- .long sys_clone_wrapper /* 120 */
+ .long sys_ni_syscall /* sys_sigreturn_wrapper */
+ .long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_ni_syscall /* modify_ldt */
@@ -194,7 +194,7 @@ ENTRY(sys_call_table)
.long sys_sendfile
.long sys_ni_syscall /* reserved for streams1 */
.long sys_ni_syscall /* reserved for streams2 */
- .long sys_vfork_wrapper /* 190 */
+ .long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2 /* mmap2 */
.long sys_truncate64
@@ -369,3 +369,5 @@ ENTRY(sys_call_table)
.long sys_ni_syscall
.long sys_ni_syscall
.long sys_ni_syscall
+ .long sys_rt_tgsigqueueinfo /* 365 */
+ .long sys_perf_counter_open
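The unistd.h and syscall_table.S changes belong together: slot 365 in sys_call_table must line up with __NR_rt_tgsigqueueinfo and slot 366 with __NR_perf_counter_open, and __NR_syscalls is bumped to one past the last valid number, which the syscall entry path uses as the bound for rejecting unknown numbers. Retired entries (sys_sigaction and the sigsuspend/sigreturn wrappers) are not deleted but pointed at sys_ni_syscall, so the numbering of everything after them stays stable. From userspace a newly wired slot is reachable with syscall(2); the sketch below hard-codes the microblaze value shown above and is only meant to illustrate the plumbing (other architectures use different numbers, and the arguments are deliberately dummy ones that should just produce an error return):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Microblaze-specific number taken from the unistd.h hunk above. */
#define NR_rt_tgsigqueueinfo_mb	365

int main(void)
{
	/* Dummy arguments: expect an error return, not a delivered signal. */
	long ret = syscall(NR_rt_tgsigqueueinfo_mb, 0, 0, 0, NULL);

	printf("syscall(365, ...) returned %ld\n", ret);
	return 0;
}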
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index d34d38dcd12c..a207543c5927 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -162,4 +162,6 @@ SECTIONS {
}
. = ALIGN(4096);
_end = .;
+
+ /DISCARD/ : { *(.discard) }
}
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index 71c8cb6c9e43..b579db068c06 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile
#
-lib-y := memset.o checksum.o
+lib-y := memset.o
ifeq ($(CONFIG_OPT_LIB_ASM),y)
lib-y += fastcopy.o
diff --git a/arch/microblaze/lib/checksum.c b/arch/microblaze/lib/checksum.c
deleted file mode 100644
index f08e74591418..000000000000
--- a/arch/microblaze/lib/checksum.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- *
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * IP/TCP/UDP checksumming routines
- *
- * Authors: Jorge Cwik, <jorge@laser.satlink.net>
- * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- * Tom May, <ftom@netcom.com>
- * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
- * Lots of code moved from tcp.c and ip.c; see those files
- * for more names.
- *
- * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
- * Fixed some nasty bugs, causing some horrible crashes.
- * A: At some points, the sum (%0) was used as
- * length-counter instead of the length counter
- * (%1). Thanks to Roman Hodek for pointing this out.
- * B: GCC seems to mess up if one uses too many
- * data-registers to hold input values and one tries to
- * specify d0 and d1 as scratch registers. Letting gcc
- * choose these registers itself solves the problem.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
- kills, so most of the assembly has to go. */
-
-#include <linux/module.h>
-#include <net/checksum.h>
-
-#include <asm/byteorder.h>
-
-static inline unsigned short from32to16(unsigned long x)
-{
- /* add up 16-bit and 16-bit for 16+c bit */
- x = (x & 0xffff) + (x >> 16);
- /* add up carry.. */
- x = (x & 0xffff) + (x >> 16);
- return x;
-}
-
-static unsigned int do_csum(const unsigned char *buff, int len)
-{
- int odd, count;
- unsigned long result = 0;
-
- if (len <= 0)
- goto out;
- odd = 1 & (unsigned long) buff;
- if (odd) {
- result = *buff;
- len--;
- buff++;
- }
- count = len >> 1; /* nr of 16-bit words.. */
- if (count) {
- if (2 & (unsigned long) buff) {
- result += *(unsigned short *) buff;
- count--;
- len -= 2;
- buff += 2;
- }
- count >>= 1; /* nr of 32-bit words.. */
- if (count) {
- unsigned long carry = 0;
- do {
- unsigned long w = *(unsigned long *) buff;
- count--;
- buff += 4;
- result += carry;
- result += w;
- carry = (w > result);
- } while (count);
- result += carry;
- result = (result & 0xffff) + (result >> 16);
- }
- if (len & 2) {
- result += *(unsigned short *) buff;
- buff += 2;
- }
- }
- if (len & 1)
- result += (*buff << 8);
- result = from32to16(result);
- if (odd)
- result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
- return result;
-}
-
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
- return (__force __sum16)~do_csum(iph, ihl*4);
-}
-EXPORT_SYMBOL(ip_fast_csum);
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum wsum)
-{
- unsigned int sum = (__force unsigned int)wsum;
- unsigned int result = do_csum(buff, len);
-
- /* add in old sum, and carry.. */
- result += sum;
- if (sum > result)
- result += 1;
- return (__force __wsum)result;
-}
-EXPORT_SYMBOL(csum_partial);
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-__sum16 ip_compute_csum(const void *buff, int len)
-{
- return (__force __sum16)~do_csum(buff, len);
-}
-EXPORT_SYMBOL(ip_compute_csum);
-
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *csum_err)
-{
- int missing;
-
- missing = __copy_from_user(dst, src, len);
- if (missing) {
- memset(dst + len - missing, 0, missing);
- *csum_err = -EFAULT;
- } else
- *csum_err = 0;
-
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-__wsum
-csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy);
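The deleted arch/microblaze/lib/checksum.c (together with the drop of checksum.o from lib-y in the Makefile hunk above) removes microblaze's private copy of the byte-at-a-time Internet checksum. The part worth remembering from that code is the carry-folding step: a 32-bit running sum is squeezed back into 16 bits by adding the high half into the low half twice, so any carry produced by the first addition is absorbed by the second. A small runnable reproduction of just that step, lifted from the removed from32to16():

#include <stdio.h>

/* The folding step from the removed do_csum(): add the carries out of the
 * low 16 bits back in, twice, so the running 32-bit sum fits in 16 bits. */
static unsigned short from32to16(unsigned long x)
{
	x = (x & 0xffff) + (x >> 16);	/* first fold: may itself carry */
	x = (x & 0xffff) + (x >> 16);	/* second fold absorbs that carry */
	return (unsigned short)x;
}

int main(void)
{
	unsigned long sum = 0x0001fffeUL;	/* 16-bit partial sum plus one carry */

	printf("folded: 0x%04x\n", from32to16(sum));	/* 0xffff */
	return 0;
}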
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8c4be1f301cf..3ca0fe1a9123 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -22,6 +22,26 @@ choice
config MACH_ALCHEMY
bool "Alchemy processor based machines"
+config AR7
+ bool "Texas Instruments AR7"
+ select BOOT_ELF32
+ select DMA_NONCOHERENT
+ select CEVT_R4K
+ select CSRC_R4K
+ select IRQ_CPU
+ select NO_EXCEPT_FILL
+ select SWAP_IO_SPACE
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select GENERIC_GPIO
+ select GCD
+ select VLYNQ
+ help
+ Support for the Texas Instruments AR7 System-on-a-Chip
+ family: TNETD7100, 7200 and 7300.
+
config BASLER_EXCITE
bool "Basler eXcite smart camera"
select CEVT_R4K
@@ -209,7 +229,7 @@ config MIPS_MALTA
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
- select SYS_SUPPORTS_MIPS_CMP if BROKEN # because SYNC_R4K is broken
+ select SYS_SUPPORTS_MIPS_CMP
select SYS_SUPPORTS_MULTITHREADING
select SYS_SUPPORTS_SMARTMIPS
help
@@ -247,6 +267,7 @@ config MACH_VR41XX
select CEVT_R4K
select CSRC_R4K
select SYS_HAS_CPU_VR41XX
+ select ARCH_REQUIRE_GPIOLIB
config NXP_STB220
bool "NXP STB220 board"
@@ -1635,7 +1656,7 @@ config MIPS_APSP_KSPD
config MIPS_CMP
bool "MIPS CMP framework support"
depends on SYS_SUPPORTS_MIPS_CMP
- select SYNC_R4K if BROKEN
+ select SYNC_R4K
select SYS_SUPPORTS_SMP
select SYS_SUPPORTS_SCHED_SMT if SMP
select WEAK_ORDERING
@@ -2147,11 +2168,11 @@ menu "Power management options"
config ARCH_HIBERNATION_POSSIBLE
def_bool y
- depends on SYS_SUPPORTS_HOTPLUG_CPU
+ depends on SYS_SUPPORTS_HOTPLUG_CPU || !SMP
config ARCH_SUSPEND_POSSIBLE
def_bool y
- depends on SYS_SUPPORTS_HOTPLUG_CPU
+ depends on SYS_SUPPORTS_HOTPLUG_CPU || !SMP
source "kernel/power/Kconfig"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 807572a6a4d2..861da514a468 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -173,6 +173,13 @@ libs-y += arch/mips/fw/lib/
#
#
+# Texas Instruments AR7
+#
+core-$(CONFIG_AR7) += arch/mips/ar7/
+cflags-$(CONFIG_AR7) += -I$(srctree)/arch/mips/include/asm/mach-ar7
+load-$(CONFIG_AR7) += 0xffffffff94100000
+
+#
# Acer PICA 61, Mips Magnum 4000 and Olivetti M700.
#
core-$(CONFIG_MACH_JAZZ) += arch/mips/jazz/
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 33fbae79af5e..7e94a49224d7 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -89,7 +89,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
.irq = AU1000_RTC_MATCH2_INT,
.set_next_event = au1x_rtcmatch2_set_next_event,
.set_mode = au1x_rtcmatch2_set_mode,
- .cpumask = CPU_MASK_ALL_PTR,
+ .cpumask = cpu_all_mask,
};
static struct irqaction au1x_rtcmatch2_irqaction = {
diff --git a/arch/mips/ar7/Makefile b/arch/mips/ar7/Makefile
new file mode 100644
index 000000000000..7435e44b3964
--- /dev/null
+++ b/arch/mips/ar7/Makefile
@@ -0,0 +1,10 @@
+
+obj-y := \
+ prom.o \
+ setup.o \
+ memory.o \
+ irq.o \
+ time.o \
+ platform.o \
+ gpio.o \
+ clock.o
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c
new file mode 100644
index 000000000000..27dc6663f2fa
--- /dev/null
+++ b/arch/mips/ar7/clock.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gcd.h>
+#include <linux/io.h>
+
+#include <asm/addrspace.h>
+#include <asm/mach-ar7/ar7.h>
+
+#define BOOT_PLL_SOURCE_MASK 0x3
+#define CPU_PLL_SOURCE_SHIFT 16
+#define BUS_PLL_SOURCE_SHIFT 14
+#define USB_PLL_SOURCE_SHIFT 18
+#define DSP_PLL_SOURCE_SHIFT 22
+#define BOOT_PLL_SOURCE_AFE 0
+#define BOOT_PLL_SOURCE_BUS 0
+#define BOOT_PLL_SOURCE_REF 1
+#define BOOT_PLL_SOURCE_XTAL 2
+#define BOOT_PLL_SOURCE_CPU 3
+#define BOOT_PLL_BYPASS 0x00000020
+#define BOOT_PLL_ASYNC_MODE 0x02000000
+#define BOOT_PLL_2TO1_MODE 0x00008000
+
+#define TNETD7200_CLOCK_ID_CPU 0
+#define TNETD7200_CLOCK_ID_DSP 1
+#define TNETD7200_CLOCK_ID_USB 2
+
+#define TNETD7200_DEF_CPU_CLK 211000000
+#define TNETD7200_DEF_DSP_CLK 125000000
+#define TNETD7200_DEF_USB_CLK 48000000
+
+struct tnetd7300_clock {
+ u32 ctrl;
+#define PREDIV_MASK 0x001f0000
+#define PREDIV_SHIFT 16
+#define POSTDIV_MASK 0x0000001f
+ u32 unused1[3];
+ u32 pll;
+#define MUL_MASK 0x0000f000
+#define MUL_SHIFT 12
+#define PLL_MODE_MASK 0x00000001
+#define PLL_NDIV 0x00000800
+#define PLL_DIV 0x00000002
+#define PLL_STATUS 0x00000001
+ u32 unused2[3];
+};
+
+struct tnetd7300_clocks {
+ struct tnetd7300_clock bus;
+ struct tnetd7300_clock cpu;
+ struct tnetd7300_clock usb;
+ struct tnetd7300_clock dsp;
+};
+
+struct tnetd7200_clock {
+ u32 ctrl;
+ u32 unused1[3];
+#define DIVISOR_ENABLE_MASK 0x00008000
+ u32 mul;
+ u32 prediv;
+ u32 postdiv;
+ u32 postdiv2;
+ u32 unused2[6];
+ u32 cmd;
+ u32 status;
+ u32 cmden;
+ u32 padding[15];
+};
+
+struct tnetd7200_clocks {
+ struct tnetd7200_clock cpu;
+ struct tnetd7200_clock dsp;
+ struct tnetd7200_clock usb;
+};
+
+int ar7_cpu_clock = 150000000;
+EXPORT_SYMBOL(ar7_cpu_clock);
+int ar7_bus_clock = 125000000;
+EXPORT_SYMBOL(ar7_bus_clock);
+int ar7_dsp_clock;
+EXPORT_SYMBOL(ar7_dsp_clock);
+
+static void approximate(int base, int target, int *prediv,
+ int *postdiv, int *mul)
+{
+ int i, j, k, freq, res = target;
+ for (i = 1; i <= 16; i++)
+ for (j = 1; j <= 32; j++)
+ for (k = 1; k <= 32; k++) {
+ freq = abs(base / j * i / k - target);
+ if (freq < res) {
+ res = freq;
+ *mul = i;
+ *prediv = j;
+ *postdiv = k;
+ }
+ }
+}
+
+static void calculate(int base, int target, int *prediv, int *postdiv,
+ int *mul)
+{
+ int tmp_gcd, tmp_base, tmp_freq;
+
+ for (*prediv = 1; *prediv <= 32; (*prediv)++) {
+ tmp_base = base / *prediv;
+ tmp_gcd = gcd(target, tmp_base);
+ *mul = target / tmp_gcd;
+ *postdiv = tmp_base / tmp_gcd;
+ if ((*mul < 1) || (*mul >= 16))
+ continue;
+ if ((*postdiv > 0) & (*postdiv <= 32))
+ break;
+ }
+
+ if (base / *prediv * *mul / *postdiv != target) {
+ approximate(base, target, prediv, postdiv, mul);
+ tmp_freq = base / *prediv * *mul / *postdiv;
+ printk(KERN_WARNING
+ "Adjusted requested frequency %d to %d\n",
+ target, tmp_freq);
+ }
+
+ printk(KERN_DEBUG "Clocks: prediv: %d, postdiv: %d, mul: %d\n",
+ *prediv, *postdiv, *mul);
+}
+
+static int tnetd7300_dsp_clock(void)
+{
+ u32 didr1, didr2;
+ u8 rev = ar7_chip_rev();
+ didr1 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x18));
+ didr2 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x1c));
+ if (didr2 & (1 << 23))
+ return 0;
+ if ((rev >= 0x23) && (rev != 0x57))
+ return 250000000;
+ if ((((didr2 & 0x1fff) << 10) | ((didr1 & 0xffc00000) >> 22))
+ > 4208000)
+ return 250000000;
+ return 0;
+}
+
+static int tnetd7300_get_clock(u32 shift, struct tnetd7300_clock *clock,
+ u32 *bootcr, u32 bus_clock)
+{
+ int product;
+ int base_clock = AR7_REF_CLOCK;
+ u32 ctrl = readl(&clock->ctrl);
+ u32 pll = readl(&clock->pll);
+ int prediv = ((ctrl & PREDIV_MASK) >> PREDIV_SHIFT) + 1;
+ int postdiv = (ctrl & POSTDIV_MASK) + 1;
+ int divisor = prediv * postdiv;
+ int mul = ((pll & MUL_MASK) >> MUL_SHIFT) + 1;
+
+ switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
+ case BOOT_PLL_SOURCE_BUS:
+ base_clock = bus_clock;
+ break;
+ case BOOT_PLL_SOURCE_REF:
+ base_clock = AR7_REF_CLOCK;
+ break;
+ case BOOT_PLL_SOURCE_XTAL:
+ base_clock = AR7_XTAL_CLOCK;
+ break;
+ case BOOT_PLL_SOURCE_CPU:
+ base_clock = ar7_cpu_clock;
+ break;
+ }
+
+ if (*bootcr & BOOT_PLL_BYPASS)
+ return base_clock / divisor;
+
+ if ((pll & PLL_MODE_MASK) == 0)
+ return (base_clock >> (mul / 16 + 1)) / divisor;
+
+ if ((pll & (PLL_NDIV | PLL_DIV)) == (PLL_NDIV | PLL_DIV)) {
+ product = (mul & 1) ?
+ (base_clock * mul) >> 1 :
+ (base_clock * (mul - 1)) >> 2;
+ return product / divisor;
+ }
+
+ if (mul == 16)
+ return base_clock / divisor;
+
+ return base_clock * mul / divisor;
+}
+
+static void tnetd7300_set_clock(u32 shift, struct tnetd7300_clock *clock,
+ u32 *bootcr, u32 frequency)
+{
+ int prediv, postdiv, mul;
+ int base_clock = ar7_bus_clock;
+
+ switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
+ case BOOT_PLL_SOURCE_BUS:
+ base_clock = ar7_bus_clock;
+ break;
+ case BOOT_PLL_SOURCE_REF:
+ base_clock = AR7_REF_CLOCK;
+ break;
+ case BOOT_PLL_SOURCE_XTAL:
+ base_clock = AR7_XTAL_CLOCK;
+ break;
+ case BOOT_PLL_SOURCE_CPU:
+ base_clock = ar7_cpu_clock;
+ break;
+ }
+
+ calculate(base_clock, frequency, &prediv, &postdiv, &mul);
+
+ writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl);
+ msleep(1);
+ writel(4, &clock->pll);
+ while (readl(&clock->pll) & PLL_STATUS)
+ ;
+ writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll);
+ msleep(75);
+}
+
+static void __init tnetd7300_init_clocks(void)
+{
+ u32 *bootcr = (u32 *)ioremap_nocache(AR7_REGS_DCL, 4);
+ struct tnetd7300_clocks *clocks =
+ ioremap_nocache(UR8_REGS_CLOCKS,
+ sizeof(struct tnetd7300_clocks));
+
+ ar7_bus_clock = tnetd7300_get_clock(BUS_PLL_SOURCE_SHIFT,
+ &clocks->bus, bootcr, AR7_AFE_CLOCK);
+
+ if (*bootcr & BOOT_PLL_ASYNC_MODE)
+ ar7_cpu_clock = tnetd7300_get_clock(CPU_PLL_SOURCE_SHIFT,
+ &clocks->cpu, bootcr, AR7_AFE_CLOCK);
+ else
+ ar7_cpu_clock = ar7_bus_clock;
+
+ if (ar7_dsp_clock == 250000000)
+ tnetd7300_set_clock(DSP_PLL_SOURCE_SHIFT, &clocks->dsp,
+ bootcr, ar7_dsp_clock);
+
+ iounmap(clocks);
+ iounmap(bootcr);
+}
+
+static int tnetd7200_get_clock(int base, struct tnetd7200_clock *clock,
+ u32 *bootcr, u32 bus_clock)
+{
+ int divisor = ((readl(&clock->prediv) & 0x1f) + 1) *
+ ((readl(&clock->postdiv) & 0x1f) + 1);
+
+ if (*bootcr & BOOT_PLL_BYPASS)
+ return base / divisor;
+
+ return base * ((readl(&clock->mul) & 0xf) + 1) / divisor;
+}
+
+
+static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock,
+ int prediv, int postdiv, int postdiv2, int mul, u32 frequency)
+{
+ printk(KERN_INFO
+ "Clocks: base = %d, frequency = %u, prediv = %d, "
+ "postdiv = %d, postdiv2 = %d, mul = %d\n",
+ base, frequency, prediv, postdiv, postdiv2, mul);
+
+ writel(0, &clock->ctrl);
+ writel(DIVISOR_ENABLE_MASK | ((prediv - 1) & 0x1F), &clock->prediv);
+ writel((mul - 1) & 0xF, &clock->mul);
+
+ while (readl(&clock->status) & 0x1)
+ ; /* nop */
+
+ writel(DIVISOR_ENABLE_MASK | ((postdiv - 1) & 0x1F), &clock->postdiv);
+
+ writel(readl(&clock->cmden) | 1, &clock->cmden);
+ writel(readl(&clock->cmd) | 1, &clock->cmd);
+
+ while (readl(&clock->status) & 0x1)
+ ; /* nop */
+
+ writel(DIVISOR_ENABLE_MASK | ((postdiv2 - 1) & 0x1F), &clock->postdiv2);
+
+ writel(readl(&clock->cmden) | 1, &clock->cmden);
+ writel(readl(&clock->cmd) | 1, &clock->cmd);
+
+ while (readl(&clock->status) & 0x1)
+ ; /* nop */
+
+ writel(readl(&clock->ctrl) | 1, &clock->ctrl);
+}
+
+static int tnetd7200_get_clock_base(int clock_id, u32 *bootcr)
+{
+ if (*bootcr & BOOT_PLL_ASYNC_MODE)
+ /* Async */
+ switch (clock_id) {
+ case TNETD7200_CLOCK_ID_DSP:
+ return AR7_REF_CLOCK;
+ default:
+ return AR7_AFE_CLOCK;
+ }
+ else
+ /* Sync */
+ if (*bootcr & BOOT_PLL_2TO1_MODE)
+ /* 2:1 */
+ switch (clock_id) {
+ case TNETD7200_CLOCK_ID_DSP:
+ return AR7_REF_CLOCK;
+ default:
+ return AR7_AFE_CLOCK;
+ }
+ else
+ /* 1:1 */
+ return AR7_REF_CLOCK;
+}
+
+
+static void __init tnetd7200_init_clocks(void)
+{
+ u32 *bootcr = (u32 *)ioremap_nocache(AR7_REGS_DCL, 4);
+ struct tnetd7200_clocks *clocks =
+ ioremap_nocache(AR7_REGS_CLOCKS,
+ sizeof(struct tnetd7200_clocks));
+ int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv;
+ int dsp_base, dsp_mul, dsp_prediv, dsp_postdiv;
+ int usb_base, usb_mul, usb_prediv, usb_postdiv;
+
+ cpu_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_CPU, bootcr);
+ dsp_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_DSP, bootcr);
+
+ if (*bootcr & BOOT_PLL_ASYNC_MODE) {
+ printk(KERN_INFO "Clocks: Async mode\n");
+
+ printk(KERN_INFO "Clocks: Setting DSP clock\n");
+ calculate(dsp_base, TNETD7200_DEF_DSP_CLK,
+ &dsp_prediv, &dsp_postdiv, &dsp_mul);
+ ar7_bus_clock =
+ ((dsp_base / dsp_prediv) * dsp_mul) / dsp_postdiv;
+ tnetd7200_set_clock(dsp_base, &clocks->dsp,
+ dsp_prediv, dsp_postdiv * 2, dsp_postdiv, dsp_mul * 2,
+ ar7_bus_clock);
+
+ printk(KERN_INFO "Clocks: Setting CPU clock\n");
+ calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
+ &cpu_postdiv, &cpu_mul);
+ ar7_cpu_clock =
+ ((cpu_base / cpu_prediv) * cpu_mul) / cpu_postdiv;
+ tnetd7200_set_clock(cpu_base, &clocks->cpu,
+ cpu_prediv, cpu_postdiv, -1, cpu_mul,
+ ar7_cpu_clock);
+
+ } else
+ if (*bootcr & BOOT_PLL_2TO1_MODE) {
+ printk(KERN_INFO "Clocks: Sync 2:1 mode\n");
+
+ printk(KERN_INFO "Clocks: Setting CPU clock\n");
+ calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
+ &cpu_postdiv, &cpu_mul);
+ ar7_cpu_clock = ((cpu_base / cpu_prediv) * cpu_mul)
+ / cpu_postdiv;
+ tnetd7200_set_clock(cpu_base, &clocks->cpu,
+ cpu_prediv, cpu_postdiv, -1, cpu_mul,
+ ar7_cpu_clock);
+
+ printk(KERN_INFO "Clocks: Setting DSP clock\n");
+ calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
+ &dsp_postdiv, &dsp_mul);
+ ar7_bus_clock = ar7_cpu_clock / 2;
+ tnetd7200_set_clock(dsp_base, &clocks->dsp,
+ dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
+ dsp_mul * 2, ar7_bus_clock);
+ } else {
+ printk(KERN_INFO "Clocks: Sync 1:1 mode\n");
+
+ printk(KERN_INFO "Clocks: Setting DSP clock\n");
+ calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
+ &dsp_postdiv, &dsp_mul);
+ ar7_bus_clock = ((dsp_base / dsp_prediv) * dsp_mul)
+ / dsp_postdiv;
+ tnetd7200_set_clock(dsp_base, &clocks->dsp,
+ dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
+ dsp_mul * 2, ar7_bus_clock);
+
+ ar7_cpu_clock = ar7_bus_clock;
+ }
+
+ printk(KERN_INFO "Clocks: Setting USB clock\n");
+ usb_base = ar7_bus_clock;
+ calculate(usb_base, TNETD7200_DEF_USB_CLK, &usb_prediv,
+ &usb_postdiv, &usb_mul);
+ tnetd7200_set_clock(usb_base, &clocks->usb,
+ usb_prediv, usb_postdiv, -1, usb_mul,
+ TNETD7200_DEF_USB_CLK);
+
+ ar7_dsp_clock = ar7_cpu_clock;
+
+ iounmap(clocks);
+ iounmap(bootcr);
+}
+
+int __init ar7_init_clocks(void)
+{
+ switch (ar7_chip_id()) {
+ case AR7_CHIP_7100:
+ case AR7_CHIP_7200:
+ tnetd7200_init_clocks();
+ break;
+ case AR7_CHIP_7300:
+ ar7_dsp_clock = tnetd7300_dsp_clock();
+ tnetd7300_init_clocks();
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+arch_initcall(ar7_init_clocks);
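calculate() in the new clock.c looks for an exact divider setting first: for each prediv it reduces target against base/prediv with gcd() (hence the "select GCD" in the AR7 Kconfig hunk earlier) to get the smallest mul/postdiv pair, and only falls back to the brute-force approximate() search when no mul below 16 with a postdiv up to 32 hits the target exactly. For instance, with an assumed 150 MHz source and a 48 MHz target (figures chosen just for the example), the very first prediv already yields an exact ratio of 8/25. A standalone rework of the exact-path search, handy for trying other values:

#include <stdio.h>

static unsigned long gcd_ul(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	/* Example figures only: a 150 MHz source and a 48 MHz target. */
	unsigned long base = 150000000, target = 48000000;
	unsigned long prediv, postdiv = 0, mul = 0;

	for (prediv = 1; prediv <= 32; prediv++) {
		unsigned long tmp_base = base / prediv;
		unsigned long g = gcd_ul(target, tmp_base);

		mul = target / g;
		postdiv = tmp_base / g;
		if (mul >= 1 && mul < 16 && postdiv >= 1 && postdiv <= 32)
			break;		/* exact setting found */
	}
	/* prediv runs past 32 if no exact setting exists; the kernel code
	 * then falls back to the brute-force approximate() search. */
	printf("prediv=%lu mul=%lu postdiv=%lu -> %lu Hz\n",
	       prediv, mul, postdiv, base / prediv * mul / postdiv);
	return 0;
}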
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
new file mode 100644
index 000000000000..74e14a3dbf4a
--- /dev/null
+++ b/arch/mips/ar7/gpio.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+
+#include <asm/mach-ar7/gpio.h>
+
+static const char *ar7_gpio_list[AR7_GPIO_MAX];
+
+int gpio_request(unsigned gpio, const char *label)
+{
+ if (gpio >= AR7_GPIO_MAX)
+ return -EINVAL;
+
+ if (ar7_gpio_list[gpio])
+ return -EBUSY;
+
+ if (label)
+ ar7_gpio_list[gpio] = label;
+ else
+ ar7_gpio_list[gpio] = "busy";
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_request);
+
+void gpio_free(unsigned gpio)
+{
+ BUG_ON(!ar7_gpio_list[gpio]);
+ ar7_gpio_list[gpio] = NULL;
+}
+EXPORT_SYMBOL(gpio_free);
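The new gpio.c keeps a flat table of owner labels: gpio_request() fails with -EINVAL past AR7_GPIO_MAX and with -EBUSY once a line already has an owner, while gpio_free() simply clears the slot. A self-contained userspace mock of the same reservation logic, showing the expected return values (AR7_GPIO_MAX is assumed to be 32 here, purely for the sketch):

#include <errno.h>
#include <stdio.h>

#define AR7_GPIO_MAX 32			/* assumed value, sketch only */

static const char *gpio_list[AR7_GPIO_MAX];

static int gpio_request(unsigned int gpio, const char *label)
{
	if (gpio >= AR7_GPIO_MAX)
		return -EINVAL;
	if (gpio_list[gpio])
		return -EBUSY;
	gpio_list[gpio] = label ? label : "busy";
	return 0;
}

static void gpio_free(unsigned int gpio)
{
	gpio_list[gpio] = NULL;
}

int main(void)
{
	printf("first request:  %d\n", gpio_request(4, "led"));		/* 0 */
	printf("second request: %d\n", gpio_request(4, "uart"));	/* -EBUSY */
	gpio_free(4);
	printf("after free:     %d\n", gpio_request(4, "uart"));	/* 0 */
	return 0;
}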
diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c
new file mode 100644
index 000000000000..c781556c44e4
--- /dev/null
+++ b/arch/mips/ar7/irq.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mach-ar7/ar7.h>
+
+#define EXCEPT_OFFSET 0x80
+#define PACE_OFFSET 0xA0
+#define CHNLS_OFFSET 0x200
+
+#define REG_OFFSET(irq, reg) ((irq) / 32 * 0x4 + reg * 0x10)
+#define SEC_REG_OFFSET(reg) (EXCEPT_OFFSET + reg * 0x8)
+#define SEC_SR_OFFSET (SEC_REG_OFFSET(0)) /* 0x80 */
+#define CR_OFFSET(irq) (REG_OFFSET(irq, 1)) /* 0x10 */
+#define SEC_CR_OFFSET (SEC_REG_OFFSET(1)) /* 0x88 */
+#define ESR_OFFSET(irq) (REG_OFFSET(irq, 2)) /* 0x20 */
+#define SEC_ESR_OFFSET (SEC_REG_OFFSET(2)) /* 0x90 */
+#define ECR_OFFSET(irq) (REG_OFFSET(irq, 3)) /* 0x30 */
+#define SEC_ECR_OFFSET (SEC_REG_OFFSET(3)) /* 0x98 */
+#define PIR_OFFSET (0x40)
+#define MSR_OFFSET (0x44)
+#define PM_OFFSET(irq) (REG_OFFSET(irq, 5)) /* 0x50 */
+#define TM_OFFSET(irq) (REG_OFFSET(irq, 6)) /* 0x60 */
+
+#define REG(addr) ((u32 *)(KSEG1ADDR(AR7_REGS_IRQ) + addr))
+
+#define CHNL_OFFSET(chnl) (CHNLS_OFFSET + (chnl * 4))
+
+static int ar7_irq_base;
+
+static void ar7_unmask_irq(unsigned int irq)
+{
+ writel(1 << ((irq - ar7_irq_base) % 32),
+ REG(ESR_OFFSET(irq - ar7_irq_base)));
+}
+
+static void ar7_mask_irq(unsigned int irq)
+{
+ writel(1 << ((irq - ar7_irq_base) % 32),
+ REG(ECR_OFFSET(irq - ar7_irq_base)));
+}
+
+static void ar7_ack_irq(unsigned int irq)
+{
+ writel(1 << ((irq - ar7_irq_base) % 32),
+ REG(CR_OFFSET(irq - ar7_irq_base)));
+}
+
+static void ar7_unmask_sec_irq(unsigned int irq)
+{
+ writel(1 << (irq - ar7_irq_base - 40), REG(SEC_ESR_OFFSET));
+}
+
+static void ar7_mask_sec_irq(unsigned int irq)
+{
+ writel(1 << (irq - ar7_irq_base - 40), REG(SEC_ECR_OFFSET));
+}
+
+static void ar7_ack_sec_irq(unsigned int irq)
+{
+ writel(1 << (irq - ar7_irq_base - 40), REG(SEC_CR_OFFSET));
+}
+
+static struct irq_chip ar7_irq_type = {
+ .name = "AR7",
+ .unmask = ar7_unmask_irq,
+ .mask = ar7_mask_irq,
+ .ack = ar7_ack_irq
+};
+
+static struct irq_chip ar7_sec_irq_type = {
+ .name = "AR7",
+ .unmask = ar7_unmask_sec_irq,
+ .mask = ar7_mask_sec_irq,
+ .ack = ar7_ack_sec_irq,
+};
+
+static struct irqaction ar7_cascade_action = {
+ .handler = no_action,
+ .name = "AR7 cascade interrupt"
+};
+
+static void __init ar7_irq_init(int base)
+{
+ int i;
+ /*
+ * Disable interrupts and clear pending
+ */
+ writel(0xffffffff, REG(ECR_OFFSET(0)));
+ writel(0xff, REG(ECR_OFFSET(32)));
+ writel(0xffffffff, REG(SEC_ECR_OFFSET));
+ writel(0xffffffff, REG(CR_OFFSET(0)));
+ writel(0xff, REG(CR_OFFSET(32)));
+ writel(0xffffffff, REG(SEC_CR_OFFSET));
+
+ ar7_irq_base = base;
+
+ for (i = 0; i < 40; i++) {
+ writel(i, REG(CHNL_OFFSET(i)));
+ /* Primary IRQs */
+ set_irq_chip_and_handler(base + i, &ar7_irq_type,
+ handle_level_irq);
+ /* Secondary IRQs */
+ if (i < 32)
+ set_irq_chip_and_handler(base + i + 40,
+ &ar7_sec_irq_type,
+ handle_level_irq);
+ }
+
+ setup_irq(2, &ar7_cascade_action);
+ setup_irq(ar7_irq_base, &ar7_cascade_action);
+ set_c0_status(IE_IRQ0);
+}
+
+void __init arch_init_irq(void)
+{
+ mips_cpu_irq_init();
+ ar7_irq_init(8);
+}
+
+static void ar7_cascade(void)
+{
+ u32 status;
+ int i, irq;
+
+ /* Primary IRQs */
+ irq = readl(REG(PIR_OFFSET)) & 0x3f;
+ if (irq) {
+ do_IRQ(ar7_irq_base + irq);
+ return;
+ }
+
+ /* Secondary IRQs are cascaded through primary '0' */
+ writel(1, REG(CR_OFFSET(irq)));
+ status = readl(REG(SEC_SR_OFFSET));
+ for (i = 0; i < 32; i++) {
+ if (status & 1) {
+ do_IRQ(ar7_irq_base + i + 40);
+ return;
+ }
+ status >>= 1;
+ }
+
+ spurious_interrupt();
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+ if (pending & STATUSF_IP7) /* cpu timer */
+ do_IRQ(7);
+ else if (pending & STATUSF_IP2) /* int0 hardware line */
+ ar7_cascade();
+ else
+ spurious_interrupt();
+}
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
new file mode 100644
index 000000000000..46fed44825a6
--- /dev/null
+++ b/arch/mips/ar7/memory.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pfn.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/swap.h>
+
+#include <asm/bootinfo.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+
+#include <asm/mach-ar7/ar7.h>
+#include <asm/mips-boards/prom.h>
+
+static int __init memsize(void)
+{
+ u32 size = (64 << 20);
+ u32 *addr = (u32 *)KSEG1ADDR(AR7_SDRAM_BASE + size - 4);
+ u32 *kernel_end = (u32 *)KSEG1ADDR(CPHYSADDR((u32)&_end));
+ u32 *tmpaddr = addr;
+
+ while (tmpaddr > kernel_end) {
+ *tmpaddr = (u32)tmpaddr;
+ size >>= 1;
+ tmpaddr -= size >> 2;
+ }
+
+ do {
+ tmpaddr += size >> 2;
+ if (*tmpaddr != (u32)tmpaddr)
+ break;
+ size <<= 1;
+ } while (size < (64 << 20));
+
+ writel(tmpaddr, &addr);
+
+ return size;
+}
+
+void __init prom_meminit(void)
+{
+ unsigned long pages;
+
+ pages = memsize() >> PAGE_SHIFT;
+ add_memory_region(PHYS_OFFSET, pages << PAGE_SHIFT,
+ BOOT_MEM_RAM);
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* Nothing to free */
+}
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
new file mode 100644
index 000000000000..542244961780
--- /dev/null
+++ b/arch/mips/ar7/platform.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/version.h>
+#include <linux/vlynq.h>
+#include <linux/leds.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+
+#include <asm/addrspace.h>
+#include <asm/mach-ar7/ar7.h>
+#include <asm/mach-ar7/gpio.h>
+#include <asm/mach-ar7/prom.h>
+
+struct plat_vlynq_data {
+ struct plat_vlynq_ops ops;
+ int gpio_bit;
+ int reset_bit;
+};
+
+
+static int vlynq_on(struct vlynq_device *dev)
+{
+ int result;
+ struct plat_vlynq_data *pdata = dev->dev.platform_data;
+
+ result = gpio_request(pdata->gpio_bit, "vlynq");
+ if (result)
+ goto out;
+
+ ar7_device_reset(pdata->reset_bit);
+
+ result = ar7_gpio_disable(pdata->gpio_bit);
+ if (result)
+ goto out_enabled;
+
+ result = ar7_gpio_enable(pdata->gpio_bit);
+ if (result)
+ goto out_enabled;
+
+ result = gpio_direction_output(pdata->gpio_bit, 0);
+ if (result)
+ goto out_gpio_enabled;
+
+ msleep(50);
+
+ gpio_set_value(pdata->gpio_bit, 1);
+ msleep(50);
+
+ return 0;
+
+out_gpio_enabled:
+ ar7_gpio_disable(pdata->gpio_bit);
+out_enabled:
+ ar7_device_disable(pdata->reset_bit);
+ gpio_free(pdata->gpio_bit);
+out:
+ return result;
+}
+
+static void vlynq_off(struct vlynq_device *dev)
+{
+ struct plat_vlynq_data *pdata = dev->dev.platform_data;
+ ar7_gpio_disable(pdata->gpio_bit);
+ gpio_free(pdata->gpio_bit);
+ ar7_device_disable(pdata->reset_bit);
+}
+
+static struct resource physmap_flash_resource = {
+ .name = "mem",
+ .flags = IORESOURCE_MEM,
+ .start = 0x10000000,
+ .end = 0x107fffff,
+};
+
+static struct resource cpmac_low_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_MAC0,
+ .end = AR7_REGS_MAC0 + 0x7ff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 27,
+ .end = 27,
+ },
+};
+
+static struct resource cpmac_high_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_MAC1,
+ .end = AR7_REGS_MAC1 + 0x7ff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 41,
+ .end = 41,
+ },
+};
+
+static struct resource vlynq_low_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_VLYNQ0,
+ .end = AR7_REGS_VLYNQ0 + 0xff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 29,
+ .end = 29,
+ },
+ {
+ .name = "mem",
+ .flags = IORESOURCE_MEM,
+ .start = 0x04000000,
+ .end = 0x04ffffff,
+ },
+ {
+ .name = "devirq",
+ .flags = IORESOURCE_IRQ,
+ .start = 80,
+ .end = 111,
+ },
+};
+
+static struct resource vlynq_high_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_VLYNQ1,
+ .end = AR7_REGS_VLYNQ1 + 0xff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 33,
+ .end = 33,
+ },
+ {
+ .name = "mem",
+ .flags = IORESOURCE_MEM,
+ .start = 0x0c000000,
+ .end = 0x0cffffff,
+ },
+ {
+ .name = "devirq",
+ .flags = IORESOURCE_IRQ,
+ .start = 112,
+ .end = 143,
+ },
+};
+
+static struct resource usb_res[] = {
+ {
+ .name = "regs",
+ .flags = IORESOURCE_MEM,
+ .start = AR7_REGS_USB,
+ .end = AR7_REGS_USB + 0xff,
+ },
+ {
+ .name = "irq",
+ .flags = IORESOURCE_IRQ,
+ .start = 32,
+ .end = 32,
+ },
+ {
+ .name = "mem",
+ .flags = IORESOURCE_MEM,
+ .start = 0x03400000,
+ .end = 0x03401fff,
+ },
+};
+
+static struct physmap_flash_data physmap_flash_data = {
+ .width = 2,
+};
+
+static struct plat_cpmac_data cpmac_low_data = {
+ .reset_bit = 17,
+ .power_bit = 20,
+ .phy_mask = 0x80000000,
+};
+
+static struct plat_cpmac_data cpmac_high_data = {
+ .reset_bit = 21,
+ .power_bit = 22,
+ .phy_mask = 0x7fffffff,
+};
+
+static struct plat_vlynq_data vlynq_low_data = {
+ .ops.on = vlynq_on,
+ .ops.off = vlynq_off,
+ .reset_bit = 20,
+ .gpio_bit = 18,
+};
+
+static struct plat_vlynq_data vlynq_high_data = {
+ .ops.on = vlynq_on,
+ .ops.off = vlynq_off,
+ .reset_bit = 16,
+ .gpio_bit = 19,
+};
+
+static struct platform_device physmap_flash = {
+ .id = 0,
+ .name = "physmap-flash",
+ .dev.platform_data = &physmap_flash_data,
+ .resource = &physmap_flash_resource,
+ .num_resources = 1,
+};
+
+static u64 cpmac_dma_mask = DMA_32BIT_MASK;
+static struct platform_device cpmac_low = {
+ .id = 0,
+ .name = "cpmac",
+ .dev = {
+ .dma_mask = &cpmac_dma_mask,
+ .coherent_dma_mask = DMA_32BIT_MASK,
+ .platform_data = &cpmac_low_data,
+ },
+ .resource = cpmac_low_res,
+ .num_resources = ARRAY_SIZE(cpmac_low_res),
+};
+
+static struct platform_device cpmac_high = {
+ .id = 1,
+ .name = "cpmac",
+ .dev = {
+ .dma_mask = &cpmac_dma_mask,
+ .coherent_dma_mask = DMA_32BIT_MASK,
+ .platform_data = &cpmac_high_data,
+ },
+ .resource = cpmac_high_res,
+ .num_resources = ARRAY_SIZE(cpmac_high_res),
+};
+
+static struct platform_device vlynq_low = {
+ .id = 0,
+ .name = "vlynq",
+ .dev.platform_data = &vlynq_low_data,
+ .resource = vlynq_low_res,
+ .num_resources = ARRAY_SIZE(vlynq_low_res),
+};
+
+static struct platform_device vlynq_high = {
+ .id = 1,
+ .name = "vlynq",
+ .dev.platform_data = &vlynq_high_data,
+ .resource = vlynq_high_res,
+ .num_resources = ARRAY_SIZE(vlynq_high_res),
+};
+
+
+static struct gpio_led default_leds[] = {
+ {
+ .name = "status",
+ .gpio = 8,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led dsl502t_leds[] = {
+ {
+ .name = "status",
+ .gpio = 9,
+ .active_low = 1,
+ },
+ {
+ .name = "ethernet",
+ .gpio = 7,
+ .active_low = 1,
+ },
+ {
+ .name = "usb",
+ .gpio = 12,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led dg834g_leds[] = {
+ {
+ .name = "ppp",
+ .gpio = 6,
+ .active_low = 1,
+ },
+ {
+ .name = "status",
+ .gpio = 7,
+ .active_low = 1,
+ },
+ {
+ .name = "adsl",
+ .gpio = 8,
+ .active_low = 1,
+ },
+ {
+ .name = "wifi",
+ .gpio = 12,
+ .active_low = 1,
+ },
+ {
+ .name = "power",
+ .gpio = 14,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+};
+
+static struct gpio_led fb_sl_leds[] = {
+ {
+ .name = "1",
+ .gpio = 7,
+ },
+ {
+ .name = "2",
+ .gpio = 13,
+ .active_low = 1,
+ },
+ {
+ .name = "3",
+ .gpio = 10,
+ .active_low = 1,
+ },
+ {
+ .name = "4",
+ .gpio = 12,
+ .active_low = 1,
+ },
+ {
+ .name = "5",
+ .gpio = 9,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led fb_fon_leds[] = {
+ {
+ .name = "1",
+ .gpio = 8,
+ },
+ {
+ .name = "2",
+ .gpio = 3,
+ .active_low = 1,
+ },
+ {
+ .name = "3",
+ .gpio = 5,
+ },
+ {
+ .name = "4",
+ .gpio = 4,
+ .active_low = 1,
+ },
+ {
+ .name = "5",
+ .gpio = 11,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led_platform_data ar7_led_data;
+
+static struct platform_device ar7_gpio_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &ar7_led_data,
+ }
+};
+
+static struct platform_device ar7_udc = {
+ .id = -1,
+ .name = "ar7_udc",
+ .resource = usb_res,
+ .num_resources = ARRAY_SIZE(usb_res),
+};
+
+static inline unsigned char char2hex(char h)
+{
+ switch (h) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ return h - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return h - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return h - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+static void cpmac_get_mac(int instance, unsigned char *dev_addr)
+{
+ int i;
+ char name[5], *mac;
+
+ mac = NULL;
+ sprintf(name, "mac%c", 'a' + instance);
+ mac = prom_getenv(name);
+ if (!mac) {
+ sprintf(name, "mac%c", 'a');
+ mac = prom_getenv(name);
+ }
+ if (!mac) {
+ /* No MAC string in the environment; fall back to a random address */
+ random_ether_addr(dev_addr);
+ return;
+ }
+ for (i = 0; i < 6; i++)
+ dev_addr[i] = (char2hex(mac[i * 3]) << 4) +
+ char2hex(mac[i * 3 + 1]);
+}
+
+static void __init detect_leds(void)
+{
+ char *prid, *usb_prod;
+
+ /* Default LEDs */
+ ar7_led_data.num_leds = ARRAY_SIZE(default_leds);
+ ar7_led_data.leds = default_leds;
+
+ /* FIXME: the whole thing is unreliable */
+ prid = prom_getenv("ProductID");
+ usb_prod = prom_getenv("usb_prod");
+
+ /* If we can't get the product id from PROM, use the default LEDs */
+ if (!prid)
+ return;
+
+ if (strstr(prid, "Fritz_Box_FON")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(fb_fon_leds);
+ ar7_led_data.leds = fb_fon_leds;
+ } else if (strstr(prid, "Fritz_Box_")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(fb_sl_leds);
+ ar7_led_data.leds = fb_sl_leds;
+ } else if ((!strcmp(prid, "AR7RD") || !strcmp(prid, "AR7DB"))
+ && usb_prod != NULL && strstr(usb_prod, "DSL-502T")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(dsl502t_leds);
+ ar7_led_data.leds = dsl502t_leds;
+ } else if (strstr(prid, "DG834")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(dg834g_leds);
+ ar7_led_data.leds = dg834g_leds;
+ }
+}
+
+static int __init ar7_register_devices(void)
+{
+ int res;
+ static struct uart_port uart_port[2];
+
+ memset(uart_port, 0, sizeof(struct uart_port) * 2);
+
+ uart_port[0].type = PORT_16550A;
+ uart_port[0].line = 0;
+ uart_port[0].irq = AR7_IRQ_UART0;
+ uart_port[0].uartclk = ar7_bus_freq() / 2;
+ uart_port[0].iotype = UPIO_MEM32;
+ uart_port[0].mapbase = AR7_REGS_UART0;
+ uart_port[0].membase = ioremap(uart_port[0].mapbase, 256);
+ uart_port[0].regshift = 2;
+ res = early_serial_setup(&uart_port[0]);
+ if (res)
+ return res;
+
+
+ /* Only TNETD73xx have a second serial port */
+ if (ar7_has_second_uart()) {
+ uart_port[1].type = PORT_16550A;
+ uart_port[1].line = 1;
+ uart_port[1].irq = AR7_IRQ_UART1;
+ uart_port[1].uartclk = ar7_bus_freq() / 2;
+ uart_port[1].iotype = UPIO_MEM32;
+ uart_port[1].mapbase = UR8_REGS_UART1;
+ uart_port[1].membase = ioremap(uart_port[1].mapbase, 256);
+ uart_port[1].regshift = 2;
+ res = early_serial_setup(&uart_port[1]);
+ if (res)
+ return res;
+ }
+
+ res = platform_device_register(&physmap_flash);
+ if (res)
+ return res;
+
+ ar7_device_disable(vlynq_low_data.reset_bit);
+ res = platform_device_register(&vlynq_low);
+ if (res)
+ return res;
+
+ if (ar7_has_high_vlynq()) {
+ ar7_device_disable(vlynq_high_data.reset_bit);
+ res = platform_device_register(&vlynq_high);
+ if (res)
+ return res;
+ }
+
+ if (ar7_has_high_cpmac()) {
+ cpmac_get_mac(1, cpmac_high_data.dev_addr);
+ res = platform_device_register(&cpmac_high);
+ if (res)
+ return res;
+ } else {
+ cpmac_low_data.phy_mask = 0xffffffff;
+ }
+
+ cpmac_get_mac(0, cpmac_low_data.dev_addr);
+ res = platform_device_register(&cpmac_low);
+ if (res)
+ return res;
+
+ detect_leds();
+ res = platform_device_register(&ar7_gpio_leds);
+ if (res)
+ return res;
+
+ res = platform_device_register(&ar7_udc);
+
+ return res;
+}
+arch_initcall(ar7_register_devices);
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
new file mode 100644
index 000000000000..a320bceb2f9d
--- /dev/null
+++ b/arch/mips/ar7/prom.c
@@ -0,0 +1,297 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Putting things on the screen/serial line using YAMON's facilities.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/serial_reg.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <asm/bootinfo.h>
+
+#include <asm/mach-ar7/ar7.h>
+#include <asm/mach-ar7/prom.h>
+
+#define MAX_ENTRY 80
+
+struct env_var {
+ char *name;
+ char *value;
+};
+
+static struct env_var adam2_env[MAX_ENTRY];
+
+char *prom_getenv(const char *name)
+{
+ int i;
+ for (i = 0; (i < MAX_ENTRY) && adam2_env[i].name; i++)
+ if (!strcmp(name, adam2_env[i].name))
+ return adam2_env[i].value;
+
+ return NULL;
+}
+EXPORT_SYMBOL(prom_getenv);
+
+char * __init prom_getcmdline(void)
+{
+ return &(arcs_cmdline[0]);
+}
+
+static void __init ar7_init_cmdline(int argc, char *argv[])
+{
+ char *cp;
+ int actr;
+
+ actr = 1; /* Always ignore argv[0] */
+
+ cp = &(arcs_cmdline[0]);
+ while (actr < argc) {
+ strcpy(cp, argv[actr]);
+ cp += strlen(argv[actr]);
+ *cp++ = ' ';
+ actr++;
+ }
+ if (cp != &(arcs_cmdline[0])) {
+ /* get rid of trailing space */
+ --cp;
+ *cp = '\0';
+ }
+}
+
+struct psbl_rec {
+ u32 psbl_size;
+ u32 env_base;
+ u32 env_size;
+ u32 ffs_base;
+ u32 ffs_size;
+};
+
+static __initdata char psp_env_version[] = "TIENV0.8";
+
+struct psp_env_chunk {
+ u8 num;
+ u8 ctrl;
+ u16 csum;
+ u8 len;
+ char data[11];
+} __attribute__ ((packed));
+
+struct psp_var_map_entry {
+ u8 num;
+ char *value;
+};
+
+static struct psp_var_map_entry psp_var_map[] = {
+ { 1, "cpufrequency" },
+ { 2, "memsize" },
+ { 3, "flashsize" },
+ { 4, "modetty0" },
+ { 5, "modetty1" },
+ { 8, "maca" },
+ { 9, "macb" },
+ { 28, "sysfrequency" },
+ { 38, "mipsfrequency" },
+};
+
+/*
+
+Well-known variable (num is looked up in table above for matching variable name)
+Example: cpufrequency=211968000
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+| 01 |CTRL|CHECKSUM | 01 | _2 | _1 | _1 | _9 | _6 | _8 | _0 | _0 | _0 | \0 | FF
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+
+Name=Value pair in a single chunk
+Example: NAME=VALUE
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+| 00 |CTRL|CHECKSUM | 01 | _N | _A | _M | _E | _0 | _V | _A | _L | _U | _E | \0
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+
+Name=Value pair in 2 chunks (len is the number of chunks)
+Example: bootloaderVersion=1.3.7.15
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+| 00 |CTRL|CHECKSUM | 02 | _b | _o | _o | _t | _l | _o | _a | _d | _e | _r | _V
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+| _e | _r | _s | _i | _o | _n | \0 | _1 | _. | _3 | _. | _7 | _. | _1 | _5 | \0
++----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
+
+Data is padded with 0xFF
+
+*/
+
+#define PSP_ENV_SIZE 4096
+
+static char psp_env_data[PSP_ENV_SIZE] = { 0, };
+
+static char * __init lookup_psp_var_map(u8 num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(psp_var_map); i++)
+ if (psp_var_map[i].num == num)
+ return psp_var_map[i].value;
+
+ return NULL;
+}
+
+static void __init add_adam2_var(char *name, char *value)
+{
+ int i;
+ for (i = 0; i < MAX_ENTRY; i++) {
+ if (!adam2_env[i].name) {
+ adam2_env[i].name = name;
+ adam2_env[i].value = value;
+ return;
+ } else if (!strcmp(adam2_env[i].name, name)) {
+ adam2_env[i].value = value;
+ return;
+ }
+ }
+}
+
+static int __init parse_psp_env(void *psp_env_base)
+{
+ int i, n;
+ char *name, *value;
+ struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;
+
+ memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);
+
+ i = 1;
+ n = PSP_ENV_SIZE / sizeof(struct psp_env_chunk);
+ while (i < n) {
+ if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))
+ break;
+ value = chunks[i].data;
+ if (chunks[i].num) {
+ name = lookup_psp_var_map(chunks[i].num);
+ } else {
+ name = value;
+ value += strlen(name) + 1;
+ }
+ if (name)
+ add_adam2_var(name, value);
+ i += chunks[i].len;
+ }
+ return 0;
+}
+
+static void __init ar7_init_env(struct env_var *env)
+{
+ int i;
+ struct psbl_rec *psbl = (struct psbl_rec *)(KSEG1ADDR(0x14000300));
+ void *psp_env = (void *)KSEG1ADDR(psbl->env_base);
+
+ if (strcmp(psp_env, psp_env_version) == 0) {
+ parse_psp_env(psp_env);
+ } else {
+ for (i = 0; i < MAX_ENTRY; i++, env++)
+ if (env->name)
+ add_adam2_var(env->name, env->value);
+ }
+}
+
+static void __init console_config(void)
+{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ char console_string[40];
+ int baud = 0;
+ char parity = '\0', bits = '\0', flow = '\0';
+ char *s, *p;
+
+ if (strstr(prom_getcmdline(), "console="))
+ return;
+
+#ifdef CONFIG_KGDB
+ if (!strstr(prom_getcmdline(), "nokgdb")) {
+ strcat(prom_getcmdline(), " console=kgdb");
+ kgdb_enabled = 1;
+ return;
+ }
+#endif
+
+ s = prom_getenv("modetty0");
+ if (s) {
+ baud = simple_strtoul(s, &p, 10);
+ s = p;
+ if (*s == ',')
+ s++;
+ if (*s)
+ parity = *s++;
+ if (*s == ',')
+ s++;
+ if (*s)
+ bits = *s++;
+ if (*s == ',')
+ s++;
+ if (*s == 'h')
+ flow = 'r';
+ }
+
+ if (baud == 0)
+ baud = 38400;
+ if (parity != 'n' && parity != 'o' && parity != 'e')
+ parity = 'n';
+ if (bits != '7' && bits != '8')
+ bits = '8';
+
+ if (flow == 'r')
+ sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+ parity, bits, flow);
+ else
+ sprintf(console_string, " console=ttyS0,%d%c%c", baud, parity,
+ bits);
+ strcat(prom_getcmdline(), console_string);
+#endif
+}
+
+void __init prom_init(void)
+{
+ ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
+ ar7_init_env((struct env_var *)fw_arg2);
+ console_config();
+}
+
+#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
+static inline unsigned int serial_in(int offset)
+{
+ return readl((void *)PORT(offset));
+}
+
+static inline void serial_out(int offset, int value)
+{
+ writel(value, (void *)PORT(offset));
+}
+
+char prom_getchar(void)
+{
+ while (!(serial_in(UART_LSR) & UART_LSR_DR))
+ ;
+ return serial_in(UART_RX);
+}
+
+int prom_putchar(char c)
+{
+ while ((serial_in(UART_LSR) & UART_LSR_TEMT) == 0)
+ ;
+ serial_out(UART_TX, c);
+ return 1;
+}
+
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
new file mode 100644
index 000000000000..6ebb5f16d967
--- /dev/null
+++ b/arch/mips/ar7/setup.c
@@ -0,0 +1,94 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ */
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/pm.h>
+#include <linux/time.h>
+
+#include <asm/reboot.h>
+#include <asm/mach-ar7/ar7.h>
+#include <asm/mach-ar7/prom.h>
+
+static void ar7_machine_restart(char *command)
+{
+ u32 *softres_reg = ioremap(AR7_REGS_RESET +
+ AR7_RESET_SOFTWARE, 1);
+ writel(1, softres_reg);
+}
+
+static void ar7_machine_halt(void)
+{
+ while (1)
+ ;
+}
+
+static void ar7_machine_power_off(void)
+{
+ u32 *power_reg = (u32 *)ioremap(AR7_REGS_POWER, 1);
+ u32 power_state = readl(power_reg) | (3 << 30);
+ writel(power_state, power_reg);
+ ar7_machine_halt();
+}
+
+const char *get_system_type(void)
+{
+ u16 chip_id = ar7_chip_id();
+ switch (chip_id) {
+ case AR7_CHIP_7300:
+ return "TI AR7 (TNETD7300)";
+ case AR7_CHIP_7100:
+ return "TI AR7 (TNETD7100)";
+ case AR7_CHIP_7200:
+ return "TI AR7 (TNETD7200)";
+ default:
+ return "TI AR7 (Unknown)";
+ }
+}
+
+static int __init ar7_init_console(void)
+{
+ return 0;
+}
+console_initcall(ar7_init_console);
+
+/*
+ * Initializes basic routines and structure pointers, memory size (as
+ * given by the bios), and saves the command line.
+ */
+
+void __init plat_mem_setup(void)
+{
+ unsigned long io_base;
+
+ _machine_restart = ar7_machine_restart;
+ _machine_halt = ar7_machine_halt;
+ pm_power_off = ar7_machine_power_off;
+ panic_timeout = 3;
+
+ io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000);
+ if (!io_base)
+ panic("Can't remap IO base!\n");
+ set_io_port_base(io_base);
+
+ prom_meminit();
+
+ printk(KERN_INFO "%s, ID: 0x%04x, Revision: 0x%02x\n",
+ get_system_type(),
+ ar7_chip_id(), ar7_chip_rev());
+}
diff --git a/arch/mips/ar7/time.c b/arch/mips/ar7/time.c
new file mode 100644
index 000000000000..a1fba894daa2
--- /dev/null
+++ b/arch/mips/ar7/time.c
@@ -0,0 +1,30 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Setting up the clock on the MIPS boards.
+ */
+
+#include <linux/init.h>
+#include <linux/time.h>
+
+#include <asm/time.h>
+#include <asm/mach-ar7/ar7.h>
+
+void __init plat_time_init(void)
+{
+ mips_hpt_frequency = ar7_cpu_freq() / 2;
+}
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 627c162a6159..4b92bfc662db 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -29,7 +29,7 @@
#include <dma-coherence.h>
#ifdef CONFIG_PCI
-#include "pci-common.h"
+#include <asm/octeon/pci-octeon.h>
#endif
#define BAR2_PCI_ADDRESS 0x8000000000ul
diff --git a/arch/mips/cobalt/buttons.c b/arch/mips/cobalt/buttons.c
index 9e143989c7b8..4eaec8b46e0c 100644
--- a/arch/mips/cobalt/buttons.c
+++ b/arch/mips/cobalt/buttons.c
@@ -1,7 +1,7 @@
/*
* Cobalt buttons platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/cobalt/lcd.c b/arch/mips/cobalt/lcd.c
index 0720e4fae311..0f1cd90f37ed 100644
--- a/arch/mips/cobalt/lcd.c
+++ b/arch/mips/cobalt/lcd.c
@@ -1,7 +1,7 @@
/*
* Registration of Cobalt LCD platform device.
*
- * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/cobalt/led.c b/arch/mips/cobalt/led.c
index 1c6ebd468b07..d3ce6fa1dc74 100644
--- a/arch/mips/cobalt/led.c
+++ b/arch/mips/cobalt/led.c
@@ -1,7 +1,7 @@
/*
* Registration of Cobalt LED platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/cobalt/mtd.c b/arch/mips/cobalt/mtd.c
index 2b088ef3839a..691d620b6766 100644
--- a/arch/mips/cobalt/mtd.c
+++ b/arch/mips/cobalt/mtd.c
@@ -1,7 +1,7 @@
/*
* Registration of Cobalt MTD device.
*
- * Copyright (C) 2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2006 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/cobalt/rtc.c b/arch/mips/cobalt/rtc.c
index e70794b8bcba..3ab39898b4e4 100644
--- a/arch/mips/cobalt/rtc.c
+++ b/arch/mips/cobalt/rtc.c
@@ -1,7 +1,7 @@
/*
* Registration of Cobalt RTC platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/cobalt/serial.c b/arch/mips/cobalt/serial.c
index 53b8d0d6da90..7cb51f57275e 100644
--- a/arch/mips/cobalt/serial.c
+++ b/arch/mips/cobalt/serial.c
@@ -1,7 +1,7 @@
/*
* Registration of Cobalt UART platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/cobalt/time.c b/arch/mips/cobalt/time.c
index 4a570e7145fe..0162f9edc693 100644
--- a/arch/mips/cobalt/time.c
+++ b/arch/mips/cobalt/time.c
@@ -1,7 +1,7 @@
/*
* Cobalt time initialization.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
new file mode 100644
index 000000000000..dad5b6769d74
--- /dev/null
+++ b/arch/mips/configs/ar7_defconfig
@@ -0,0 +1,1182 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.30
+# Wed Jun 24 14:08:59 2009
+#
+CONFIG_MIPS=y
+
+#
+# Machine selection
+#
+# CONFIG_MACH_ALCHEMY is not set
+CONFIG_AR7=y
+# CONFIG_BASLER_EXCITE is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_MIPS_COBALT is not set
+# CONFIG_MACH_DECSTATION is not set
+# CONFIG_MACH_JAZZ is not set
+# CONFIG_LASAT is not set
+# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MIPS_MALTA is not set
+# CONFIG_MIPS_SIM is not set
+# CONFIG_NEC_MARKEINS is not set
+# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
+# CONFIG_PNX8550_JBS is not set
+# CONFIG_PNX8550_STB810 is not set
+# CONFIG_PMC_MSP is not set
+# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_SGI_IP22 is not set
+# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
+# CONFIG_SGI_IP32 is not set
+# CONFIG_SIBYTE_CRHINE is not set
+# CONFIG_SIBYTE_CARMEL is not set
+# CONFIG_SIBYTE_CRHONE is not set
+# CONFIG_SIBYTE_RHONE is not set
+# CONFIG_SIBYTE_SWARM is not set
+# CONFIG_SIBYTE_LITTLESUR is not set
+# CONFIG_SIBYTE_SENTOSA is not set
+# CONFIG_SIBYTE_BIGSUR is not set
+# CONFIG_SNI_RM is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
+# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
+CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K_LIB=y
+CONFIG_CSRC_R4K=y
+CONFIG_DMA_NONCOHERENT=y
+CONFIG_DMA_NEED_PCI_MAP_STATE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
+# CONFIG_HOTPLUG_CPU is not set
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_GPIO=y
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+CONFIG_IRQ_CPU=y
+CONFIG_NO_EXCEPT_FILL=y
+CONFIG_SWAP_IO_SPACE=y
+CONFIG_BOOT_ELF32=y
+CONFIG_MIPS_L1_CACHE_SHIFT=5
+
+#
+# CPU selection
+#
+# CONFIG_CPU_LOONGSON2 is not set
+CONFIG_CPU_MIPS32_R1=y
+# CONFIG_CPU_MIPS32_R2 is not set
+# CONFIG_CPU_MIPS64_R1 is not set
+# CONFIG_CPU_MIPS64_R2 is not set
+# CONFIG_CPU_R3000 is not set
+# CONFIG_CPU_TX39XX is not set
+# CONFIG_CPU_VR41XX is not set
+# CONFIG_CPU_R4300 is not set
+# CONFIG_CPU_R4X00 is not set
+# CONFIG_CPU_TX49XX is not set
+# CONFIG_CPU_R5000 is not set
+# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
+# CONFIG_CPU_R6000 is not set
+# CONFIG_CPU_NEVADA is not set
+# CONFIG_CPU_R8000 is not set
+# CONFIG_CPU_R10000 is not set
+# CONFIG_CPU_RM7000 is not set
+# CONFIG_CPU_RM9000 is not set
+# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_HAS_CPU_MIPS32_R1=y
+CONFIG_CPU_MIPS32=y
+CONFIG_CPU_MIPSR1=y
+CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
+
+#
+# Kernel type
+#
+CONFIG_32BIT=y
+# CONFIG_64BIT is not set
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_CPU_HAS_PREFETCH=y
+CONFIG_MIPS_MT_DISABLED=y
+# CONFIG_MIPS_MT_SMP is not set
+# CONFIG_MIPS_MT_SMTC is not set
+CONFIG_CPU_HAS_LLSC=y
+CONFIG_CPU_HAS_SYNC=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_CPU_SUPPORTS_HIGHMEM=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_48 is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_128 is not set
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_256 is not set
+# CONFIG_HZ_1000 is not set
+# CONFIG_HZ_1024 is not set
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
+CONFIG_HZ=100
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_KEXEC=y
+# CONFIG_SECCOMP is not set
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+# CONFIG_PCSPKR_PLATFORM is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Performance Counters
+#
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+CONFIG_IOSCHED_DEADLINE=y
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+CONFIG_PROBE_INITRD_HEADER=y
+# CONFIG_FREEZER is not set
+
+#
+# Bus options (PCI, PCMCIA, EISA, ISA, TC)
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_MMU=y
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_TRAD_SIGNALS=y
+
+#
+# Power management options
+#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_PM is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+# CONFIG_IP_PIMSM_V1 is not set
+# CONFIG_IP_PIMSM_V2 is not set
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+CONFIG_TCP_CONG_WESTWOOD=y
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_TCP_CONG_HSTCP is not set
+# CONFIG_TCP_CONG_HYBLA is not set
+# CONFIG_TCP_CONG_VEGAS is not set
+# CONFIG_TCP_CONG_SCALABLE is not set
+# CONFIG_TCP_CONG_LP is not set
+# CONFIG_TCP_CONG_VENO is not set
+# CONFIG_TCP_CONG_YEAH is not set
+# CONFIG_TCP_CONG_ILLINOIS is not set
+# CONFIG_DEFAULT_BIC is not set
+# CONFIG_DEFAULT_CUBIC is not set
+# CONFIG_DEFAULT_HTCP is not set
+# CONFIG_DEFAULT_VEGAS is not set
+CONFIG_DEFAULT_WESTWOOD=y
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="westwood"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+# CONFIG_BRIDGE_NETFILTER is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+CONFIG_NF_CONNTRACK=m
+# CONFIG_NF_CT_ACCT is not set
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+CONFIG_NF_CONNTRACK_FTP=m
+# CONFIG_NF_CONNTRACK_H323 is not set
+CONFIG_NF_CONNTRACK_IRC=m
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+CONFIG_NF_CONNTRACK_TFTP=m
+# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NETFILTER_TPROXY is not set
+CONFIG_NETFILTER_XTABLES=m
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+# CONFIG_NETFILTER_XT_TARGET_HL is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
+# CONFIG_NF_NAT_SNMP_BASIC is not set
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_TFTP=m
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+# CONFIG_NF_NAT_SIP is not set
+CONFIG_IP_NF_MANGLE=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+CONFIG_IP_NF_RAW=m
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+CONFIG_ATM=m
+# CONFIG_ATM_CLIP is not set
+# CONFIG_ATM_LANE is not set
+CONFIG_ATM_BR2684=m
+CONFIG_ATM_BR2684_IPFILTER=y
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_ATM is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_INGRESS is not set
+
+#
+# Classification
+#
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_EMATCH is not set
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_NAT is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_HAMRADIO=y
+
+#
+# Packet Radio protocols
+#
+# CONFIG_AX25 is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=m
+# CONFIG_CFG80211_REG_DEBUG is not set
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_DEFAULT_PS=y
+CONFIG_MAC80211_DEFAULT_PS_VALUE=1
+
+#
+# Rate control algorithm selection
+#
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
+CONFIG_MAC80211_RC_DEFAULT="pid"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_IFB is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+CONFIG_FIXED_PHY=y
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+CONFIG_CPMAC=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_RT2X00 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+CONFIG_ATM_DRIVERS=y
+# CONFIG_ATM_DUMMY is not set
+# CONFIG_ATM_TCP is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_PPP_DEFLATE is not set
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_MPPE is not set
+CONFIG_PPPOE=m
+CONFIG_PPPOATM=m
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AR7_WDT=y
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB=y
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_SERIAL=y
+CONFIG_SSB_DRIVER_MIPS=y
+CONFIG_SSB_EMBEDDED=y
+CONFIG_SSB_DRIVER_EXTIF=y
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_GPIO is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+CONFIG_VLYNQ=y
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+# CONFIG_JFFS2_FS_XATTR is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=m
+CONFIG_CRYPTO_ALGAPI2=m
+CONFIG_CRYPTO_AEAD2=m
+CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=m
+CONFIG_CRYPTO_HASH2=m
+CONFIG_CRYPTO_RNG2=m
+CONFIG_CRYPTO_PCOMP=m
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=m
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=m
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/gt64120/wrppmc/serial.c b/arch/mips/gt64120/wrppmc/serial.c
index 5ec1c2ffd3a5..6f9d0858f596 100644
--- a/arch/mips/gt64120/wrppmc/serial.c
+++ b/arch/mips/gt64120/wrppmc/serial.c
@@ -1,7 +1,7 @@
/*
* Registration of WRPPMC UART platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/amon.h b/arch/mips/include/asm/amon.h
new file mode 100644
index 000000000000..c3dc1a68dd8d
--- /dev/null
+++ b/arch/mips/include/asm/amon.h
@@ -0,0 +1,7 @@
+/*
+ * Amon support
+ */
+
+int amon_cpu_avail(int);
+void amon_cpu_start(int, unsigned long, unsigned long,
+ unsigned long, unsigned long);
diff --git a/arch/mips/include/asm/ds1287.h b/arch/mips/include/asm/ds1287.h
index ba1702e86931..3af0b8fb3b8c 100644
--- a/arch/mips/include/asm/ds1287.h
+++ b/arch/mips/include/asm/ds1287.h
@@ -1,7 +1,7 @@
/*
* DS1287 timer functions.
*
- * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index d58f128aa747..7990694cda22 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -316,9 +316,13 @@ extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs);
extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
+#ifndef ELF_CORE_COPY_REGS
#define ELF_CORE_COPY_REGS(elf_regs, regs) \
elf_dump_regs((elf_greg_t *)&(elf_regs), regs);
+#endif
+#ifndef ELF_CORE_COPY_TASK_REGS
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
+#endif
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
dump_task_fpu(tsk, elf_fpregs)
diff --git a/arch/mips/include/asm/gcmpregs.h b/arch/mips/include/asm/gcmpregs.h
index d74a8a4ca861..36fd969d64d6 100644
--- a/arch/mips/include/asm/gcmpregs.h
+++ b/arch/mips/include/asm/gcmpregs.h
@@ -114,4 +114,6 @@
#define GCMP_CCB_DINTGROUP_OFS 0x0030 /* DINT Group Participate */
#define GCMP_CCB_DBGGROUP_OFS 0x0100 /* DebugBreak Group */
+extern int __init gcmp_probe(unsigned long, unsigned long);
+
#endif /* _ASM_GCMPREGS_H */
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 954807d9d66a..10292e37c1f7 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -20,7 +20,11 @@
#define GIC_TRIG_EDGE 1
#define GIC_TRIG_LEVEL 0
+#if CONFIG_SMP
+#define GIC_NUM_INTRS (24 + NR_CPUS * 2)
+#else
#define GIC_NUM_INTRS 32
+#endif
#define MSK(n) ((1 << (n)) - 1)
#define REG32(addr) (*(volatile unsigned int *) (addr))
@@ -483,5 +487,7 @@ extern void gic_init(unsigned long gic_base_addr,
extern unsigned int gic_get_int(void);
extern void gic_send_ipi(unsigned int intr);
+extern unsigned int plat_ipi_call_int_xlate(unsigned int);
+extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
#endif /* _ASM_GICREGS_H */
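
The two xlate hooks declared above let platform code translate a CPU number into its GIC IPI interrupt number before sending. A minimal sketch of how an SMP backend uses them (this mirrors the smp-cmp.c conversion later in this patch):

/* Sketch only: send call-function and reschedule IPIs to one CPU,
 * letting the platform map cpu -> GIC interrupt number.
 */
static void example_send_ipis(unsigned int cpu)
{
	gic_send_ipi(plat_ipi_call_int_xlate(cpu));
	gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
}
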
diff --git a/arch/mips/include/asm/irq_gt641xx.h b/arch/mips/include/asm/irq_gt641xx.h
index f9a7c3ac2e66..250a2407b599 100644
--- a/arch/mips/include/asm/irq_gt641xx.h
+++ b/arch/mips/include/asm/irq_gt641xx.h
@@ -1,7 +1,7 @@
/*
* Galileo/Marvell GT641xx IRQ definitions.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
new file mode 100644
index 000000000000..de71694614de
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __AR7_H__
+#define __AR7_H__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+
+#include <asm/addrspace.h>
+
+#define AR7_SDRAM_BASE 0x14000000
+
+#define AR7_REGS_BASE 0x08610000
+
+#define AR7_REGS_MAC0 (AR7_REGS_BASE + 0x0000)
+#define AR7_REGS_GPIO (AR7_REGS_BASE + 0x0900)
+/* 0x08610A00 - 0x08610BFF (512 bytes, 128 bytes / clock) */
+#define AR7_REGS_POWER (AR7_REGS_BASE + 0x0a00)
+#define AR7_REGS_CLOCKS (AR7_REGS_POWER + 0x80)
+#define UR8_REGS_CLOCKS (AR7_REGS_POWER + 0x20)
+#define AR7_REGS_UART0 (AR7_REGS_BASE + 0x0e00)
+#define AR7_REGS_USB (AR7_REGS_BASE + 0x1200)
+#define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
+#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
+#define AR7_REGS_DCL (AR7_REGS_BASE + 0x1a00)
+#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
+#define AR7_REGS_MDIO (AR7_REGS_BASE + 0x1e00)
+#define AR7_REGS_IRQ (AR7_REGS_BASE + 0x2400)
+#define AR7_REGS_MAC1 (AR7_REGS_BASE + 0x2800)
+
+#define AR7_REGS_WDT (AR7_REGS_BASE + 0x1f00)
+#define UR8_REGS_WDT (AR7_REGS_BASE + 0x0b00)
+#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
+
+#define AR7_RESET_PEREPHERIAL 0x0
+#define AR7_RESET_SOFTWARE 0x4
+#define AR7_RESET_STATUS 0x8
+
+#define AR7_RESET_BIT_CPMAC_LO 17
+#define AR7_RESET_BIT_CPMAC_HI 21
+#define AR7_RESET_BIT_MDIO 22
+#define AR7_RESET_BIT_EPHY 26
+
+/* GPIO control registers */
+#define AR7_GPIO_INPUT 0x0
+#define AR7_GPIO_OUTPUT 0x4
+#define AR7_GPIO_DIR 0x8
+#define AR7_GPIO_ENABLE 0xc
+
+#define AR7_CHIP_7100 0x18
+#define AR7_CHIP_7200 0x2b
+#define AR7_CHIP_7300 0x05
+
+/* Interrupts */
+#define AR7_IRQ_UART0 15
+#define AR7_IRQ_UART1 16
+
+/* Clocks */
+#define AR7_AFE_CLOCK 35328000
+#define AR7_REF_CLOCK 25000000
+#define AR7_XTAL_CLOCK 24000000
+
+struct plat_cpmac_data {
+ int reset_bit;
+ int power_bit;
+ u32 phy_mask;
+ char dev_addr[6];
+};
+
+struct plat_dsl_data {
+ int reset_bit_dsl;
+ int reset_bit_sar;
+};
+
+extern int ar7_cpu_clock, ar7_bus_clock, ar7_dsp_clock;
+
+static inline u16 ar7_chip_id(void)
+{
+ return readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x14)) & 0xffff;
+}
+
+static inline u8 ar7_chip_rev(void)
+{
+ return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x14)) >> 16) & 0xff;
+}
+
+static inline int ar7_cpu_freq(void)
+{
+ return ar7_cpu_clock;
+}
+
+static inline int ar7_bus_freq(void)
+{
+ return ar7_bus_clock;
+}
+
+static inline int ar7_vbus_freq(void)
+{
+ return ar7_bus_clock / 2;
+}
+#define ar7_cpmac_freq ar7_vbus_freq
+
+static inline int ar7_dsp_freq(void)
+{
+ return ar7_dsp_clock;
+}
+
+static inline int ar7_has_high_cpmac(void)
+{
+ u16 chip_id = ar7_chip_id();
+ switch (chip_id) {
+ case AR7_CHIP_7100:
+ case AR7_CHIP_7200:
+ return 0;
+ case AR7_CHIP_7300:
+ return 1;
+ default:
+ return -ENXIO;
+ }
+}
+#define ar7_has_high_vlynq ar7_has_high_cpmac
+#define ar7_has_second_uart ar7_has_high_cpmac
+
+static inline void ar7_device_enable(u32 bit)
+{
+ void *reset_reg =
+ (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PEREPHERIAL);
+ writel(readl(reset_reg) | (1 << bit), reset_reg);
+ msleep(20);
+}
+
+static inline void ar7_device_disable(u32 bit)
+{
+ void *reset_reg =
+ (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PEREPHERIAL);
+ writel(readl(reset_reg) & ~(1 << bit), reset_reg);
+ msleep(20);
+}
+
+static inline void ar7_device_reset(u32 bit)
+{
+ ar7_device_disable(bit);
+ ar7_device_enable(bit);
+}
+
+static inline void ar7_device_on(u32 bit)
+{
+ void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
+ writel(readl(power_reg) | (1 << bit), power_reg);
+ msleep(20);
+}
+
+static inline void ar7_device_off(u32 bit)
+{
+ void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
+ writel(readl(power_reg) & ~(1 << bit), power_reg);
+ msleep(20);
+}
+
+#endif /* __AR7_H__ */
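
The ar7_device_*() helpers above toggle reset and power bits with a 20 ms settling delay, and the chip-id helpers read the bond-out register at GPIO base + 0x14. A hedged sketch of how a board-setup path might combine them (not part of this patch; the function is hypothetical):

/* Illustrative only: bring the low CPMAC out of reset and, on SoCs
 * that have it, the high CPMAC as well, then report the revision.
 */
static int example_ar7_setup(void)
{
	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);

	if (ar7_has_high_cpmac() > 0)
		ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);

	printk(KERN_INFO "AR7 chip id %04x rev %02x\n",
	       ar7_chip_id(), ar7_chip_rev());
	return 0;
}
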
diff --git a/arch/mips/include/asm/mach-ar7/gpio.h b/arch/mips/include/asm/mach-ar7/gpio.h
new file mode 100644
index 000000000000..cbe9c4f126df
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/gpio.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __AR7_GPIO_H__
+#define __AR7_GPIO_H__
+
+#include <asm/mach-ar7/ar7.h>
+
+#define AR7_GPIO_MAX 32
+
+extern int gpio_request(unsigned gpio, const char *label);
+extern void gpio_free(unsigned gpio);
+
+/* Common GPIO layer */
+static inline int gpio_get_value(unsigned gpio)
+{
+ void __iomem *gpio_in =
+ (void __iomem *)KSEG1ADDR(AR7_REGS_GPIO + AR7_GPIO_INPUT);
+
+ return readl(gpio_in) & (1 << gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ void __iomem *gpio_out =
+ (void __iomem *)KSEG1ADDR(AR7_REGS_GPIO + AR7_GPIO_OUTPUT);
+ unsigned tmp;
+
+ tmp = readl(gpio_out) & ~(1 << gpio);
+ if (value)
+ tmp |= 1 << gpio;
+ writel(tmp, gpio_out);
+}
+
+static inline int gpio_direction_input(unsigned gpio)
+{
+ void __iomem *gpio_dir =
+ (void __iomem *)KSEG1ADDR(AR7_REGS_GPIO + AR7_GPIO_DIR);
+
+ if (gpio >= AR7_GPIO_MAX)
+ return -EINVAL;
+
+ writel(readl(gpio_dir) | (1 << gpio), gpio_dir);
+
+ return 0;
+}
+
+static inline int gpio_direction_output(unsigned gpio, int value)
+{
+ void __iomem *gpio_dir =
+ (void __iomem *)KSEG1ADDR(AR7_REGS_GPIO + AR7_GPIO_DIR);
+
+ if (gpio >= AR7_GPIO_MAX)
+ return -EINVAL;
+
+ gpio_set_value(gpio, value);
+ writel(readl(gpio_dir) & ~(1 << gpio), gpio_dir);
+
+ return 0;
+}
+
+static inline int gpio_to_irq(unsigned gpio)
+{
+ return -EINVAL;
+}
+
+static inline int irq_to_gpio(unsigned irq)
+{
+ return -EINVAL;
+}
+
+/* Board specific GPIO functions */
+static inline int ar7_gpio_enable(unsigned gpio)
+{
+ void __iomem *gpio_en =
+ (void __iomem *)KSEG1ADDR(AR7_REGS_GPIO + AR7_GPIO_ENABLE);
+
+ writel(readl(gpio_en) | (1 << gpio), gpio_en);
+
+ return 0;
+}
+
+static inline int ar7_gpio_disable(unsigned gpio)
+{
+ void __iomem *gpio_en =
+ (void __iomem *)KSEG1ADDR(AR7_REGS_GPIO + AR7_GPIO_ENABLE);
+
+ writel(readl(gpio_en) & ~(1 << gpio), gpio_en);
+
+ return 0;
+}
+
+#include <asm-generic/gpio.h>
+
+#endif
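
These inlines back the generic GPIO API with direct register accesses, and ar7_gpio_enable() hands a pin over to the GPIO block. A small usage sketch (the GPIO number is a made-up, board-specific value):

/* Sketch: request an assumed status-LED line, route it to the GPIO
 * block and drive it high.
 */
static int example_status_led_on(void)
{
	int err = gpio_request(6, "status-led");	/* 6 is hypothetical */

	if (err)
		return err;

	ar7_gpio_enable(6);
	return gpio_direction_output(6, 1);
}
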
diff --git a/arch/mips/include/asm/mach-ar7/irq.h b/arch/mips/include/asm/mach-ar7/irq.h
new file mode 100644
index 000000000000..39e9757e3d93
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/irq.h
@@ -0,0 +1,16 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Shamelessly copied from asm-mips/mach-emma2rh/
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#ifndef __ASM_AR7_IRQ_H
+#define __ASM_AR7_IRQ_H
+
+#define NR_IRQS 256
+
+#include_next <irq.h>
+
+#endif /* __ASM_AR7_IRQ_H */
diff --git a/arch/mips/include/asm/mach-ar7/prom.h b/arch/mips/include/asm/mach-ar7/prom.h
new file mode 100644
index 000000000000..088f61fe85ea
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/prom.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2006, 2007 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __PROM_H__
+#define __PROM_H__
+
+extern char *prom_getenv(const char *name);
+extern void prom_meminit(void);
+
+#endif /* __PROM_H__ */
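
prom_getenv() exposes the bootloader environment to board code. A hedged example of a lookup (the variable name is an assumption, not something this patch defines):

/* Sketch only: query an assumed environment key from the bootloader. */
static void example_show_bootloader_mac(void)
{
	char *mac = prom_getenv("macaddr");	/* key name is hypothetical */

	if (mac)
		printk(KERN_INFO "bootloader MAC: %s\n", mac);
}
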
diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h
new file mode 100644
index 000000000000..ac28f273449c
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/spaces.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_AR7_SPACES_H
+#define _ASM_AR7_SPACES_H
+
+/*
+ * This handles the memory map.
+ * We handle pages at KSEG0 for kernels with 32 bit address space.
+ */
+#define PAGE_OFFSET 0x94000000UL
+#define PHYS_OFFSET 0x14000000UL
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_AR7_SPACES_H */
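
With PAGE_OFFSET at 0x94000000 and PHYS_OFFSET at 0x14000000, the linear mapping used by the generic MIPS __va()/__pa() helpers is a fixed offset, as this sketch of the arithmetic shows:

/* Sketch of the implied linear map (generic MIPS __va()):
 *   virt = phys + PAGE_OFFSET - PHYS_OFFSET
 * so physical 0x14000000 (start of AR7 SDRAM) maps to virtual 0x94000000.
 */
#define EXAMPLE_AR7_PHYS_TO_VIRT(p) ((p) + 0x94000000UL - 0x14000000UL)
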
diff --git a/arch/mips/include/asm/mach-ar7/war.h b/arch/mips/include/asm/mach-ar7/war.h
new file mode 100644
index 000000000000..f4862b563080
--- /dev/null
+++ b/arch/mips/include/asm/mach-ar7/war.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_MACH_AR7_WAR_H
+#define __ASM_MIPS_MACH_AR7_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define RM9000_CDEX_SMP_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+
+#endif /* __ASM_MIPS_MACH_AR7_WAR_H */
diff --git a/arch/mips/include/asm/mach-cobalt/irq.h b/arch/mips/include/asm/mach-cobalt/irq.h
index 57c8c9ac5851..9da9acf5dcba 100644
--- a/arch/mips/include/asm/mach-cobalt/irq.h
+++ b/arch/mips/include/asm/mach-cobalt/irq.h
@@ -8,7 +8,7 @@
* Copyright (C) 1997 Cobalt Microserver
* Copyright (C) 1997, 2003 Ralf Baechle
* Copyright (C) 2001-2003 Liam Davies (ldavies@agile.tv)
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*/
#ifndef _ASM_COBALT_IRQ_H
#define _ASM_COBALT_IRQ_H
diff --git a/arch/mips/include/asm/mach-cobalt/mach-gt64120.h b/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
index ae9c5523c7ef..f8afec3f2943 100644
--- a/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
+++ b/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2006 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 07547231e078..59f28e9412c4 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -24,12 +24,10 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
#define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid)
#define parent_node(node) (node)
-#define node_to_cpumask(node) (hub_data(node)->h_cpus)
#define cpumask_of_node(node) (&hub_data(node)->h_cpus)
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);
-#define pcibus_to_cpumask(bus) (cpu_online_map)
#define cpumask_of_pcibus(bus) (cpu_online_mask)
extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index d3bea88d8744..d9743536a621 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -178,8 +178,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Mark current->active_mm as not "active" anymore.
* We don't want to mislead possible IPI tlb flush routines.
*/
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
local_irq_restore(flags);
}
@@ -235,8 +235,8 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* mark mmu ownership change */
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
local_irq_restore(flags);
}
@@ -258,7 +258,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
local_irq_save(flags);
- if (cpu_isset(cpu, mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
new file mode 100644
index 000000000000..6ac5d3e3398e
--- /dev/null
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -0,0 +1,45 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2009 Cavium Networks
+ */
+
+#ifndef __PCI_OCTEON_H__
+#define __PCI_OCTEON_H__
+
+#include <linux/pci.h>
+
+/* Some PCI cards require delays when accessing config space. */
+#define PCI_CONFIG_SPACE_DELAY 10000
+
+/*
+ * pcibios_map_irq() is defined inside pci-octeon.c. All it does is
+ * call the Octeon specific version pointed to by this variable. This
+ * function needs to change for PCI or PCIe based hosts.
+ */
+extern int (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
+ u8 slot, u8 pin);
+
+/*
+ * The following defines are used when octeon_dma_bar_type =
+ * OCTEON_DMA_BAR_TYPE_BIG
+ */
+#define OCTEON_PCI_BAR1_HOLE_BITS 5
+#define OCTEON_PCI_BAR1_HOLE_SIZE (1ul<<(OCTEON_PCI_BAR1_HOLE_BITS+3))
+
+enum octeon_dma_bar_type {
+ OCTEON_DMA_BAR_TYPE_INVALID,
+ OCTEON_DMA_BAR_TYPE_SMALL,
+ OCTEON_DMA_BAR_TYPE_BIG,
+ OCTEON_DMA_BAR_TYPE_PCIE
+};
+
+/*
+ * This tells the DMA mapping system in dma-octeon.c how to map PCI
+ * DMA addresses.
+ */
+extern enum octeon_dma_bar_type octeon_dma_bar_type;
+
+#endif
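
Host-controller setup code is expected to pick an octeon_dma_bar_type and install an octeon_pcibios_map_irq handler. A hedged sketch of that shape (both functions below are placeholders, not Octeon's real routing):

/* Illustration only: the IRQ routing here is invented. */
static int example_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return 40 + pin;			/* assumed board routing */
}

static void example_octeon_pci_setup(void)
{
	octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
	octeon_pcibios_map_irq = example_map_irq;
}
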
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index dc0eaa731281..96a14a426a7c 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -165,7 +165,14 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
+#define pfn_valid(pfn) \
+({ \
+ unsigned long __pfn = (pfn); \
+ /* avoid <linux/bootmem.h> include hell */ \
+ extern unsigned long min_low_pfn; \
+ \
+ __pfn >= min_low_pfn && __pfn < max_mapnr; \
+})
#elif defined(CONFIG_SPARSEMEM)
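
Besides pulling in min_low_pfn without the bootmem.h include, the statement-expression form evaluates its argument exactly once, which the old two-reference macro did not. A small sketch of why that matters:

/* Sketch: safe even with a side effect in the argument; under the old
 * macro the increment below would have happened twice per test.
 */
static unsigned long example_count_valid(unsigned long pfn, unsigned long n)
{
	unsigned long valid = 0;

	while (n--)
		if (pfn_valid(pfn++))
			valid++;
	return valid;
}
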
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index a68d111e55e9..5ebf82572ec0 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -65,8 +65,6 @@ extern int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
extern unsigned int pcibios_assign_all_busses(void);
-#define pcibios_scan_all_fns(a, b) 0
-
extern unsigned long PCIBIOS_MIN_IO;
extern unsigned long PCIBIOS_MIN_MEM;
diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
index 634b55d7e7f6..910e71a12466 100644
--- a/arch/mips/include/asm/reg.h
+++ b/arch/mips/include/asm/reg.h
@@ -69,7 +69,7 @@
#endif
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H)
#define EF_R0 0
#define EF_R1 1
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index fd545547b8aa..9e09af34c8a8 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -19,7 +19,7 @@ struct task_struct;
struct plat_smp_ops {
void (*send_ipi_single)(int cpu, unsigned int action);
- void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
+ void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
void (*init_secondary)(void);
void (*smp_finish)(void);
void (*cpus_done)(void);
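
send_ipi_mask now takes a const struct cpumask * instead of a cpumask_t by value, so implementations walk the mask with for_each_cpu(). Sketch of the converted hook shape (example_send_ipi_single() is a stand-in for a platform's per-CPU sender; the real conversions appear in smp-cmp.c, smp-mt.c and malta-smtc.c below):

static void example_send_ipi_mask(const struct cpumask *mask,
				  unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		example_send_ipi_single(cpu, action);
}
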
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index aaa2d4ab26dc..e15f11a09311 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -78,6 +78,6 @@ extern void play_dead(void);
extern asmlinkage void smp_call_function_interrupt(void);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif /* __ASM_SMP_H */
diff --git a/arch/mips/include/asm/swab.h b/arch/mips/include/asm/swab.h
index 99993c0d6c12..97c2f81b4b43 100644
--- a/arch/mips/include/asm/swab.h
+++ b/arch/mips/include/asm/swab.h
@@ -38,7 +38,11 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
}
#define __arch_swab32 __arch_swab32
-#ifdef CONFIG_CPU_MIPS64_R2
+/*
+ * Having already checked for CONFIG_CPU_MIPSR2, enable the
+ * optimized version for 64-bit kernel on r2 CPUs.
+ */
+#ifdef CONFIG_64BIT
static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
{
__asm__(
@@ -50,6 +54,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
return x;
}
#define __arch_swab64 __arch_swab64
-#endif /* CONFIG_CPU_MIPS64_R2 */
+#endif /* CONFIG_64BIT */
#endif /* CONFIG_CPU_MIPSR2 */
#endif /* _ASM_SWAB_H */
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 40005010827c..b70c49fdda26 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -352,16 +352,18 @@
#define __NR_inotify_init1 (__NR_Linux + 329)
#define __NR_preadv (__NR_Linux + 330)
#define __NR_pwritev (__NR_Linux + 331)
+#define __NR_rt_tgsigqueueinfo (__NR_Linux + 332)
+#define __NR_perf_counter_open (__NR_Linux + 333)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 331
+#define __NR_Linux_syscalls 333
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 331
+#define __NR_O32_Linux_syscalls 333
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -660,16 +662,18 @@
#define __NR_inotify_init1 (__NR_Linux + 288)
#define __NR_preadv (__NR_Linux + 289)
#define __NR_pwritev (__NR_Linux + 290)
+#define __NR_rt_tgsigqueueinfo (__NR_Linux + 291)
+#define __NR_perf_counter_open (__NR_Linux + 292)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 290
+#define __NR_Linux_syscalls 292
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 290
+#define __NR_64_Linux_syscalls 292
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -972,16 +976,18 @@
#define __NR_inotify_init1 (__NR_Linux + 292)
#define __NR_preadv (__NR_Linux + 293)
#define __NR_pwritev (__NR_Linux + 294)
+#define __NR_rt_tgsigqueueinfo (__NR_Linux + 295)
+#define __NR_perf_counter_open (__NR_Linux + 296)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 294
+#define __NR_Linux_syscalls 296
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 294
+#define __NR_N32_Linux_syscalls 296
#ifdef __KERNEL__
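
The new o32/64/N32 slots expose rt_tgsigqueueinfo and perf_counter_open; from userspace they are reached through syscall(2). A hedged userspace sketch (error handling is minimal and the attr layout is the one shipped with this kernel's linux/perf_counter.h):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

int main(void)
{
	struct perf_counter_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* open a cycle counter on the calling task, any CPU */
	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_counter_open");
	else
		close(fd);
	return 0;
}
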
diff --git a/arch/mips/include/asm/vr41xx/capcella.h b/arch/mips/include/asm/vr41xx/capcella.h
index e0ee05a3dfcc..fcc6569414fa 100644
--- a/arch/mips/include/asm/vr41xx/capcella.h
+++ b/arch/mips/include/asm/vr41xx/capcella.h
@@ -1,7 +1,7 @@
/*
* capcella.h, Include file for ZAO Networks Capcella.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/vr41xx/giu.h b/arch/mips/include/asm/vr41xx/giu.h
index 0bcdd3a5c256..6a90bc1d916b 100644
--- a/arch/mips/include/asm/vr41xx/giu.h
+++ b/arch/mips/include/asm/vr41xx/giu.h
@@ -1,7 +1,7 @@
/*
* Include file for NEC VR4100 series General-purpose I/O Unit.
*
- * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005-2009 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,7 +41,8 @@ typedef enum {
IRQ_SIGNAL_HOLD,
} irq_signal_t;
-extern void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_t signal);
+extern void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
+ irq_signal_t signal);
typedef enum {
IRQ_LEVEL_LOW,
@@ -51,23 +52,6 @@ typedef enum {
extern void vr41xx_set_irq_level(unsigned int pin, irq_level_t level);
typedef enum {
- GPIO_DATA_LOW,
- GPIO_DATA_HIGH,
- GPIO_DATA_INVAL,
-} gpio_data_t;
-
-extern gpio_data_t vr41xx_gpio_get_pin(unsigned int pin);
-extern int vr41xx_gpio_set_pin(unsigned int pin, gpio_data_t data);
-
-typedef enum {
- GPIO_INPUT,
- GPIO_OUTPUT,
- GPIO_OUTPUT_DISABLE,
-} gpio_direction_t;
-
-extern int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir);
-
-typedef enum {
GPIO_PULL_DOWN,
GPIO_PULL_UP,
GPIO_PULL_DISABLE,
diff --git a/arch/mips/include/asm/vr41xx/irq.h b/arch/mips/include/asm/vr41xx/irq.h
index d315dfbc08f2..b07f7321751d 100644
--- a/arch/mips/include/asm/vr41xx/irq.h
+++ b/arch/mips/include/asm/vr41xx/irq.h
@@ -7,7 +7,7 @@
* Copyright (C) 2001, 2002 Paul Mundt
* Copyright (C) 2002 MontaVista Software, Inc.
* Copyright (C) 2002 TimeSys Corp.
- * Copyright (C) 2003-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2006 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/arch/mips/include/asm/vr41xx/mpc30x.h b/arch/mips/include/asm/vr41xx/mpc30x.h
index 1d67df843dc3..130d09d8c8cb 100644
--- a/arch/mips/include/asm/vr41xx/mpc30x.h
+++ b/arch/mips/include/asm/vr41xx/mpc30x.h
@@ -1,7 +1,7 @@
/*
* mpc30x.h, Include file for Victor MP-C303/304.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/vr41xx/pci.h b/arch/mips/include/asm/vr41xx/pci.h
index 6fc01ce19777..c231a3d6cfd8 100644
--- a/arch/mips/include/asm/vr41xx/pci.h
+++ b/arch/mips/include/asm/vr41xx/pci.h
@@ -1,7 +1,7 @@
/*
* Include file for NEC VR4100 series PCI Control Unit.
*
- * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/vr41xx/siu.h b/arch/mips/include/asm/vr41xx/siu.h
index da9f6e373409..ca806bc4ddc8 100644
--- a/arch/mips/include/asm/vr41xx/siu.h
+++ b/arch/mips/include/asm/vr41xx/siu.h
@@ -1,7 +1,7 @@
/*
* Include file for NEC VR4100 series Serial Interface Unit.
*
- * Copyright (C) 2005-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/vr41xx/tb0219.h b/arch/mips/include/asm/vr41xx/tb0219.h
index dc981b4be0a4..c78e8243b447 100644
--- a/arch/mips/include/asm/vr41xx/tb0219.h
+++ b/arch/mips/include/asm/vr41xx/tb0219.h
@@ -1,7 +1,7 @@
/*
* tb0219.h, Include file for TANBAC TB0219.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
*
* Modified for TANBAC TB0219:
* Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
diff --git a/arch/mips/include/asm/vr41xx/tb0226.h b/arch/mips/include/asm/vr41xx/tb0226.h
index de527dcfa5f3..36f5f798e416 100644
--- a/arch/mips/include/asm/vr41xx/tb0226.h
+++ b/arch/mips/include/asm/vr41xx/tb0226.h
@@ -1,7 +1,7 @@
/*
* tb0226.h, Include file for TANBAC TB0226.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/vr41xx/vr41xx.h b/arch/mips/include/asm/vr41xx/vr41xx.h
index 22be64971cc6..7b96a43b72ba 100644
--- a/arch/mips/include/asm/vr41xx/vr41xx.h
+++ b/arch/mips/include/asm/vr41xx/vr41xx.h
@@ -7,7 +7,7 @@
* Copyright (C) 2001, 2002 Paul Mundt
* Copyright (C) 2002 MontaVista Software, Inc.
* Copyright (C) 2002 TimeSys Corp.
- * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index e1333d7319e2..ff448233dab5 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -53,6 +53,23 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
+
+/*
+ * When this file is selected, we are definitely running a 64-bit kernel,
+ * so use the compat register definitions from asm/reg.h.
+ */
+#define WANT_COMPAT_REG_H
+
+/* These MUST be defined before elf.h gets included */
+extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
+#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
+#define ELF_CORE_COPY_TASK_REGS(_tsk, _dest) \
+({ \
+ int __res = 1; \
+ elf32_core_copy_regs(*(_dest), task_pt_regs(_tsk)); \
+ __res; \
+})
+
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
@@ -110,9 +127,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
value->tv_usec = rem / NSEC_PER_USEC;
}
-#undef ELF_CORE_COPY_REGS
-#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
-
void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
int i;
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index 1ada45ea0700..6996da4d74a2 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -1,7 +1,7 @@
/*
* DS1287 clockevent driver
*
- * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index e9b787feedcb..92351e00ae0e 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -1,7 +1,7 @@
/*
* GT641xx clockevent routines.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
index b551f48d3a07..23da108506b0 100644
--- a/arch/mips/kernel/csrc-ioasic.c
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -1,7 +1,7 @@
/*
* DEC I/O ASIC's counter clocksource
*
- * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 39000f103f2c..d2072cd38592 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -107,9 +107,7 @@ static unsigned int gic_irq_startup(unsigned int irq)
{
pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
irq -= _irqbase;
- /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
- GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
- 1 << (irq % 32));
+ GIC_SET_INTR_MASK(irq, 1);
return 0;
}
@@ -120,8 +118,7 @@ static void gic_irq_ack(unsigned int irq)
#endif
pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
irq -= _irqbase;
- GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
- 1 << (irq % 32));
+ GIC_CLR_INTR_MASK(irq, 1);
if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
if (!gic_wedgeb2bok)
@@ -138,18 +135,14 @@ static void gic_mask_irq(unsigned int irq)
{
pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
irq -= _irqbase;
- /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
- GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
- 1 << (irq % 32));
+ GIC_CLR_INTR_MASK(irq, 1);
}
static void gic_unmask_irq(unsigned int irq)
{
pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
irq -= _irqbase;
- /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
- GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
- 1 << (irq % 32));
+ GIC_SET_INTR_MASK(irq, 1);
}
#ifdef CONFIG_SMP
@@ -254,6 +247,10 @@ static void __init gic_basic_init(void)
if (cpu == X)
continue;
+ if (cpu == 0 && i != 0 && _intrmap[i].intrnum == 0 &&
+ _intrmap[i].ipiflag == 0)
+ continue;
+
setup_intr(_intrmap[i].intrnum,
_intrmap[i].cpunum,
_intrmap[i].pin,
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
index 1b81b131f43c..ebcc5f7ad9c2 100644
--- a/arch/mips/kernel/irq-gt641xx.c
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -1,7 +1,7 @@
/*
* GT641xx IRQ routines.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 0b31b9bda048..20a86e08fd58 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -652,6 +652,8 @@ einval: li v0, -ENOSYS
sys sys_inotify_init1 1
sys sys_preadv 6 /* 4330 */
sys sys_pwritev 6
+ sys sys_rt_tgsigqueueinfo 4
+ sys sys_perf_counter_open 5
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index c647fd6e722f..b046130d4c5d 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -489,4 +489,6 @@ sys_call_table:
PTR sys_inotify_init1
PTR sys_preadv
PTR sys_pwritev /* 5390 */
+ PTR sys_rt_tgsigqueueinfo
+ PTR sys_perf_counter_open
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 93cc672f4522..15874f9812cc 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -415,4 +415,6 @@ EXPORT(sysn32_call_table)
PTR sys_inotify_init1
PTR sys_preadv
PTR sys_pwritev
+ PTR compat_sys_rt_tgsigqueueinfo /* 5295 */
+ PTR sys_perf_counter_open
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index a5598b2339dd..781e0f1e9533 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -535,4 +535,6 @@ sys_call_table:
PTR sys_inotify_init1
PTR compat_sys_preadv /* 4330 */
PTR compat_sys_pwritev
+ PTR compat_sys_rt_tgsigqueueinfo
+ PTR sys_perf_counter_open
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 653be061b9ec..e5cf5b88fc28 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -37,80 +37,24 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
-
-/*
- * Crude manipulation of the CPU masks to control which
- * which CPU's are brought online during initialisation
- *
- * Beware... this needs to be called after CPU discovery
- * but before CPU bringup
- */
-static int __init allowcpus(char *str)
-{
- cpumask_t cpu_allow_map;
- char buf[256];
- int len;
-
- cpus_clear(cpu_allow_map);
- if (cpulist_parse(str, &cpu_allow_map) == 0) {
- cpu_set(0, cpu_allow_map);
- cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
- len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map);
- buf[len] = '\0';
- pr_debug("Allowable CPUs: %s\n", buf);
- return 1;
- } else
- return 0;
-}
-__setup("allowcpus=", allowcpus);
+#include <asm/amon.h>
+#include <asm/gic.h>
static void ipi_call_function(unsigned int cpu)
{
- unsigned int action = 0;
-
pr_debug("CPU%d: %s cpu %d status %08x\n",
smp_processor_id(), __func__, cpu, read_c0_status());
- switch (cpu) {
- case 0:
- action = GIC_IPI_EXT_INTR_CALLFNC_VPE0;
- break;
- case 1:
- action = GIC_IPI_EXT_INTR_CALLFNC_VPE1;
- break;
- case 2:
- action = GIC_IPI_EXT_INTR_CALLFNC_VPE2;
- break;
- case 3:
- action = GIC_IPI_EXT_INTR_CALLFNC_VPE3;
- break;
- }
- gic_send_ipi(action);
+ gic_send_ipi(plat_ipi_call_int_xlate(cpu));
}
static void ipi_resched(unsigned int cpu)
{
- unsigned int action = 0;
-
pr_debug("CPU%d: %s cpu %d status %08x\n",
smp_processor_id(), __func__, cpu, read_c0_status());
- switch (cpu) {
- case 0:
- action = GIC_IPI_EXT_INTR_RESCHED_VPE0;
- break;
- case 1:
- action = GIC_IPI_EXT_INTR_RESCHED_VPE1;
- break;
- case 2:
- action = GIC_IPI_EXT_INTR_RESCHED_VPE2;
- break;
- case 3:
- action = GIC_IPI_EXT_INTR_RESCHED_VPE3;
- break;
- }
- gic_send_ipi(action);
+ gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
}
/*
@@ -136,11 +80,11 @@ void cmp_send_ipi_single(int cpu, unsigned int action)
local_irq_restore(flags);
}
-static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
cmp_send_ipi_single(i, action);
}
@@ -206,7 +150,7 @@ static void cmp_boot_secondary(int cpu, struct task_struct *idle)
(unsigned long)(gp + sizeof(struct thread_info)));
#endif
- amon_cpu_start(cpu, pc, sp, gp, a0);
+ amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
}
/*
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 6f7ee5ac46ee..9538ca42e008 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -141,11 +141,11 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
local_irq_restore(flags);
}
-static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
vsmp_send_ipi_single(i, action);
}
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 2508d55d68fd..00500fea2750 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -18,7 +18,8 @@ static void up_send_ipi_single(int cpu, unsigned int action)
panic(KERN_ERR "%s called", __func__);
}
-static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
panic(KERN_ERR "%s called", __func__);
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index bc7d9b05e2f4..b2a7b81cba02 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -129,7 +129,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_idle();
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
@@ -184,7 +184,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(&cpu_possible_map);
#endif
}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 9021108eb9c1..05dd170a83f7 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -1,7 +1,7 @@
/*
* Count register synchronisation.
*
- * All CPUs will have their count registers synchronised to the CPU0 expirelo
+ * All CPUs will have their count registers synchronised to the CPU0 next time
* value. This can cause a small timewarp for CPU0. All other CPU's should
* not have done anything significant (but they may have had interrupts
* enabled briefly - prom_smp_finish() should not be responsible for enabling
@@ -13,21 +13,22 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
-#include <linux/r4k-timer.h>
+#include <linux/cpumask.h>
+#include <asm/r4k-timer.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
-#include <asm/cpumask.h>
#include <asm/mipsregs.h>
-static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
-static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
-static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
+static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
+static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
+static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
+static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
#define COUNTON 100
#define NR_LOOPS 5
-void __init synchronise_count_master(void)
+void __cpuinit synchronise_count_master(void)
{
int i;
unsigned long flags;
@@ -42,19 +43,20 @@ void __init synchronise_count_master(void)
return;
#endif
- pr_info("Checking COUNT synchronization across %u CPUs: ",
- num_online_cpus());
+ printk(KERN_INFO "Synchronize counters across %u CPUs: ",
+ num_online_cpus());
local_irq_save(flags);
/*
* Notify the slaves that it's time to start
*/
+ atomic_set(&count_reference, read_c0_count());
atomic_set(&count_start_flag, 1);
smp_wmb();
- /* Count will be initialised to expirelo for all CPU's */
- initcount = expirelo;
+ /* Count will be initialised to current timer for all CPU's */
+ initcount = read_c0_count();
/*
* We loop a few times to get a primed instruction cache,
@@ -106,7 +108,7 @@ void __init synchronise_count_master(void)
printk("done.\n");
}
-void __init synchronise_count_slave(void)
+void __cpuinit synchronise_count_slave(void)
{
int i;
unsigned long flags;
@@ -131,8 +133,8 @@ void __init synchronise_count_slave(void)
while (!atomic_read(&count_start_flag))
mb();
- /* Count will be initialised to expirelo for all CPU's */
- initcount = expirelo;
+ /* Count will be initialised to next expire for all CPU's */
+ initcount = atomic_read(&count_reference);
ncpus = num_online_cpus();
for (i = 0; i < NR_LOOPS; i++) {
@@ -156,4 +158,3 @@ void __init synchronise_count_slave(void)
local_irq_restore(flags);
}
#undef NR_LOOPS
-#endif
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 58738c8d754f..45901609b741 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -179,6 +179,7 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
*(.exitcall.exit)
+ *(.discard)
/* ABI crap starts here */
*(.MIPS.options)
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 3ca5f42e819d..07b9ec2c6e3d 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1387,7 +1387,7 @@ static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
return len;
out_einval:
- return -EINVAL;;
+ return -EINVAL;
}
static struct device_attribute vpe_class_attributes[] = {
diff --git a/arch/mips/mipssim/sim_smtc.c b/arch/mips/mipssim/sim_smtc.c
index d6e4f656ad14..5da30b6a65b7 100644
--- a/arch/mips/mipssim/sim_smtc.c
+++ b/arch/mips/mipssim/sim_smtc.c
@@ -43,11 +43,12 @@ static void ssmtc_send_ipi_single(int cpu, unsigned int action)
/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
}
-static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void ssmtc_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
ssmtc_send_ipi_single(i, action);
}
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index b165cdcb2818..d3914f2335de 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -79,7 +79,7 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
* cores it has been used on
*/
if (vma)
- mask = vma->vm_mm->cpu_vm_mask;
+ mask = *mm_cpumask(vma->vm_mm);
else
mask = cpu_online_map;
cpu_clear(cpu, mask);
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 475038a141a6..27c807b67fea 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -30,6 +30,7 @@
#include <asm/cacheflush.h>
#include <asm/traps.h>
+#include <asm/gcmpregs.h>
#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/bonito64.h>
@@ -192,6 +193,8 @@ extern struct plat_smp_ops msmtc_smp_ops;
void __init prom_init(void)
{
+ int result;
+
prom_argc = fw_arg0;
_prom_argv = (int *) fw_arg1;
_prom_envp = (int *) fw_arg2;
@@ -358,12 +361,21 @@ void __init prom_init(void)
#ifdef CONFIG_SERIAL_8250_CONSOLE
console_config();
#endif
+ /* Early detection of CMP support */
+ result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
+
#ifdef CONFIG_MIPS_CMP
- register_smp_ops(&cmp_smp_ops);
+ if (result)
+ register_smp_ops(&cmp_smp_ops);
#endif
#ifdef CONFIG_MIPS_MT_SMP
+#ifdef CONFIG_MIPS_CMP
+ if (!result)
+ register_smp_ops(&vsmp_smp_ops);
+#else
register_smp_ops(&vsmp_smp_ops);
#endif
+#endif
#ifdef CONFIG_MIPS_MT_SMTC
register_smp_ops(&msmtc_smp_ops);
#endif
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index b4eaf137e4a7..a8756f82c31b 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -331,6 +331,21 @@ static struct irqaction irq_call = {
.flags = IRQF_DISABLED|IRQF_PERCPU,
.name = "IPI_call"
};
+
+static int gic_resched_int_base;
+static int gic_call_int_base;
+#define GIC_RESCHED_INT(cpu) (gic_resched_int_base+(cpu))
+#define GIC_CALL_INT(cpu) (gic_call_int_base+(cpu))
+
+unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
+{
+ return GIC_CALL_INT(cpu);
+}
+
+unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
+{
+ return GIC_RESCHED_INT(cpu);
+}
#endif /* CONFIG_MIPS_MT_SMP */
static struct irqaction i8259irq = {
@@ -370,7 +385,7 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
* Interrupts and CPUs/Core Interrupts. The nature of the External
* Interrupts is also defined here - polarity/trigger.
*/
-static struct gic_intr_map gic_intr_map[] = {
+static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ GIC_EXT_INTR(0), X, X, X, X, 0 },
{ GIC_EXT_INTR(1), X, X, X, X, 0 },
{ GIC_EXT_INTR(2), X, X, X, X, 0 },
@@ -387,21 +402,14 @@ static struct gic_intr_map gic_intr_map[] = {
{ GIC_EXT_INTR(13), 0, GIC_MAP_TO_NMI_MSK, GIC_POL_POS, GIC_TRIG_LEVEL, 0 },
{ GIC_EXT_INTR(14), 0, GIC_MAP_TO_NMI_MSK, GIC_POL_POS, GIC_TRIG_LEVEL, 0 },
{ GIC_EXT_INTR(15), X, X, X, X, 0 },
- { GIC_EXT_INTR(16), 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_EDGE, 1 },
- { GIC_EXT_INTR(17), 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_EDGE, 1 },
- { GIC_EXT_INTR(18), 1, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_EDGE, 1 },
- { GIC_EXT_INTR(19), 1, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_EDGE, 1 },
- { GIC_EXT_INTR(20), 2, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_EDGE, 1 },
- { GIC_EXT_INTR(21), 2, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_EDGE, 1 },
- { GIC_EXT_INTR(22), 3, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_EDGE, 1 },
- { GIC_EXT_INTR(23), 3, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_EDGE, 1 },
+/* End of the general interrupts; the per-CPU IPI entries are filled in at runtime by fill_ipi_map() */
};
#endif
/*
* GCMP needs to be detected before any SMP initialisation
*/
-static int __init gcmp_probe(unsigned long addr, unsigned long size)
+int __init gcmp_probe(unsigned long addr, unsigned long size)
{
if (gcmp_present >= 0)
return gcmp_present;
@@ -416,28 +424,36 @@ static int __init gcmp_probe(unsigned long addr, unsigned long size)
}
#if defined(CONFIG_MIPS_MT_SMP)
+static void __init fill_ipi_map1(int baseintr, int cpu, int cpupin)
+{
+ int intr = baseintr + cpu;
+ gic_intr_map[intr].intrnum = GIC_EXT_INTR(intr);
+ gic_intr_map[intr].cpunum = cpu;
+ gic_intr_map[intr].pin = cpupin;
+ gic_intr_map[intr].polarity = GIC_POL_POS;
+ gic_intr_map[intr].trigtype = GIC_TRIG_EDGE;
+ gic_intr_map[intr].ipiflag = 1;
+ ipi_map[cpu] |= (1 << (cpupin + 2));
+}
+
static void __init fill_ipi_map(void)
{
- int i;
+ int cpu;
- for (i = 0; i < ARRAY_SIZE(gic_intr_map); i++) {
- if (gic_intr_map[i].ipiflag && (gic_intr_map[i].cpunum != X))
- ipi_map[gic_intr_map[i].cpunum] |=
- (1 << (gic_intr_map[i].pin + 2));
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1);
+ fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2);
}
}
#endif
void __init arch_init_irq(void)
{
- int gic_present, gcmp_present;
-
init_i8259_irqs();
if (!cpu_has_veic)
mips_cpu_irq_init();
- gcmp_present = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
if (gcmp_present) {
GCMPGCB(GICBA) = GIC_BASE_ADDR | GCMP_GCB_GICBA_EN_MSK;
gic_present = 1;
@@ -514,24 +530,10 @@ void __init arch_init_irq(void)
if (gic_present) {
/* FIXME */
int i;
- struct {
- unsigned int resched;
- unsigned int call;
- } ipiirq[] = {
- {
- .resched = GIC_IPI_EXT_INTR_RESCHED_VPE0,
- .call = GIC_IPI_EXT_INTR_CALLFNC_VPE0},
- {
- .resched = GIC_IPI_EXT_INTR_RESCHED_VPE1,
- .call = GIC_IPI_EXT_INTR_CALLFNC_VPE1
- }, {
- .resched = GIC_IPI_EXT_INTR_RESCHED_VPE2,
- .call = GIC_IPI_EXT_INTR_CALLFNC_VPE2
- }, {
- .resched = GIC_IPI_EXT_INTR_RESCHED_VPE3,
- .call = GIC_IPI_EXT_INTR_CALLFNC_VPE3
- }
- };
+
+ gic_call_int_base = GIC_NUM_INTRS - NR_CPUS;
+ gic_resched_int_base = gic_call_int_base - NR_CPUS;
+
fill_ipi_map();
gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
if (!gcmp_present) {
@@ -553,12 +555,15 @@ void __init arch_init_irq(void)
printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status());
write_c0_status(0x1100dc00);
printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status());
- for (i = 0; i < ARRAY_SIZE(ipiirq); i++) {
- setup_irq(MIPS_GIC_IRQ_BASE + ipiirq[i].resched, &irq_resched);
- setup_irq(MIPS_GIC_IRQ_BASE + ipiirq[i].call, &irq_call);
-
- set_irq_handler(MIPS_GIC_IRQ_BASE + ipiirq[i].resched, handle_percpu_irq);
- set_irq_handler(MIPS_GIC_IRQ_BASE + ipiirq[i].call, handle_percpu_irq);
+ for (i = 0; i < NR_CPUS; i++) {
+ setup_irq(MIPS_GIC_IRQ_BASE +
+ GIC_RESCHED_INT(i), &irq_resched);
+ setup_irq(MIPS_GIC_IRQ_BASE +
+ GIC_CALL_INT(i), &irq_call);
+ set_irq_handler(MIPS_GIC_IRQ_BASE +
+ GIC_RESCHED_INT(i), handle_percpu_irq);
+ set_irq_handler(MIPS_GIC_IRQ_BASE +
+ GIC_CALL_INT(i), handle_percpu_irq);
}
} else {
/* set up ipi interrupts */
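With this change the IPI interrupts are carved out of the top of the GIC interrupt space at boot instead of being hard-coded per VPE: the call-function IPIs occupy the last NR_CPUS GIC inputs, the reschedule IPIs occupy the NR_CPUS inputs below them, and fill_ipi_map1() writes one gic_intr_map entry per IPI. A minimal sketch of the resulting numbering, with illustrative values of GIC_NUM_INTRS = 32 and NR_CPUS = 4 (neither value is taken from this change):

	/* Illustrative only, assuming GIC_NUM_INTRS == 32 and NR_CPUS == 4. */
	gic_call_int_base    = GIC_NUM_INTRS - NR_CPUS;		/* == 28 */
	gic_resched_int_base = gic_call_int_base - NR_CPUS;	/* == 24 */
	/* GIC_RESCHED_INT(cpu) == 24 + cpu, routed to pin GIC_CPU_INT1 */
	/* GIC_CALL_INT(cpu)    == 28 + cpu, routed to pin GIC_CPU_INT2 */

With these example values plat_ipi_resched_int_xlate(2) returns 26 and plat_ipi_call_int_xlate(2) returns 30.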
diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c
index 42dee4da37ba..f48d60e84290 100644
--- a/arch/mips/mti-malta/malta-reset.c
+++ b/arch/mips/mti-malta/malta-reset.c
@@ -28,9 +28,6 @@
#include <asm/reboot.h>
#include <asm/mips-boards/generic.h>
-static void mips_machine_restart(char *command);
-static void mips_machine_halt(void);
-
static void mips_machine_restart(char *command)
{
unsigned int __iomem *softres_reg =
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 499ffe5475df..192cfd2a539c 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -21,11 +21,11 @@ static void msmtc_send_ipi_single(int cpu, unsigned int action)
smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}
-static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
msmtc_send_ipi_single(i, action);
}
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index e8a97f59e066..63d8a297c58d 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -52,3 +52,8 @@ obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o
obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o
obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o
obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o
+
+ifdef CONFIG_PCI_MSI
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o
+endif
diff --git a/arch/mips/pci/fixup-capcella.c b/arch/mips/pci/fixup-capcella.c
index 1416bca6d1a3..1c02f5737367 100644
--- a/arch/mips/pci/fixup-capcella.c
+++ b/arch/mips/pci/fixup-capcella.c
@@ -1,7 +1,7 @@
/*
* fixup-cappcela.c, The ZAO Networks Capcella specific PCI fixups.
*
- * Copyright (C) 2002,2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c
index 591159625722..e08f49cb6875 100644
--- a/arch/mips/pci/fixup-mpc30x.c
+++ b/arch/mips/pci/fixup-mpc30x.c
@@ -1,7 +1,7 @@
/*
* fixup-mpc30x.c, The Victor MP-C303/304 specific PCI fixups.
*
- * Copyright (C) 2002,2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c
index ed87733f6796..8084b17d4406 100644
--- a/arch/mips/pci/fixup-tb0219.c
+++ b/arch/mips/pci/fixup-tb0219.c
@@ -2,7 +2,7 @@
* fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups.
*
* Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
- * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-tb0226.c b/arch/mips/pci/fixup-tb0226.c
index e3eedf4bf9bd..4196ccf3ea3d 100644
--- a/arch/mips/pci/fixup-tb0226.c
+++ b/arch/mips/pci/fixup-tb0226.c
@@ -1,7 +1,7 @@
/*
* fixup-tb0226.c, The TANBAC TB0226 specific PCI fixups.
*
- * Copyright (C) 2002-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2002-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-tb0287.c b/arch/mips/pci/fixup-tb0287.c
index 267ab3dc3d42..2fe29db43725 100644
--- a/arch/mips/pci/fixup-tb0287.c
+++ b/arch/mips/pci/fixup-tb0287.c
@@ -1,7 +1,7 @@
/*
* fixup-tb0287.c, The TANBAC TB0287 specific PCI fixups.
*
- * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
new file mode 100644
index 000000000000..03742e647657
--- /dev/null
+++ b/arch/mips/pci/msi-octeon.c
@@ -0,0 +1,288 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2009 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/pci-octeon.h>
+
+/*
+ * Each bit in msi_free_irq_bitmask represents an MSI interrupt that is
+ * in use.
+ */
+static uint64_t msi_free_irq_bitmask;
+
+/*
+ * Each bit in msi_multiple_irq_bitmask indicates that the device using
+ * the corresponding bit in msi_free_irq_bitmask is also using the next
+ * bit. This is used so we can disable all of the MSI interrupts when a
+ * device uses more than one.
+ */
+static uint64_t msi_multiple_irq_bitmask;
+
+/*
+ * This lock controls updates to msi_free_irq_bitmask and
+ * msi_multiple_irq_bitmask.
+ */
+static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock);
+
+
+/**
+ * Called when a driver requests MSI interrupts instead of the
+ * legacy INT A-D. This routine will allocate multiple interrupts
+ * for MSI devices that support them. A device can override this by
+ * programming the MSI control bits [6:4] before calling
+ * pci_enable_msi().
+ *
+ * @dev: Device requesting MSI interrupts
+ * @desc: MSI descriptor
+ *
+ * Returns 0 on success.
+ */
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+ struct msi_msg msg;
+ uint16_t control;
+ int configured_private_bits;
+ int request_private_bits;
+ int irq;
+ int irq_step;
+ uint64_t search_mask;
+
+ /*
+ * Read the MSI config to figure out how many IRQs this device
+ * wants. Most devices only want 1, which will give
+ * configured_private_bits and request_private_bits equal to 0.
+ */
+ pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+ &control);
+
+ /*
+ * If the number of private bits has been configured then use
+ * that value instead of the requested number. This gives the
+ * driver the chance to override the number of interrupts
+ * before calling pci_enable_msi().
+ */
+ configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4;
+ if (configured_private_bits == 0) {
+ /* Nothing is configured, so use the hardware requested size */
+ request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+ } else {
+ /*
+ * Use the number of configured bits, assuming the
+ * driver wanted to override the hardware request
+ * value.
+ */
+ request_private_bits = configured_private_bits;
+ }
+
+ /*
+ * The PCI 2.3 spec mandates that there are at most 32
+ * interrupts. If this device asks for more, only give it one.
+ */
+ if (request_private_bits > 5)
+ request_private_bits = 0;
+
+try_only_one:
+ /*
+ * The IRQs have to be aligned on a power of two based on the
+ * number being requested.
+ */
+ irq_step = 1 << request_private_bits;
+
+ /* Mask with one bit for each IRQ */
+ search_mask = (1ull << irq_step) - 1;
+
+ /*
+ * We're going to search msi_free_irq_bitmask for zero
+ * bits. This represents an MSI interrupt number that isn't in
+ * use.
+ */
+ spin_lock(&msi_free_irq_bitmask_lock);
+ for (irq = 0; irq < 64; irq += irq_step) {
+ if ((msi_free_irq_bitmask & (search_mask << irq)) == 0) {
+ msi_free_irq_bitmask |= search_mask << irq;
+ msi_multiple_irq_bitmask |= (search_mask >> 1) << irq;
+ break;
+ }
+ }
+ spin_unlock(&msi_free_irq_bitmask_lock);
+
+ /* Make sure the search for available interrupts didn't fail */
+ if (irq >= 64) {
+ if (request_private_bits) {
+ pr_err("arch_setup_msi_irq: Unable to find %d free "
+ "interrupts, trying just one",
+ 1 << request_private_bits);
+ request_private_bits = 0;
+ goto try_only_one;
+ } else
+ panic("arch_setup_msi_irq: Unable to find a free MSI "
+ "interrupt");
+ }
+
+ /* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */
+ irq += OCTEON_IRQ_MSI_BIT0;
+
+ switch (octeon_dma_bar_type) {
+ case OCTEON_DMA_BAR_TYPE_SMALL:
+ /* When not using big bar, Bar 0 is based at 128MB */
+ msg.address_lo =
+ ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
+ msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
+ break;
+ case OCTEON_DMA_BAR_TYPE_BIG:
+ /* When using big bar, Bar 0 is based at 0 */
+ msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
+ msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32;
+ break;
+ case OCTEON_DMA_BAR_TYPE_PCIE:
+ /* When using PCIe, Bar 0 is based at 0 */
+ /* FIXME CVMX_NPEI_MSI_RCV* other than 0? */
+ msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff;
+ msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32;
+ break;
+ default:
+ panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type\n");
+ }
+ msg.data = irq - OCTEON_IRQ_MSI_BIT0;
+
+ /* Update the number of IRQs the device has available to it */
+ control &= ~PCI_MSI_FLAGS_QSIZE;
+ control |= request_private_bits << 4;
+ pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+ control);
+
+ set_irq_msi(irq, desc);
+ write_msi_msg(irq, &msg);
+ return 0;
+}
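The search above is a first-fit scan of a 64-bit bitmap in which a block of 2^n MSI vectors must start on a 2^n boundary, so the device can OR its vector index into the low bits of the MSI data value. A self-contained sketch of just that search, under the assumption that the caller has already clamped request_private_bits to at most 5 (32 vectors); the helper name is hypothetical and the locking is omitted:

	/* Illustrative only: power-of-two aligned first-fit over a 64-bit map. */
	#include <stdint.h>

	static int find_free_msi_block(uint64_t *bitmap, int request_private_bits)
	{
		int irq_step = 1 << request_private_bits;	/* vectors in the block */
		uint64_t search_mask = (1ull << irq_step) - 1;	/* one bit per vector */
		int irq;

		for (irq = 0; irq < 64; irq += irq_step) {
			if ((*bitmap & (search_mask << irq)) == 0) {
				*bitmap |= search_mask << irq;	/* claim the block */
				return irq;			/* first vector in it */
			}
		}
		return -1;					/* no aligned block free */
	}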
+
+
+/**
+ * Called when a device no longer needs its MSI interrupts. All
+ * MSI interrupts for the device are freed.
+ *
+ * @irq: The device's first irq number. There may be multiple in sequence.
+ */
+void arch_teardown_msi_irq(unsigned int irq)
+{
+ int number_irqs;
+ uint64_t bitmask;
+
+ if ((irq < OCTEON_IRQ_MSI_BIT0) || (irq > OCTEON_IRQ_MSI_BIT63))
+ panic("arch_teardown_msi_irq: Attempted to teardown illegal "
+ "MSI interrupt (%d)", irq);
+ irq -= OCTEON_IRQ_MSI_BIT0;
+
+ /*
+ * Count the number of IRQs we need to free by looking at the
+ * msi_multiple_irq_bitmask. Each bit set means that the next
+ * IRQ is also owned by this device.
+ */
+ number_irqs = 0;
+ while ((irq+number_irqs < 64) &&
+ (msi_multiple_irq_bitmask & (1ull << (irq + number_irqs))))
+ number_irqs++;
+ number_irqs++;
+ /* Mask with one bit for each IRQ */
+ bitmask = (1ull << number_irqs) - 1;
+ /* Shift the mask to the correct bit location */
+ bitmask <<= irq;
+ if ((msi_free_irq_bitmask & bitmask) != bitmask)
+ panic("arch_teardown_msi_irq: Attempted to teardown MSI "
+ "interrupt (%d) not in use", irq);
+
+ /* Checks are done, update the in use bitmask */
+ spin_lock(&msi_free_irq_bitmask_lock);
+ msi_free_irq_bitmask &= ~bitmask;
+ msi_multiple_irq_bitmask &= ~bitmask;
+ spin_unlock(&msi_free_irq_bitmask_lock);
+}
+
+
+/*
+ * Called by the interrupt handling code when an MSI interrupt
+ * occurs.
+ */
+static irqreturn_t octeon_msi_interrupt(int cpl, void *dev_id)
+{
+ uint64_t msi_bits;
+ int irq;
+
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE)
+ msi_bits = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_RCV0);
+ else
+ msi_bits = cvmx_read_csr(CVMX_NPI_NPI_MSI_RCV);
+ irq = fls64(msi_bits);
+ if (irq) {
+ irq += OCTEON_IRQ_MSI_BIT0 - 1;
+ if (irq_desc[irq].action) {
+ do_IRQ(irq);
+ return IRQ_HANDLED;
+ } else {
+ pr_err("Spurious MSI interrupt %d\n", irq);
+ if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+ /* These chips have PCIe */
+ cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
+ 1ull << (irq -
+ OCTEON_IRQ_MSI_BIT0));
+ } else {
+ /* These chips have PCI */
+ cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
+ 1ull << (irq -
+ OCTEON_IRQ_MSI_BIT0));
+ }
+ }
+ }
+ return IRQ_NONE;
+}
+
+
+/*
+ * Initializes the MSI interrupt handling code
+ */
+int octeon_msi_initialize(void)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+ if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt,
+ IRQF_SHARED,
+ "MSI[0:63]", octeon_msi_interrupt))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed");
+ } else if (octeon_is_pci_host()) {
+ if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt,
+ IRQF_SHARED,
+ "MSI[0:15]", octeon_msi_interrupt))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed");
+
+ if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt,
+ IRQF_SHARED,
+ "MSI[16:31]", octeon_msi_interrupt))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed");
+
+ if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt,
+ IRQF_SHARED,
+ "MSI[32:47]", octeon_msi_interrupt))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed");
+
+ if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt,
+ IRQF_SHARED,
+ "MSI[48:63]", octeon_msi_interrupt))
+ panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed");
+
+ }
+ return 0;
+}
+
+subsys_initcall(octeon_msi_initialize);
diff --git a/arch/mips/pci/ops-vr41xx.c b/arch/mips/pci/ops-vr41xx.c
index 900c6b32576c..28962a7c6606 100644
--- a/arch/mips/pci/ops-vr41xx.c
+++ b/arch/mips/pci/ops-vr41xx.c
@@ -2,8 +2,8 @@
* ops-vr41xx.c, PCI configuration routines for the PCIU of NEC VR4100 series.
*
* Copyright (C) 2001-2003 MontaVista Software Inc.
- * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,7 +21,7 @@
*/
/*
* Changes:
- * MontaVista Software Inc. <yyuasa@mvista.com> or <source@mvista.com>
+ * MontaVista Software Inc. <source@mvista.com>
* - New creation, NEC VR4122 and VR4131 are supported.
*/
#include <linux/pci.h>
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
new file mode 100644
index 000000000000..9cb0c807f564
--- /dev/null
+++ b/arch/mips/pci/pci-octeon.c
@@ -0,0 +1,675 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2009 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+#include <asm/octeon/pci-octeon.h>
+
+#define USE_OCTEON_INTERNAL_ARBITER
+
+/*
+ * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
+ * addresses. Use PCI endian swapping 1 so no address swapping is
+ * necessary. The Linux io routines will endian swap the data.
+ */
+#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull
+#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32)
+
+/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
+#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull)
+
+/**
+ * This is the bit decoding used for the Octeon PCI controller addresses
+ */
+union octeon_pci_address {
+ uint64_t u64;
+ struct {
+ uint64_t upper:2;
+ uint64_t reserved:13;
+ uint64_t io:1;
+ uint64_t did:5;
+ uint64_t subdid:3;
+ uint64_t reserved2:4;
+ uint64_t endian_swap:2;
+ uint64_t reserved3:10;
+ uint64_t bus:8;
+ uint64_t dev:5;
+ uint64_t func:3;
+ uint64_t reg:8;
+ } s;
+};
+
+int __initdata (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
+ u8 slot, u8 pin);
+enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
+
+/**
+ * Map a PCI device to the appropriate interrupt line
+ *
+ * @dev: The Linux PCI device structure for the device to map
+ * @slot: The slot number for this device on __BUS 0__. Linux
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
+ * @pin: The PCI interrupt pin read from the device, then swizzled
+ * as it goes through each bridge.
+ * Returns Interrupt number for the device
+ */
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if (octeon_pcibios_map_irq)
+ return octeon_pcibios_map_irq(dev, slot, pin);
+ else
+ panic("octeon_pcibios_map_irq not set.");
+}
+
+
+/*
+ * Called to perform platform specific PCI setup
+ */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ uint16_t config;
+ uint32_t dconfig;
+ int pos;
+ /*
+ * Force the Cache line setting to 64 bytes. The standard
+ * Linux bus scan doesn't seem to set it. Octeon really has
+ * 128 byte lines, but Intel bridges get really upset if you
+ * try and set values above 64 bytes. Value is specified in
+ * 32bit words.
+ */
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
+ /* Set latency timers for all devices */
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 48);
+
+ /* Enable reporting System errors and parity errors on all devices */
+ /* Enable parity checking and error reporting */
+ pci_read_config_word(dev, PCI_COMMAND, &config);
+ config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ pci_write_config_word(dev, PCI_COMMAND, config);
+
+ if (dev->subordinate) {
+ /* Set latency timers on sub bridges */
+ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 48);
+ /* More bridge error detection */
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
+ config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
+ }
+
+ /* Enable the PCIe normal error reporting */
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (pos) {
+ /* Update Device Control */
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
+ /* Correctable Error Reporting */
+ config |= PCI_EXP_DEVCTL_CERE;
+ /* Non-Fatal Error Reporting */
+ config |= PCI_EXP_DEVCTL_NFERE;
+ /* Fatal Error Reporting */
+ config |= PCI_EXP_DEVCTL_FERE;
+ /* Unsupported Request */
+ config |= PCI_EXP_DEVCTL_URRE;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
+ }
+
+ /* Find the Advanced Error Reporting capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (pos) {
+ /* Clear Uncorrectable Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
+ &dconfig);
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
+ dconfig);
+ /* Enable reporting of all uncorrectable errors */
+ /* Uncorrectable Error Mask - turned on bits disable errors */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
+ /*
+ * Leave severity at HW default. This only controls if
+ * errors are reported as uncorrectable or
+ * correctable, not if the error is reported.
+ */
+ /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
+ /* Clear Correctable Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
+ /* Enable reporting of all correctable errors */
+ /* Correctable Error Mask - turned on bits disable errors */
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
+ /* Advanced Error Capabilities */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
+ /* ECRC Generation Enable */
+ if (dconfig & PCI_ERR_CAP_ECRC_GENC)
+ dconfig |= PCI_ERR_CAP_ECRC_GENE;
+ /* ECRC Check Enable */
+ if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
+ dconfig |= PCI_ERR_CAP_ECRC_CHKE;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
+ /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
+ /* Report all errors to the root complex */
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
+ PCI_ERR_ROOT_CMD_COR_EN |
+ PCI_ERR_ROOT_CMD_NONFATAL_EN |
+ PCI_ERR_ROOT_CMD_FATAL_EN);
+ /* Clear the Root status register */
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
+ }
+
+ return 0;
+}
+
+/**
+ * Return the mapping of PCI device number to IRQ line. Each
+ * character in the return string represents the interrupt
+ * line for the device at that position. Device 0 maps to the
+ * first character, etc. The characters A-D are used for PCI
+ * interrupts.
+ *
+ * Returns PCI interrupt mapping
+ */
+const char *octeon_get_pci_interrupts(void)
+{
+ /*
+ * Returning an empty string causes the interrupts to be
+ * routed based on the PCI specification. From the PCI spec:
+ *
+ * INTA# of Device Number 0 is connected to IRQW on the system
+ * board. (Device Number has no significance regarding being
+ * located on the system board or in a connector.) INTA# of
+ * Device Number 1 is connected to IRQX on the system
+ * board. INTA# of Device Number 2 is connected to IRQY on the
+ * system board. INTA# of Device Number 3 is connected to IRQZ
+ * on the system board. The table below describes how each
+ * agent's INTx# lines are connected to the system board
+ * interrupt lines. The following equation can be used to
+ * determine to which INTx# signal on the system board a given
+ * device's INTx# line(s) is connected.
+ *
+ * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0,
+ * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I =
+ * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
+ * INTD# = 3)
+ */
+ switch (octeon_bootinfo->board_type) {
+ case CVMX_BOARD_TYPE_NAO38:
+ /* This is really the NAC38 */
+ return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA";
+ case CVMX_BOARD_TYPE_THUNDER:
+ return "";
+ case CVMX_BOARD_TYPE_EBH3000:
+ return "";
+ case CVMX_BOARD_TYPE_EBH3100:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ return "AABCD";
+ default:
+ return "";
+ }
+}
+
+/**
+ * Map a PCI device to the appropriate interrupt line
+ *
+ * @dev: The Linux PCI device structure for the device to map
+ * @slot: The slot number for this device on __BUS 0__. Linux
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
+ * @pin: The PCI interrupt pin read from the device, then swizzled
+ * as it goes through each bridge.
+ * Returns Interrupt number for the device
+ */
+int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
+ u8 slot, u8 pin)
+{
+ int irq_num;
+ const char *interrupts;
+ int dev_num;
+
+ /* Get the board specific interrupt mapping */
+ interrupts = octeon_get_pci_interrupts();
+
+ dev_num = dev->devfn >> 3;
+ if (dev_num < strlen(interrupts))
+ irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) +
+ OCTEON_IRQ_PCI_INT0;
+ else
+ irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0;
+ return irq_num;
+}
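Both branches above produce a value in the range OCTEON_IRQ_PCI_INT0 .. OCTEON_IRQ_PCI_INT0 + 3. A couple of worked examples, assuming pin carries the usual 1-based encoding from the interrupt pin register (INTA# = 1 .. INTD# = 4):

	/* Illustrative only: the arithmetic of the two branches above.     */
	/* Board string "AAAB..." (CN3010_EVB_HS5), device 3, INTA#:        */
	/*   ('B' - 'A' + 1 - 1) & 3 == 1  ->  OCTEON_IRQ_PCI_INT0 + 1      */
	/* Default branch, slot 2, INTB#:                                   */
	/*   (2 + 2 - 3) & 3 == 1          ->  OCTEON_IRQ_PCI_INT0 + 1      */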
+
+
+/*
+ * Read a value from configuration space
+ */
+static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 *val)
+{
+ union octeon_pci_address pci_addr;
+
+ pci_addr.u64 = 0;
+ pci_addr.s.upper = 2;
+ pci_addr.s.io = 1;
+ pci_addr.s.did = 3;
+ pci_addr.s.subdid = 1;
+ pci_addr.s.endian_swap = 1;
+ pci_addr.s.bus = bus->number;
+ pci_addr.s.dev = devfn >> 3;
+ pci_addr.s.func = devfn & 0x7;
+ pci_addr.s.reg = reg;
+
+#if PCI_CONFIG_SPACE_DELAY
+ udelay(PCI_CONFIG_SPACE_DELAY);
+#endif
+ switch (size) {
+ case 4:
+ *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
+ return PCIBIOS_SUCCESSFUL;
+ case 2:
+ *val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64));
+ return PCIBIOS_SUCCESSFUL;
+ case 1:
+ *val = cvmx_read64_uint8(pci_addr.u64);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+
+/*
+ * Write a value to PCI configuration space
+ */
+static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 val)
+{
+ union octeon_pci_address pci_addr;
+
+ pci_addr.u64 = 0;
+ pci_addr.s.upper = 2;
+ pci_addr.s.io = 1;
+ pci_addr.s.did = 3;
+ pci_addr.s.subdid = 1;
+ pci_addr.s.endian_swap = 1;
+ pci_addr.s.bus = bus->number;
+ pci_addr.s.dev = devfn >> 3;
+ pci_addr.s.func = devfn & 0x7;
+ pci_addr.s.reg = reg;
+
+#if PCI_CONFIG_SPACE_DELAY
+ udelay(PCI_CONFIG_SPACE_DELAY);
+#endif
+ switch (size) {
+ case 4:
+ cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
+ return PCIBIOS_SUCCESSFUL;
+ case 2:
+ cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val));
+ return PCIBIOS_SUCCESSFUL;
+ case 1:
+ cvmx_write64_uint8(pci_addr.u64, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+
+static struct pci_ops octeon_pci_ops = {
+ octeon_read_config,
+ octeon_write_config,
+};
+
+static struct resource octeon_pci_mem_resource = {
+ .start = 0,
+ .end = 0,
+ .name = "Octeon PCI MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+/*
+ * PCI I/O ports must be above 16KB so that the ISA bus filtering in the
+ * PCI-X to PCI bridge does not interfere with them
+ */
+static struct resource octeon_pci_io_resource = {
+ .start = 0x4000,
+ .end = OCTEON_PCI_IOSPACE_SIZE - 1,
+ .name = "Octeon PCI IO",
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller octeon_pci_controller = {
+ .pci_ops = &octeon_pci_ops,
+ .mem_resource = &octeon_pci_mem_resource,
+ .mem_offset = OCTEON_PCI_MEMSPACE_OFFSET,
+ .io_resource = &octeon_pci_io_resource,
+ .io_offset = 0,
+ .io_map_base = OCTEON_PCI_IOSPACE_BASE,
+};
+
+
+/*
+ * Low level initialize the Octeon PCI controller
+ */
+static void octeon_pci_initialize(void)
+{
+ union cvmx_pci_cfg01 cfg01;
+ union cvmx_npi_ctl_status ctl_status;
+ union cvmx_pci_ctl_status_2 ctl_status_2;
+ union cvmx_pci_cfg19 cfg19;
+ union cvmx_pci_cfg16 cfg16;
+ union cvmx_pci_cfg22 cfg22;
+ union cvmx_pci_cfg56 cfg56;
+
+ /* Reset the PCI Bus */
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
+ cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+
+ udelay(2000); /* Hold PCI reset for 2 ms */
+
+ ctl_status.u64 = 0; /* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */
+ ctl_status.s.max_word = 1;
+ ctl_status.s.timer = 1;
+ cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);
+
+ /* Deassert PCI reset and advertise PCX Host Mode Device Capability
+ (64b) */
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
+ cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+
+ udelay(2000); /* Wait 2 ms after deasserting PCI reset */
+
+ ctl_status_2.u32 = 0;
+ ctl_status_2.s.tsr_hwm = 1; /* Initializes to 0. Must be set
+ before any PCI reads. */
+ ctl_status_2.s.bar2pres = 1; /* Enable BAR2 */
+ ctl_status_2.s.bar2_enb = 1;
+ ctl_status_2.s.bar2_cax = 1; /* Don't use L2 */
+ ctl_status_2.s.bar2_esx = 1;
+ ctl_status_2.s.pmo_amod = 1; /* Round robin priority */
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
+ /* BAR1 hole */
+ ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS;
+ ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */
+ ctl_status_2.s.bb_ca = 1; /* Don't use L2 with big bars */
+ ctl_status_2.s.bb_es = 1; /* Big bar in byte swap mode */
+ ctl_status_2.s.bb1 = 1; /* BAR1 is big */
+ ctl_status_2.s.bb0 = 1; /* BAR0 is big */
+ }
+
+ octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
+ udelay(2000); /* Wait 2 ms before doing PCI reads */
+
+ ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2);
+ pr_notice("PCI Status: %s %s-bit\n",
+ ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
+ ctl_status_2.s.ap_64ad ? "64" : "32");
+
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ union cvmx_pci_cnt_reg cnt_reg_start;
+ union cvmx_pci_cnt_reg cnt_reg_end;
+ unsigned long cycles, pci_clock;
+
+ cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
+ cycles = read_c0_cvmcount();
+ udelay(1000);
+ cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
+ cycles = read_c0_cvmcount() - cycles;
+ pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) /
+ (cycles / (mips_hpt_frequency / 1000000));
+ pr_notice("PCI Clock: %lu MHz\n", pci_clock);
+ }
+
+ /*
+ * TDOMC must be set to one in PCI mode. TDOMC should be set to 4
+ * in PCI-X mode to allow four outstanding splits. Otherwise,
+ * should not change from its reset value. Don't write PCI_CFG19
+ * in PCI mode (0x82000001 reset value), write it to 0x82000004
+ * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
+ * MRBCM -> must be one.
+ */
+ if (ctl_status_2.s.ap_pcix) {
+ cfg19.u32 = 0;
+ /*
+ * Target Delayed/Split request outstanding maximum
+ * count. [1..31] and 0=32. NOTE: If the user
+ * programs these bits beyond the Designed Maximum
+ * outstanding count, then the designed maximum table
+ * depth will be used instead. No additional
+ * Deferred/Split transactions will be accepted if
+ * this outstanding maximum count is
+ * reached. Furthermore, no additional deferred/split
+ * transactions will be accepted if the I/O delay/ I/O
+ * Split Request outstanding maximum is reached.
+ */
+ cfg19.s.tdomc = 4;
+ /*
+ * Master Deferred Read Request Outstanding Max Count (PCI only).
+ *
+ *   CR4C[26:24]  Max SAC cycles  Max DAC cycles
+ *       000            8               4
+ *       001            1               0
+ *       010            2               1
+ *       011            3               1
+ *       100            4               2
+ *       101            5               2
+ *       110            6               3
+ *       111            7               3
+ *
+ * For example, if these bits are programmed to 100, the core can
+ * support 2 DAC cycles, 4 SAC cycles or a combination of 1 DAC and
+ * 2 SAC cycles. NOTE: For the PCI-X maximum outstanding split
+ * transactions, refer to CRE0[22:20].
+ */
+ cfg19.s.mdrrmc = 2;
+ /*
+ * Master Request (Memory Read) Byte Count/Byte Enable
+ * select. 0 = Byte Enables valid. In PCI mode, a
+ * burst transaction cannot be performed using Memory
+ * Read command=4'h6. 1 = DWORD Byte Count valid
+ * (default). In PCI Mode, the memory read byte
+ * enables are automatically generated by the
+ * core. Note: N3 Master Request transaction sizes are
+ * always determined through the
+ * am_attr[<35:32>|<7:0>] field.
+ */
+ cfg19.s.mrbcm = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32);
+ }
+
+
+ cfg01.u32 = 0;
+ cfg01.s.msae = 1; /* Memory Space Access Enable */
+ cfg01.s.me = 1; /* Master Enable */
+ cfg01.s.pee = 1; /* PERR# Enable */
+ cfg01.s.see = 1; /* System Error Enable */
+ cfg01.s.fbbe = 1; /* Fast Back to Back Transaction Enable */
+
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+
+#ifdef USE_OCTEON_INTERNAL_ARBITER
+ /*
+ * When OCTEON is a PCI host, most systems will use OCTEON's
+ * internal arbiter, so must enable it before any PCI/PCI-X
+ * traffic can occur.
+ */
+ {
+ union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg;
+
+ pci_int_arb_cfg.u64 = 0;
+ pci_int_arb_cfg.s.en = 1; /* Internal arbiter enable */
+ cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64);
+ }
+#endif /* USE_OCTEON_INTERNAL_ARBITER */
+
+ /*
+ * Preferably written to 1 to set MLTD. RDSATI, TRTAE,
+ * TWTAE, TMAE, DPPMR -> must be zero. TILT -> must not be set to
+ * 1..7.
+ */
+ cfg16.u32 = 0;
+ cfg16.s.mltd = 1; /* Master Latency Timer Disable */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32);
+
+ /*
+ * Should be written to 0x4ff00. MTTV -> must be zero.
+ * FLUSH -> must be 1. MRV -> should be 0xFF.
+ */
+ cfg22.u32 = 0;
+ /* Master Retry Value [1..255] and 0=infinite */
+ cfg22.s.mrv = 0xff;
+ /*
+ * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper
+ * N3K operation.
+ */
+ cfg22.s.flush = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32);
+
+ /*
+ * MOST Indicates the maximum number of outstanding splits (in -1
+ * notation) when OCTEON is in PCI-X mode. PCI-X performance is
+ * affected by the MOST selection. Should generally be written
+ * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
+ * depending on the desired MOST of 3, 2, 1, or 0, respectively.
+ */
+ cfg56.u32 = 0;
+ cfg56.s.pxcid = 7; /* RO - PCI-X Capability ID */
+ cfg56.s.ncp = 0xe8; /* RO - Next Capability Pointer */
+ cfg56.s.dpere = 1; /* Data Parity Error Recovery Enable */
+ cfg56.s.roe = 1; /* Relaxed Ordering Enable */
+ cfg56.s.mmbc = 1; /* Maximum Memory Byte Count
+ [0=512B,1=1024B,2=2048B,3=4096B] */
+ cfg56.s.most = 3; /* Maximum outstanding Split transactions [0=1
+ .. 7=32] */
+
+ octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32);
+
+ /*
+ * Affects PCI performance when OCTEON services reads to its
+ * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are
+ * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
+ * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
+ * these values need to be changed so they won't possibly prefetch off
+ * of the end of memory if PCI is DMAing a buffer at the end of
+ * memory. Note that these values differ from their reset values.
+ */
+ octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21);
+ octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31);
+ octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31);
+}
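The clock probe in the CN58XX/CN50XX branch above amounts to "PCI counter ticks per elapsed microsecond": the core cycle delta divided by (mips_hpt_frequency / 1000000) gives the elapsed time in microseconds, and the PCI counter delta over that window is the bus frequency in MHz. Worked with illustrative numbers (a 500 MHz core and a 33 MHz bus are assumptions, not values from this code):

	/* Illustrative only: the MHz arithmetic of the probe above. */
	unsigned long cycles = 500000;		/* core cycles across udelay(1000) at 500 MHz */
	unsigned long hpt = 500000000;		/* mips_hpt_frequency */
	unsigned long pcicnt_delta = 33000;	/* PCI counter ticks in the same window */
	unsigned long usecs = cycles / (hpt / 1000000);		/* == 1000 */
	unsigned long pci_clock = pcicnt_delta / usecs;		/* == 33 (MHz) */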
+
+
+/*
+ * Initialize the Octeon PCI controller
+ */
+static int __init octeon_pci_setup(void)
+{
+ union cvmx_npi_mem_access_subidx mem_access;
+ int index;
+
+ /* Only these chips have PCI */
+ if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ return 0;
+
+ /* Point pcibios_map_irq() to the PCI version of it */
+ octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
+
+ /* Only use the big bars on chips that support it */
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
+ OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
+ octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL;
+ else
+ octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
+
+ /* PCI I/O and PCI MEM values */
+ set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
+ ioport_resource.start = 0;
+ ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
+ if (!octeon_is_pci_host()) {
+ pr_notice("Not in host mode, PCI Controller not initialized\n");
+ return 0;
+ }
+
+ pr_notice("%s Octeon big bar support\n",
+ (octeon_dma_bar_type ==
+ OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");
+
+ octeon_pci_initialize();
+
+ mem_access.u64 = 0;
+ mem_access.s.esr = 1; /* Endian-Swap on read. */
+ mem_access.s.esw = 1; /* Endian-Swap on write. */
+ mem_access.s.nsr = 0; /* No-Snoop on read. */
+ mem_access.s.nsw = 0; /* No-Snoop on write. */
+ mem_access.s.ror = 0; /* Relax Read on read. */
+ mem_access.s.row = 0; /* Relax Order on write. */
+ mem_access.s.ba = 0; /* PCI Address bits [63:36]. */
+ cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64);
+
+ /*
+ * Remap the Octeon BAR 2 above all 32 bit devices
+ * (0x8000000000ul). This is done here so it is remapped
+ * before the readl()'s below. We don't want BAR2 overlapping
+ * with BAR0/BAR1 during these reads.
+ */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG08, 0);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG09, 0x80);
+
+ /* Disable the BAR1 movable mappings */
+ for (index = 0; index < 32; index++)
+ octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
+
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
+ /* Remap the Octeon BAR 0 to 0-2GB */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
+
+ /*
+ * Remap the Octeon BAR 1 to map 2GB-4GB (minus the
+ * BAR 1 hole).
+ */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
+
+ /* Devices go after BAR1 */
+ octeon_pci_mem_resource.start =
+ OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
+ (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
+ octeon_pci_mem_resource.end =
+ octeon_pci_mem_resource.start + (1ul << 30);
+ } else {
+ /* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
+
+ /* Remap the Octeon BAR 1 to map 0-128MB */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
+
+ /* Devices go after BAR0 */
+ octeon_pci_mem_resource.start =
+ OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
+ (4ul << 10);
+ octeon_pci_mem_resource.end =
+ octeon_pci_mem_resource.start + (1ul << 30);
+ }
+
+ register_pci_controller(&octeon_pci_controller);
+
+ /*
+ * Clear any errors that might be pending from before the bus
+ * was setup properly.
+ */
+ cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
+ return 0;
+}
+
+arch_initcall(octeon_pci_setup);
diff --git a/arch/mips/pci/pci-vr41xx.c b/arch/mips/pci/pci-vr41xx.c
index d1e049b55f34..56525711f8b7 100644
--- a/arch/mips/pci/pci-vr41xx.c
+++ b/arch/mips/pci/pci-vr41xx.c
@@ -2,8 +2,8 @@
* pci-vr41xx.c, PCI Control Unit routines for the NEC VR4100 series.
*
* Copyright (C) 2001-2003 MontaVista Software Inc.
- * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2004-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can redistribute it and/or modify
@@ -22,7 +22,7 @@
*/
/*
* Changes:
- * MontaVista Software Inc. <yyuasa@mvista.com> or <source@mvista.com>
+ * MontaVista Software Inc. <source@mvista.com>
* - New creation, NEC VR4122 and VR4131 are supported.
*/
#include <linux/init.h>
diff --git a/arch/mips/pci/pci-vr41xx.h b/arch/mips/pci/pci-vr41xx.h
index 8a35e32b8376..6b1ae2eb1c06 100644
--- a/arch/mips/pci/pci-vr41xx.h
+++ b/arch/mips/pci/pci-vr41xx.h
@@ -2,8 +2,8 @@
* pci-vr41xx.h, Include file for PCI Control Unit of the NEC VR4100 series.
*
* Copyright (C) 2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
new file mode 100644
index 000000000000..75262247f3e4
--- /dev/null
+++ b/arch/mips/pci/pcie-octeon.c
@@ -0,0 +1,1369 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007, 2008 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pciercx-defs.h>
+#include <asm/octeon/cvmx-pescx-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include <asm/octeon/cvmx-helper-errata.h>
+#include <asm/octeon/pci-octeon.h>
+
+union cvmx_pcie_address {
+ uint64_t u64;
+ struct {
+ uint64_t upper:2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61:13; /* Must be zero */
+ uint64_t io:1; /* 1 for IO space access */
+ uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t subdid:3; /* PCIe SubDID = 1 */
+ uint64_t reserved_36_39:4; /* Must be zero */
+ uint64_t es:2; /* Endian swap = 1 */
+ uint64_t port:2; /* PCIe port 0,1 */
+ uint64_t reserved_29_31:3; /* Must be zero */
+ /*
+ * Selects the type of the configuration request (0 = type 0,
+ * 1 = type 1).
+ */
+ uint64_t ty:1;
+ /* Target bus number sent in the ID in the request. */
+ uint64_t bus:8;
+ /*
+ * Target device number sent in the ID in the
+ * request. Note that Dev must be zero for type 0
+ * configuration requests.
+ */
+ uint64_t dev:5;
+ /* Target function number sent in the ID in the request. */
+ uint64_t func:3;
+ /*
+ * Selects a register in the configuration space of
+ * the target.
+ */
+ uint64_t reg:12;
+ } config;
+ struct {
+ uint64_t upper:2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61:13; /* Must be zero */
+ uint64_t io:1; /* 1 for IO space access */
+ uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t subdid:3; /* PCIe SubDID = 2 */
+ uint64_t reserved_36_39:4; /* Must be zero */
+ uint64_t es:2; /* Endian swap = 1 */
+ uint64_t port:2; /* PCIe port 0,1 */
+ uint64_t address:32; /* PCIe IO address */
+ } io;
+ struct {
+ uint64_t upper:2; /* Normally 2 for XKPHYS */
+ uint64_t reserved_49_61:13; /* Must be zero */
+ uint64_t io:1; /* 1 for IO space access */
+ uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t subdid:3; /* PCIe SubDID = 3-6 */
+ uint64_t reserved_36_39:4; /* Must be zero */
+ uint64_t address:36; /* PCIe Mem address */
+ } mem;
+};
+
+/**
+ * Return the Core virtual base address for PCIe IO access. IOs are
+ * read/written as an offset from this address.
+ *
+ * @pcie_port: PCIe port the IO is for
+ *
+ * Returns 64bit Octeon IO base address for read/write
+ */
+static inline uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
+{
+ union cvmx_pcie_address pcie_addr;
+ pcie_addr.u64 = 0;
+ pcie_addr.io.upper = 0;
+ pcie_addr.io.io = 1;
+ pcie_addr.io.did = 3;
+ pcie_addr.io.subdid = 2;
+ pcie_addr.io.es = 1;
+ pcie_addr.io.port = pcie_port;
+ return pcie_addr.u64;
+}
+
+/**
+ * Size of the IO address region returned at address
+ * cvmx_pcie_get_io_base_address()
+ *
+ * @pcie_port: PCIe port the IO is for
+ *
+ * Returns Size of the IO window
+ */
+static inline uint64_t cvmx_pcie_get_io_size(int pcie_port)
+{
+ return 1ull << 32;
+}
+
+/**
+ * Return the Core virtual base address for PCIe MEM access. Memory is
+ * read/written as an offset from this address.
+ *
+ * @pcie_port: PCIe port the IO is for
+ *
+ * Returns 64bit Octeon IO base address for read/write
+ */
+static inline uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
+{
+ union cvmx_pcie_address pcie_addr;
+ pcie_addr.u64 = 0;
+ pcie_addr.mem.upper = 0;
+ pcie_addr.mem.io = 1;
+ pcie_addr.mem.did = 3;
+ pcie_addr.mem.subdid = 3 + pcie_port;
+ return pcie_addr.u64;
+}
+
+/**
+ * Size of the Mem address region returned at address
+ * cvmx_pcie_get_mem_base_address()
+ *
+ * @pcie_port: PCIe port the IO is for
+ *
+ * Returns Size of the Mem window
+ */
+static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port)
+{
+ return 1ull << 36;
+}
+
+/**
+ * Read a PCIe config space register indirectly. This is used for
+ * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
+ *
+ * @pcie_port: PCIe port to read from
+ * @cfg_offset: Address to read
+ *
+ * Returns Value read
+ */
+static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
+{
+ union cvmx_pescx_cfg_rd pescx_cfg_rd;
+ pescx_cfg_rd.u64 = 0;
+ pescx_cfg_rd.s.addr = cfg_offset;
+ cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
+ pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
+ return pescx_cfg_rd.s.data;
+}
+
+/**
+ * Write a PCIe config space register indirectly. This is used for
+ * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
+ *
+ * @pcie_port: PCIe port to write to
+ * @cfg_offset: Address to write
+ * @val: Value to write
+ */
+static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
+ uint32_t val)
+{
+ union cvmx_pescx_cfg_wr pescx_cfg_wr;
+ pescx_cfg_wr.u64 = 0;
+ pescx_cfg_wr.s.addr = cfg_offset;
+ pescx_cfg_wr.s.data = val;
+ cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
+}
+
+/**
+ * Build a PCIe config space request address for a device
+ *
+ * @pcie_port: PCIe port to access
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ *
+ * Returns 64bit Octeon IO address
+ */
+static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
+ int dev, int fn, int reg)
+{
+ union cvmx_pcie_address pcie_addr;
+ union cvmx_pciercx_cfg006 pciercx_cfg006;
+
+ pciercx_cfg006.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
+ if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
+ return 0;
+
+ pcie_addr.u64 = 0;
+ pcie_addr.config.upper = 2;
+ pcie_addr.config.io = 1;
+ pcie_addr.config.did = 3;
+ pcie_addr.config.subdid = 1;
+ pcie_addr.config.es = 1;
+ pcie_addr.config.port = pcie_port;
+ pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
+ pcie_addr.config.bus = bus;
+ pcie_addr.config.dev = dev;
+ pcie_addr.config.func = fn;
+ pcie_addr.config.reg = reg;
+ return pcie_addr.u64;
+}
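A consequence of the pbnum test above, assuming the primary bus number has been set to 1 as __cvmx_pcie_rc_initialize_config_space() does later in this file:

	/* Illustrative only: how the address builder above behaves with pbnum == 1. */
	__cvmx_pcie_build_config_addr(0, 1, 0, 0, 0);	/* bus == pbnum, dev 0: type 0 request    */
	__cvmx_pcie_build_config_addr(0, 1, 2, 0, 0);	/* bus == pbnum, dev != 0: returns 0      */
	__cvmx_pcie_build_config_addr(0, 2, 3, 0, 0);	/* bus > pbnum: type 1 request downstream */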
+
+/**
+ * Read 8bits from a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ *
+ * Returns Result of the read
+ */
+static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev,
+ int fn, int reg)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ return cvmx_read64_uint8(address);
+ else
+ return 0xff;
+}
+
+/**
+ * Read 16bits from a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ *
+ * Returns Result of the read
+ */
+static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev,
+ int fn, int reg)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ return le16_to_cpu(cvmx_read64_uint16(address));
+ else
+ return 0xffff;
+}
+
+/**
+ * Read 32bits from a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ *
+ * Returns Result of the read
+ */
+static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev,
+ int fn, int reg)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ return le32_to_cpu(cvmx_read64_uint32(address));
+ else
+ return 0xffffffff;
+}
+
+/**
+ * Write 8bits to a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ * @val: Value to write
+ */
+static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn,
+ int reg, uint8_t val)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ cvmx_write64_uint8(address, val);
+}
+
+/**
+ * Write 16bits to a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ * @val: Value to write
+ */
+static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn,
+ int reg, uint16_t val)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ cvmx_write64_uint16(address, cpu_to_le16(val));
+}
+
+/**
+ * Write 32bits to a Device's config space
+ *
+ * @pcie_port: PCIe port the device is on
+ * @bus: Sub bus
+ * @dev: Device ID
+ * @fn: Device sub function
+ * @reg: Register to access
+ * @val: Value to write
+ */
+static void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn,
+ int reg, uint32_t val)
+{
+ uint64_t address =
+ __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
+ if (address)
+ cvmx_write64_uint32(address, cpu_to_le32(val));
+}
+
+/**
+ * Initialize the RC config space CSRs
+ *
+ * @pcie_port: PCIe port to initialize
+ */
+static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
+{
+ union cvmx_pciercx_cfg030 pciercx_cfg030;
+ union cvmx_npei_ctl_status2 npei_ctl_status2;
+ union cvmx_pciercx_cfg070 pciercx_cfg070;
+ union cvmx_pciercx_cfg001 pciercx_cfg001;
+ union cvmx_pciercx_cfg032 pciercx_cfg032;
+ union cvmx_pciercx_cfg006 pciercx_cfg006;
+ union cvmx_pciercx_cfg008 pciercx_cfg008;
+ union cvmx_pciercx_cfg009 pciercx_cfg009;
+ union cvmx_pciercx_cfg010 pciercx_cfg010;
+ union cvmx_pciercx_cfg011 pciercx_cfg011;
+ union cvmx_pciercx_cfg035 pciercx_cfg035;
+ union cvmx_pciercx_cfg075 pciercx_cfg075;
+ union cvmx_pciercx_cfg034 pciercx_cfg034;
+
+ /* Max Payload Size (PCIE*_CFG030[MPS]) */
+ /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
+ /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
+ /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
+ pciercx_cfg030.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
+ /*
+ * Max payload size = 128 bytes for best Octeon DMA
+ * performance.
+ */
+ pciercx_cfg030.s.mps = 0;
+ /*
+ * Max read request size = 128 bytes for best Octeon DMA
+ * performance.
+ */
+ pciercx_cfg030.s.mrrs = 0;
+ /* Enable relaxed ordering. */
+ pciercx_cfg030.s.ro_en = 1;
+ /* Enable no snoop. */
+ pciercx_cfg030.s.ns_en = 1;
+ /* Correctable error reporting enable. */
+ pciercx_cfg030.s.ce_en = 1;
+ /* Non-fatal error reporting enable. */
+ pciercx_cfg030.s.nfe_en = 1;
+ /* Fatal error reporting enable. */
+ pciercx_cfg030.s.fe_en = 1;
+ /* Unsupported request reporting enable. */
+ pciercx_cfg030.s.ur_en = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port),
+ pciercx_cfg030.u32);
+
+ /*
+ * Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match
+ * PCIE*_CFG030[MPS]
+ *
+ * Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not
+ * exceed PCIE*_CFG030[MRRS].
+ */
+ npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
+ /* Max payload size = 128 bytes for best Octeon DMA performance */
+ npei_ctl_status2.s.mps = 0;
+ /* Max read request size = 128 bytes for best Octeon DMA performance */
+ npei_ctl_status2.s.mrrs = 0;
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
+
+ /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
+ pciercx_cfg070.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
+ pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
+ pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port),
+ pciercx_cfg070.u32);
+
+ /*
+ * Access Enables (PCIE*_CFG001[MSAE,ME]) ME and MSAE should
+ * always be set.
+ *
+ * Interrupt Disable (PCIE*_CFG001[I_DIS]) System Error
+ * Message Enable (PCIE*_CFG001[SEE])
+ */
+ pciercx_cfg001.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
+ pciercx_cfg001.s.msae = 1; /* Memory space enable. */
+ pciercx_cfg001.s.me = 1; /* Bus master enable. */
+ pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
+ pciercx_cfg001.s.see = 1; /* SERR# enable */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port),
+ pciercx_cfg001.u32);
+
+ /* Advanced Error Recovery Message Enables */
+ /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
+ /* Use CVMX_PCIERCX_CFG067 hardware default */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);
+
+ /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
+ pciercx_cfg032.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port),
+ pciercx_cfg032.u32);
+
+ /* Entrance Latencies (PCIE*_CFG451[L0EL,L1EL]) */
+
+ /*
+ * Link Width Mode (PCIERCn_CFG452[LME]) - Set during
+ * cvmx_pcie_rc_initialize_link()
+ *
+ * Primary Bus Number (PCIERCn_CFG006[PBNUM])
+ *
+ * We set the primary bus number to 1 so IDT bridges are
+ * happy. They don't like zero.
+ */
+ pciercx_cfg006.u32 = 0;
+ pciercx_cfg006.s.pbnum = 1;
+ pciercx_cfg006.s.sbnum = 1;
+ pciercx_cfg006.s.subbnum = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port),
+ pciercx_cfg006.u32);
+
+ /*
+ * Memory-mapped I/O BAR (PCIERCn_CFG008)
+ * Most applications should disable the memory-mapped I/O BAR by
+ * setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR]
+ */
+ pciercx_cfg008.u32 = 0;
+ pciercx_cfg008.s.mb_addr = 0x100;
+ pciercx_cfg008.s.ml_addr = 0;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port),
+ pciercx_cfg008.u32);
+
+ /*
+ * Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011)
+ * Most applications should disable the prefetchable BAR by setting
+ * PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] <
+ * PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE]
+ */
+ pciercx_cfg009.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
+ pciercx_cfg010.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
+ pciercx_cfg011.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
+ pciercx_cfg009.s.lmem_base = 0x100;
+ pciercx_cfg009.s.lmem_limit = 0;
+ pciercx_cfg010.s.umem_base = 0x100;
+ pciercx_cfg011.s.umem_limit = 0;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port),
+ pciercx_cfg009.u32);
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port),
+ pciercx_cfg010.u32);
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port),
+ pciercx_cfg011.u32);
+
+ /*
+ * System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE])
+ * PME Interrupt Enables (PCIERCn_CFG035[PMEIE])
+ */
+ pciercx_cfg035.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
+ /* System error on correctable error enable. */
+ pciercx_cfg035.s.secee = 1;
+ /* System error on fatal error enable. */
+ pciercx_cfg035.s.sefee = 1;
+ /* System error on non-fatal error enable. */
+ pciercx_cfg035.s.senfee = 1;
+ /* PME interrupt enable. */
+ pciercx_cfg035.s.pmeie = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port),
+ pciercx_cfg035.u32);
+
+ /*
+ * Advanced Error Recovery Interrupt Enables
+ * (PCIERCn_CFG075[CERE,NFERE,FERE])
+ */
+ pciercx_cfg075.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
+ /* Correctable error reporting enable. */
+ pciercx_cfg075.s.cere = 1;
+ /* Non-fatal error reporting enable. */
+ pciercx_cfg075.s.nfere = 1;
+ /* Fatal error reporting enable. */
+ pciercx_cfg075.s.fere = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port),
+ pciercx_cfg075.u32);
+
+ /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN],
+ * PCIERCn_CFG034[DLLS_EN,CCINT_EN])
+ */
+ pciercx_cfg034.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
+ /* Hot-plug interrupt enable. */
+ pciercx_cfg034.s.hpint_en = 1;
+ /* Data Link Layer state changed enable */
+ pciercx_cfg034.s.dlls_en = 1;
+ /* Command completed interrupt enable. */
+ pciercx_cfg034.s.ccint_en = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port),
+ pciercx_cfg034.u32);
+}
+
+/**
+ * Initialize a host mode PCIe link. This function takes a PCIe
+ * port from reset to a link up state. Software can then begin
+ * configuring the rest of the link.
+ *
+ * @pcie_port: PCIe port to initialize
+ *
+ * Returns Zero on success
+ */
+static int __cvmx_pcie_rc_initialize_link(int pcie_port)
+{
+ uint64_t start_cycle;
+ union cvmx_pescx_ctl_status pescx_ctl_status;
+ union cvmx_pciercx_cfg452 pciercx_cfg452;
+ union cvmx_pciercx_cfg032 pciercx_cfg032;
+ union cvmx_pciercx_cfg448 pciercx_cfg448;
+
+ /* Set the lane width */
+ pciercx_cfg452.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
+ pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
+ if (pescx_ctl_status.s.qlm_cfg == 0) {
+ /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
+ pciercx_cfg452.s.lme = 0xf;
+ } else {
+ /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
+ pciercx_cfg452.s.lme = 0x7;
+ }
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port),
+ pciercx_cfg452.u32);
+
+ /*
+ * CN52XX pass 1.x has an errata where length mismatches on UR
+ * responses can cause bus errors on 64bit memory
+ * reads. Turning off length error checking fixes this.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ union cvmx_pciercx_cfg455 pciercx_cfg455;
+ pciercx_cfg455.u32 =
+ cvmx_pcie_cfgx_read(pcie_port,
+ CVMX_PCIERCX_CFG455(pcie_port));
+ pciercx_cfg455.s.m_cpl_len_err = 1;
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port),
+ pciercx_cfg455.u32);
+ }
+
+ /* Lane swap needs to be manually enabled for CN52XX */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) {
+ pescx_ctl_status.s.lane_swp = 1;
+ cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),
+ pescx_ctl_status.u64);
+ }
+
+ /* Bring up the link */
+ pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
+ pescx_ctl_status.s.lnk_enb = 1;
+ cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
+
+ /*
+ * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to
+ * be disabled.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
+ __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);
+
+ /* Wait for the link to come up */
+ cvmx_dprintf("PCIe: Waiting for port %d link\n", pcie_port);
+ start_cycle = cvmx_get_cycle();
+ do {
+ if (cvmx_get_cycle() - start_cycle >
+ 2 * cvmx_sysinfo_get()->cpu_clock_hz) {
+ cvmx_dprintf("PCIe: Port %d link timeout\n",
+ pcie_port);
+ return -1;
+ }
+ cvmx_wait(10000);
+ pciercx_cfg032.u32 =
+ cvmx_pcie_cfgx_read(pcie_port,
+ CVMX_PCIERCX_CFG032(pcie_port));
+ } while (pciercx_cfg032.s.dlla == 0);
+
+ /* Display the link status */
+ cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port,
+ pciercx_cfg032.s.nlw);
+
+ /*
+ * Update the Replay Time Limit. Empirically, some PCIe
+ * devices take a little longer to respond than expected under
+ * load. As a workaround for this we configure the Replay Time
+ * Limit to the value expected for a 512 byte MPS instead of
+ * our actual 256 byte MPS. The numbers below are directly
+ * from the PCIe spec table 3-4.
+ */
+ pciercx_cfg448.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
+ switch (pciercx_cfg032.s.nlw) {
+ case 1: /* 1 lane */
+ pciercx_cfg448.s.rtl = 1677;
+ break;
+ case 2: /* 2 lanes */
+ pciercx_cfg448.s.rtl = 867;
+ break;
+ case 4: /* 4 lanes */
+ pciercx_cfg448.s.rtl = 462;
+ break;
+ case 8: /* 8 lanes */
+ pciercx_cfg448.s.rtl = 258;
+ break;
+ }
+ cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port),
+ pciercx_cfg448.u32);
+
+ return 0;
+}
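As the comment notes, the values written to CFG448 are the 512-byte-MPS replay-time-limit entries rather than the 256-byte ones actually in use. The following is a minimal standalone sketch (not part of the patch) of the same lane-width to limit mapping, with the constants copied from the switch above; narrower links need a larger limit because a TLP occupies more symbol times on the wire.

#include <stdio.h>

/*
 * Replay Time Limit values programmed above (symbol times, taken from
 * the 512-byte-MPS row of the spec table), indexed by negotiated lane
 * count.
 */
static int rtl_for_width(int lanes)
{
	switch (lanes) {
	case 1: return 1677;
	case 2: return 867;
	case 4: return 462;
	case 8: return 258;
	default: return -1;	/* width not covered by the table above */
	}
}

int main(void)
{
	int w;

	for (w = 1; w <= 8; w <<= 1)
		printf("x%d link -> Replay Time Limit %d\n", w, rtl_for_width(w));
	return 0;
}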
+
+/**
+ * Initialize a PCIe port for use in host (RC) mode. It doesn't
+ * enumerate the bus.
+ *
+ * @pcie_port: PCIe port to initialize
+ *
+ * Returns Zero on success
+ */
+static int cvmx_pcie_rc_initialize(int pcie_port)
+{
+ int i;
+ union cvmx_ciu_soft_prst ciu_soft_prst;
+ union cvmx_pescx_bist_status pescx_bist_status;
+ union cvmx_pescx_bist_status2 pescx_bist_status2;
+ union cvmx_npei_ctl_status npei_ctl_status;
+ union cvmx_npei_mem_access_ctl npei_mem_access_ctl;
+ union cvmx_npei_mem_access_subidx mem_access_subid;
+ union cvmx_npei_dbg_data npei_dbg_data;
+ union cvmx_pescx_ctl_status2 pescx_ctl_status2;
+
+ /*
+ * Make sure we aren't trying to setup a target mode interface
+ * in host mode.
+ */
+ npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
+ if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) {
+ cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called "
+ "on port0, but port0 is not in host mode\n");
+ return -1;
+ }
+
+ /*
+ * Make sure a CN52XX isn't trying to bring up port 1 when it
+ * is disabled.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) {
+ cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() "
+ "called on port1, but port1 is "
+ "disabled\n");
+ return -1;
+ }
+ }
+
+ /*
+ * PCIe switch arbitration mode. '0' == fixed priority NPEI,
+ * PCIe0, then PCIe1. '1' == round robin.
+ */
+ npei_ctl_status.s.arb = 1;
+ /* Allow up to 0x20 config retries */
+ npei_ctl_status.s.cfg_rtry = 0x20;
+ /*
+ * CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS
+ * don't reset.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ npei_ctl_status.s.p0_ntags = 0x20;
+ npei_ctl_status.s.p1_ntags = 0x20;
+ }
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
+
+ /* Bring the PCIe out of reset */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) {
+ /*
+ * The EBH5200 board swapped the PCIe reset lines on
+ * the board. As a workaround for this bug, we bring
+ * both PCIe ports out of reset at the same time
+ * instead of on separate calls. So for port 0, we
+ * bring both out of reset and do nothing on port 1.
+ */
+ if (pcie_port == 0) {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ /*
+ * After a chip reset the PCIe will also be in
+ * reset. If it isn't, most likely someone is
+ * trying to init it again without a proper
+ * PCIe reset.
+ */
+ if (ciu_soft_prst.s.soft_prst == 0) {
+ /* Reset the ports */
+ ciu_soft_prst.s.soft_prst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST,
+ ciu_soft_prst.u64);
+ ciu_soft_prst.u64 =
+ cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1,
+ ciu_soft_prst.u64);
+ /* Wait until pcie resets the ports. */
+ udelay(2000);
+ }
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+ } else {
+ /*
+ * The normal case: The PCIe ports are completely
+ * separate and can be brought out of reset
+ * independently.
+ */
+ if (pcie_port)
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ else
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ /*
+ * After a chip reset the PCIe will also be in
+ * reset. If it isn't, most likely someone is trying
+ * to init it again without a proper PCIe reset.
+ */
+ if (ciu_soft_prst.s.soft_prst == 0) {
+ /* Reset the port */
+ ciu_soft_prst.s.soft_prst = 1;
+ if (pcie_port)
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1,
+ ciu_soft_prst.u64);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST,
+ ciu_soft_prst.u64);
+ /* Wait until pcie resets the ports. */
+ udelay(2000);
+ }
+ if (pcie_port) {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
+ } else {
+ ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+ ciu_soft_prst.s.soft_prst = 0;
+ cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
+ }
+ }
+
+ /*
+ * Wait for PCIe reset to complete. Due to errata PCIE-700, we
+ * don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a
+ * fixed number of cycles.
+ */
+ cvmx_wait(400000);
+
+ /*
+ * PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX
+ * and CN52XX, so we only probe it on newer chips.
+ */
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+ && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ /* Clear PCLK_RUN so we can check if the clock is running */
+ pescx_ctl_status2.u64 =
+ cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
+ pescx_ctl_status2.s.pclk_run = 1;
+ cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port),
+ pescx_ctl_status2.u64);
+ /*
+ * Now that we cleared PCLK_RUN, wait for it to be set
+ * again telling us the clock is running.
+ */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
+ union cvmx_pescx_ctl_status2,
+ pclk_run, ==, 1, 10000)) {
+ cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n",
+ pcie_port);
+ return -1;
+ }
+ }
+
+ /*
+ * Check and make sure PCIe came out of reset. If it doesn't
+ * the board probably hasn't wired the clocks up and the
+ * interface should be skipped.
+ */
+ pescx_ctl_status2.u64 =
+ cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
+ if (pescx_ctl_status2.s.pcierst) {
+ cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n",
+ pcie_port);
+ return -1;
+ }
+
+ /*
+ * Check BIST2 status. If any bits are set skip this interface. This
+ * is an attempt to catch PCIE-813 on pass 1 parts.
+ */
+ pescx_bist_status2.u64 =
+ cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
+ if (pescx_bist_status2.u64) {
+ cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this "
+ "port isn't hooked up, skipping.\n",
+ pcie_port);
+ return -1;
+ }
+
+ /* Check BIST status */
+ pescx_bist_status.u64 =
+ cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
+ if (pescx_bist_status.u64)
+ cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n",
+ pcie_port, CAST64(pescx_bist_status.u64));
+
+ /* Initialize the config space CSRs */
+ __cvmx_pcie_rc_initialize_config_space(pcie_port);
+
+ /* Bring the link up */
+ if (__cvmx_pcie_rc_initialize_link(pcie_port)) {
+ cvmx_dprintf
+ ("PCIe: ERROR: cvmx_pcie_rc_initialize_link() failed\n");
+ return -1;
+ }
+
+ /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
+ npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
+ /* Allow 16 words to combine */
+ npei_mem_access_ctl.s.max_word = 0;
+ /* Wait up to 127 cycles for more data */
+ npei_mem_access_ctl.s.timer = 127;
+ cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
+
+ /* Setup Mem access SubDIDs */
+ mem_access_subid.u64 = 0;
+ /* Port the request is sent to. */
+ mem_access_subid.s.port = pcie_port;
+ /* Due to an errata on pass 1 chips, no merging is allowed. */
+ mem_access_subid.s.nmerge = 1;
+ /* Endian-swap for Reads. */
+ mem_access_subid.s.esr = 1;
+ /* Endian-swap for Writes. */
+ mem_access_subid.s.esw = 1;
+ /* No Snoop for Reads. */
+ mem_access_subid.s.nsr = 1;
+ /* No Snoop for Writes. */
+ mem_access_subid.s.nsw = 1;
+ /* Disable Relaxed Ordering for Reads. */
+ mem_access_subid.s.ror = 0;
+ /* Disable Relaxed Ordering for Writes. */
+ mem_access_subid.s.row = 0;
+ /* PCIe Address Bits <63:34>. */
+ mem_access_subid.s.ba = 0;
+
+ /*
+ * Setup mem access 12-15 for port 0, 16-19 for port 1,
+ * supplying 36 bits of address space.
+ */
+ for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
+ cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i),
+ mem_access_subid.u64);
+ /* Set each SUBID to extend the addressable range */
+ mem_access_subid.s.ba += 1;
+ }
+
+ /*
+ * Disable the peer to peer forwarding register. This must be
+ * setup by the OS after it enumerates the bus and assigns
+ * addresses to the PCIe busses.
+ */
+ for (i = 0; i < 4; i++) {
+ cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
+ cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
+ }
+
+ /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
+
+ /*
+ * Disable Octeon's BAR1. It isn't needed in RC mode since
+ * BAR2 maps all of memory. BAR2 also maps 256MB-512MB into
+ * the 2nd 256MB of memory.
+ */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), -1);
+
+ /*
+ * Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take
+ * precedence where they overlap. It also overlaps with the
+ * device addresses, so make sure the peer to peer forwarding
+ * is set right.
+ */
+ cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
+
+ /*
+ * Setup BAR2 attributes
+ *
+ * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
+ * - PTLP_RO,CTLP_RO should normally be set (except for debug).
+ * - WAIT_COM=0 will likely work for all applications.
+ *
+ * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]).
+ */
+ if (pcie_port) {
+ union cvmx_npei_ctl_port1 npei_ctl_port;
+ npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
+ npei_ctl_port.s.bar2_enb = 1;
+ npei_ctl_port.s.bar2_esx = 1;
+ npei_ctl_port.s.bar2_cax = 0;
+ npei_ctl_port.s.ptlp_ro = 1;
+ npei_ctl_port.s.ctlp_ro = 1;
+ npei_ctl_port.s.wait_com = 0;
+ npei_ctl_port.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
+ } else {
+ union cvmx_npei_ctl_port0 npei_ctl_port;
+ npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
+ npei_ctl_port.s.bar2_enb = 1;
+ npei_ctl_port.s.bar2_esx = 1;
+ npei_ctl_port.s.bar2_cax = 0;
+ npei_ctl_port.s.ptlp_ro = 1;
+ npei_ctl_port.s.ctlp_ro = 1;
+ npei_ctl_port.s.wait_com = 0;
+ npei_ctl_port.s.waitl_com = 0;
+ cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
+ }
+ return 0;
+}
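A small arithmetic sketch (not part of the driver) of the SubDID windows set up in the loop above: each port owns four SubDIDs (12-15 or 16-19), and since the 'ba' field supplies PCIe address bits 63:34, each SubDID window spans 2^34 bytes, which is where the 36 bits of address space mentioned in the comment come from.

#include <stdio.h>

int main(void)
{
	int pcie_port;

	for (pcie_port = 0; pcie_port < 2; pcie_port++) {
		/* Same index math as the loop above: 12-15 or 16-19 */
		int first = 12 + pcie_port * 4;
		/* 'ba' holds address bits 63:34, so one SubDID spans 2^34 bytes */
		unsigned long long per_subid = 1ULL << 34;
		unsigned long long total = 4 * per_subid;

		printf("port %d: SubDIDs %d-%d, window %llu GB (2^36 bytes)\n",
		       pcie_port, first, first + 3, total >> 30);
	}
	return 0;
}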
+
+
+/* Above was cvmx-pcie.c, below original pcie.c */
+
+
+/**
+ * Map a PCI device to the appropriate interrupt line
+ *
+ * @dev: The Linux PCI device structure for the device to map
+ * @slot: The slot number for this device on __BUS 0__. Linux
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
+ * @pin: The PCI interrupt pin read from the device, then swizzled
+ * as it goes through each bridge.
+ * Returns Interrupt number for the device
+ */
+int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
+ u8 slot, u8 pin)
+{
+ /*
+ * The EBH5600 board with the PCI to PCIe bridge mistakenly
+ * wires the first slot for both device id 2 and interrupt
+ * A. According to the PCI spec, device id 2 should use interrupt C. The
+ * following kludge attempts to fix this.
+ */
+ if (strstr(octeon_board_type_string(), "EBH5600") &&
+ dev->bus && dev->bus->parent) {
+ /*
+ * Iterate all the way up the device chain and find
+ * the root bus.
+ */
+ while (dev->bus && dev->bus->parent)
+ dev = to_pci_dev(dev->bus->bridge);
+ /*
+ * If the root bus is number 0 and the PEX 8114 is the
+ * root, assume we are behind the miswired bus. We
+ * need to correct the swizzle level by two. Yuck.
+ */
+ if ((dev->bus->number == 0) &&
+ (dev->vendor == 0x10b5) && (dev->device == 0x8114)) {
+ /*
+ * The pin field is one based, not zero. We
+ * need to swizzle it by minus two.
+ */
+ pin = ((pin - 3) & 3) + 1;
+ }
+ }
+ /*
+ * The -1 is because pin starts with one, not zero. It might
+ * be that this equation needs to include the slot number, but
+ * I don't have hardware to check that against.
+ */
+ return pin - 1 + OCTEON_IRQ_PCI_INT0;
+}
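The swizzle arithmetic above is compact; here is a standalone sketch that prints the mapping it produces, using a made-up base constant in place of OCTEON_IRQ_PCI_INT0. Pins are numbered INTA=1 through INTD=4, and ((pin - 3) & 3) + 1 rotates a pin back by two bridge levels (A->C, B->D, C->A, D->B) before the usual pin - 1 + base conversion.

#include <stdio.h>

#define FAKE_PCI_INT0 40	/* hypothetical stand-in for OCTEON_IRQ_PCI_INT0 */

int main(void)
{
	const char names[] = "ABCD";
	unsigned int pin;

	for (pin = 1; pin <= 4; pin++) {
		/* Undo two levels of bridge swizzling, as in the kludge above */
		unsigned int corrected = ((pin - 3) & 3) + 1;

		printf("INT%c -> corrected INT%c -> irq %u\n",
		       names[pin - 1], names[corrected - 1],
		       corrected - 1 + FAKE_PCI_INT0);
	}
	return 0;
}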
+
+/**
+ * Read a value from configuration space
+ *
+ * @pcie_port: PCIe port the bus hangs off of
+ * @bus: Bus to read from
+ * @devfn: Combined device and function number
+ * @reg: Register offset to read
+ * @size: Size of the read (1, 2, or 4 bytes)
+ * @val: Where to store the value read
+ * Returns PCIBIOS_SUCCESSFUL on success
+ */
+static inline int octeon_pcie_read_config(int pcie_port, struct pci_bus *bus,
+ unsigned int devfn, int reg, int size,
+ u32 *val)
+{
+ union octeon_cvmemctl cvmmemctl;
+ union octeon_cvmemctl cvmmemctl_save;
+ int bus_number = bus->number;
+
+ /*
+ * We need to force the bus number to be zero on the root
+ * bus. Linux numbers the 2nd root bus to start after all
+ * buses on root 0.
+ */
+ if (bus->parent == NULL)
+ bus_number = 0;
+
+ /*
+ * PCIe only has a single device connected to Octeon. It is
+ * always device ID 0. Don't bother doing reads for other
+ * device IDs on the first segment.
+ */
+ if ((bus_number == 0) && (devfn >> 3 != 0))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+ /*
+ * The following is a workaround for the CN57XX, CN56XX,
+ * CN55XX, and CN54XX errata with PCIe config reads from
+ * non-existent devices. These chips will hang the PCIe link if a
+ * config read is performed that causes a UR response.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) {
+ /*
+ * For our EBH5600 board, port 0 has a bridge with two
+ * PCI-X slots. We need special checks to make
+ * sure we only probe valid devices. The PCIe->PCI-X
+ * bridge only responds to device ID 0, functions
+ * 0-1.
+ */
+ if ((bus_number == 0) && (devfn >= 2))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ /*
+ * The PCI-X slots are device ID 2,3. Choose one of
+ * the below "if" blocks based on what is plugged into
+ * the board.
+ */
+#if 1
+ /* Use this option if you aren't using either slot */
+ if (bus_number == 1)
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+#elif 0
+ /*
+ * Use this option if you are using the first slot but
+ * not the second.
+ */
+ if ((bus_number == 1) && (devfn >> 3 != 2))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+#elif 0
+ /*
+ * Use this option if you are using the second slot
+ * but not the first.
+ */
+ if ((bus_number == 1) && (devfn >> 3 != 3))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+#elif 0
+ /* Use this option if you are using both slots */
+ if ((bus_number == 1) &&
+ !((devfn == (2 << 3)) || (devfn == (3 << 3))))
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+#endif
+
+ /*
+ * Shorten the DID timeout so bus errors for PCIe
+ * config reads from non-existent devices happen
+ * faster. This allows us to continue booting even if
+ * the above "if" checks are wrong. Once one of these
+ * errors happens, the PCIe port is dead.
+ */
+ cvmmemctl_save.u64 = __read_64bit_c0_register($11, 7);
+ cvmmemctl.u64 = cvmmemctl_save.u64;
+ cvmmemctl.s.didtto = 2;
+ __write_64bit_c0_register($11, 7, cvmmemctl.u64);
+ }
+
+ switch (size) {
+ case 4:
+ *val = cvmx_pcie_config_read32(pcie_port, bus_number,
+ devfn >> 3, devfn & 0x7, reg);
+ break;
+ case 2:
+ *val = cvmx_pcie_config_read16(pcie_port, bus_number,
+ devfn >> 3, devfn & 0x7, reg);
+ break;
+ case 1:
+ *val = cvmx_pcie_config_read8(pcie_port, bus_number, devfn >> 3,
+ devfn & 0x7, reg);
+ break;
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1))
+ __write_64bit_c0_register($11, 7, cvmmemctl_save.u64);
+ return PCIBIOS_SUCCESSFUL;
+}
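The accessors above repeatedly split devfn into a device and a function number; the following is a minimal sketch of that encoding (the same shifts the kernel's PCI_SLOT() and PCI_FUNC() helpers perform), which also shows why the 'devfn >= 2' check above rejects everything past device 0 function 1 on the root bus.

#include <stdio.h>

/* Same encoding the accessors above rely on: devfn = (device << 3) | function */
static unsigned int devfn_slot(unsigned int devfn) { return (devfn >> 3) & 0x1f; }
static unsigned int devfn_func(unsigned int devfn) { return devfn & 0x07; }

int main(void)
{
	unsigned int devfn = (2 << 3) | 1;	/* device 2, function 1 */

	printf("devfn 0x%02x -> device %u, function %u\n",
	       devfn, devfn_slot(devfn), devfn_func(devfn));
	/*
	 * On the root bus the 'devfn >= 2' check above therefore rejects
	 * everything beyond device 0 function 1.
	 */
	return 0;
}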
+
+static int octeon_pcie0_read_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 *val)
+{
+ return octeon_pcie_read_config(0, bus, devfn, reg, size, val);
+}
+
+static int octeon_pcie1_read_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 *val)
+{
+ return octeon_pcie_read_config(1, bus, devfn, reg, size, val);
+}
+
+
+
+/**
+ * Write a value to PCI configuration space
+ *
+ * @pcie_port: PCIe port the bus hangs off of
+ * @bus: Bus to write to
+ * @devfn: Combined device and function number
+ * @reg: Register offset to write
+ * @size: Size of the write (1, 2, or 4 bytes)
+ * @val: Value to write
+ * Returns PCIBIOS_SUCCESSFUL on success
+ */
+static inline int octeon_pcie_write_config(int pcie_port, struct pci_bus *bus,
+ unsigned int devfn, int reg,
+ int size, u32 val)
+{
+ int bus_number = bus->number;
+ /*
+ * We need to force the bus number to be zero on the root
+ * bus. Linux numbers the 2nd root bus to start after all
+ * buses on root 0.
+ */
+ if (bus->parent == NULL)
+ bus_number = 0;
+
+ switch (size) {
+ case 4:
+ cvmx_pcie_config_write32(pcie_port, bus_number, devfn >> 3,
+ devfn & 0x7, reg, val);
+ return PCIBIOS_SUCCESSFUL;
+ case 2:
+ cvmx_pcie_config_write16(pcie_port, bus_number, devfn >> 3,
+ devfn & 0x7, reg, val);
+ return PCIBIOS_SUCCESSFUL;
+ case 1:
+ cvmx_pcie_config_write8(pcie_port, bus_number, devfn >> 3,
+ devfn & 0x7, reg, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+#if PCI_CONFIG_SPACE_DELAY
+ udelay(PCI_CONFIG_SPACE_DELAY);
+#endif
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+static int octeon_pcie0_write_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 val)
+{
+ return octeon_pcie_write_config(0, bus, devfn, reg, size, val);
+}
+
+static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn,
+ int reg, int size, u32 val)
+{
+ return octeon_pcie_write_config(1, bus, devfn, reg, size, val);
+}
+
+static struct pci_ops octeon_pcie0_ops = {
+ octeon_pcie0_read_config,
+ octeon_pcie0_write_config,
+};
+
+static struct resource octeon_pcie0_mem_resource = {
+ .name = "Octeon PCIe0 MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource octeon_pcie0_io_resource = {
+ .name = "Octeon PCIe0 IO",
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller octeon_pcie0_controller = {
+ .pci_ops = &octeon_pcie0_ops,
+ .mem_resource = &octeon_pcie0_mem_resource,
+ .io_resource = &octeon_pcie0_io_resource,
+};
+
+static struct pci_ops octeon_pcie1_ops = {
+ octeon_pcie1_read_config,
+ octeon_pcie1_write_config,
+};
+
+static struct resource octeon_pcie1_mem_resource = {
+ .name = "Octeon PCIe1 MEM",
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource octeon_pcie1_io_resource = {
+ .name = "Octeon PCIe1 IO",
+ .flags = IORESOURCE_IO,
+};
+
+static struct pci_controller octeon_pcie1_controller = {
+ .pci_ops = &octeon_pcie1_ops,
+ .mem_resource = &octeon_pcie1_mem_resource,
+ .io_resource = &octeon_pcie1_io_resource,
+};
+
+
+/**
+ * Initialize the Octeon PCIe controllers
+ *
+ * Returns Zero on success
+ */
+static int __init octeon_pcie_setup(void)
+{
+ union cvmx_npei_ctl_status npei_ctl_status;
+ int result;
+
+ /* These chips don't have PCIe */
+ if (!octeon_has_feature(OCTEON_FEATURE_PCIE))
+ return 0;
+
+ /* Point pcibios_map_irq() to the PCIe version of it */
+ octeon_pcibios_map_irq = octeon_pcie_pcibios_map_irq;
+
+ /* Use the PCIe based DMA mappings */
+ octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE;
+
+ /*
+ * PCIe I/O range. It starts at port 0's base address and
+ * extends through the end of port 1's window.
+ */
+ set_io_port_base(CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0)));
+ ioport_resource.start = 0;
+ ioport_resource.end =
+ cvmx_pcie_get_io_base_address(1) -
+ cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1;
+
+ npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
+ if (npei_ctl_status.s.host_mode) {
+ pr_notice("PCIe: Initializing port 0\n");
+ result = cvmx_pcie_rc_initialize(0);
+ if (result == 0) {
+ /* Memory offsets are physical addresses */
+ octeon_pcie0_controller.mem_offset =
+ cvmx_pcie_get_mem_base_address(0);
+ /* IO offsets are Mips virtual addresses */
+ octeon_pcie0_controller.io_map_base =
+ CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address
+ (0));
+ octeon_pcie0_controller.io_offset = 0;
+ /*
+ * To keep things similar to PCI, we start
+ * device addresses at the same place as PCI
+ * using big bar support. This normally
+ * translates to 4GB-256MB, which is the same
+ * as most x86 PCs.
+ */
+ octeon_pcie0_controller.mem_resource->start =
+ cvmx_pcie_get_mem_base_address(0) +
+ (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
+ octeon_pcie0_controller.mem_resource->end =
+ cvmx_pcie_get_mem_base_address(0) +
+ cvmx_pcie_get_mem_size(0) - 1;
+ /*
+ * Ports must be above 16KB for the ISA bus
+ * filtering in the PCI-X to PCI bridge.
+ */
+ octeon_pcie0_controller.io_resource->start = 4 << 10;
+ octeon_pcie0_controller.io_resource->end =
+ cvmx_pcie_get_io_size(0) - 1;
+ register_pci_controller(&octeon_pcie0_controller);
+ }
+ } else {
+ pr_notice("PCIe: Port 0 in endpoint mode, skipping.\n");
+ }
+
+ /* Skip the 2nd port on CN52XX if port 0 is in 4 lane mode */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ union cvmx_npei_dbg_data npei_dbg_data;
+ npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
+ if (npei_dbg_data.cn52xx.qlm0_link_width)
+ return 0;
+ }
+
+ pr_notice("PCIe: Initializing port 1\n");
+ result = cvmx_pcie_rc_initialize(1);
+ if (result == 0) {
+ /* Memory offsets are physical addresses */
+ octeon_pcie1_controller.mem_offset =
+ cvmx_pcie_get_mem_base_address(1);
+ /* IO offsets are Mips virtual addresses */
+ octeon_pcie1_controller.io_map_base =
+ CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(1));
+ octeon_pcie1_controller.io_offset =
+ cvmx_pcie_get_io_base_address(1) -
+ cvmx_pcie_get_io_base_address(0);
+ /*
+ * To keep things similar to PCI, we start device
+ * addresses at the same place as PCI using big bar
+ * support. This normally translates to 4GB-256MB,
+ * which is the same as most x86 PCs.
+ */
+ octeon_pcie1_controller.mem_resource->start =
+ cvmx_pcie_get_mem_base_address(1) + (4ul << 30) -
+ (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
+ octeon_pcie1_controller.mem_resource->end =
+ cvmx_pcie_get_mem_base_address(1) +
+ cvmx_pcie_get_mem_size(1) - 1;
+ /*
+ * Ports must be above 16KB for the ISA bus filtering
+ * in the PCI-X to PCI bridge.
+ */
+ octeon_pcie1_controller.io_resource->start =
+ cvmx_pcie_get_io_base_address(1) -
+ cvmx_pcie_get_io_base_address(0);
+ octeon_pcie1_controller.io_resource->end =
+ octeon_pcie1_controller.io_resource->start +
+ cvmx_pcie_get_io_size(1) - 1;
+ register_pci_controller(&octeon_pcie1_controller);
+ }
+ return 0;
+}
+
+arch_initcall(octeon_pcie_setup);
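For illustration only, a sketch of the ioport_resource sizing done in octeon_pcie_setup(), with invented per-port bases and sizes standing in for cvmx_pcie_get_io_base_address() and cvmx_pcie_get_io_size(): I/O offsets are taken relative to port 0's base, and the resource ends at port 1's last byte.

#include <stdio.h>

int main(void)
{
	/* Invented per-port I/O windows; the real values come from the
	 * cvmx_pcie_get_io_base_address()/cvmx_pcie_get_io_size() helpers. */
	unsigned long long io_base[2] = { 0x00000000ULL, 0x01000000ULL };
	unsigned long long io_size[2] = { 0x01000000ULL, 0x01000000ULL };

	/* Same arithmetic as octeon_pcie_setup(): offsets are relative to
	 * port 0's base, and the resource ends with port 1's last byte. */
	unsigned long long end = io_base[1] - io_base[0] + io_size[1] - 1;

	printf("ioport_resource: 0x0 - 0x%llx\n", end);
	return 0;
}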
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 8ace27716232..326fe7a392e8 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -97,11 +97,11 @@ static void yos_send_ipi_single(int cpu, unsigned int action)
}
}
-static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
yos_send_ipi_single(i, action);
}
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 060d853d7b35..f61c164d1e67 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -421,7 +421,7 @@ static void __init node_mem_init(cnodeid_t node)
/*
* A node with nothing. We use it to avoid any special casing in
- * node_to_cpumask
+ * cpumask_of_node
*/
static struct node_data null_node = {
.hub = {
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index cbcd7eb83bd1..9aa8f2951df6 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -165,11 +165,11 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
}
-static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void ip27_send_ipi(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
ip27_send_ipi_single(i, action);
}
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 314691648c97..47b347c992ea 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -82,11 +82,12 @@ static void bcm1480_send_ipi_single(int cpu, unsigned int action)
__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
}
-static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void bcm1480_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
bcm1480_send_ipi_single(i, action);
}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index cad14003b84f..c00a5cb1128d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -70,11 +70,12 @@ static void sb1250_send_ipi_single(int cpu, unsigned int action)
__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
}
-static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
sb1250_send_ipi_single(i, action);
}
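The four MIPS IPI hunks above all apply the same conversion; below is a minimal before/after sketch of the pattern, with my_send_ipi_single standing in for the platform-specific single-CPU sender: masks are now passed as 'const struct cpumask *' (avoiding a by-value copy of a potentially large mask) and iterated with for_each_cpu().

#include <linux/cpumask.h>

/* Stand-in for the platform-specific single-CPU IPI sender. */
static void my_send_ipi_single(int cpu, unsigned int action);

/*
 * Old form, shown for contrast: the whole cpumask was passed by value
 * and walked with for_each_cpu_mask().
 *
 *	static void send_ipi_mask(cpumask_t mask, unsigned int action)
 *	{
 *		unsigned int cpu;
 *
 *		for_each_cpu_mask(cpu, mask)
 *			my_send_ipi_single(cpu, action);
 *	}
 */

/* New form: take a const pointer and iterate with for_each_cpu(). */
static void send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		my_send_ipi_single(cpu, action);
}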
diff --git a/arch/mips/vr41xx/casio-e55/setup.c b/arch/mips/vr41xx/casio-e55/setup.c
index 6d9bab890587..719f4a5b9844 100644
--- a/arch/mips/vr41xx/casio-e55/setup.c
+++ b/arch/mips/vr41xx/casio-e55/setup.c
@@ -1,7 +1,7 @@
/*
* setup.c, Setup for the CASIO CASSIOPEIA E-11/15/55/65.
*
- * Copyright (C) 2002-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2002-2006 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/bcu.c b/arch/mips/vr41xx/common/bcu.c
index d77c330a0d59..6346c59c9f9d 100644
--- a/arch/mips/vr41xx/common/bcu.c
+++ b/arch/mips/vr41xx/common/bcu.c
@@ -2,8 +2,8 @@
* bcu.c, Bus Control Unit routines for the NEC VR4100 series.
*
* Copyright (C) 2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <yyuasa@mvista.com, or source@mvista.com>
- * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,11 +21,11 @@
*/
/*
* Changes:
- * MontaVista Software Inc. <yyuasa@mvista.com> or <source@mvista.com>
+ * MontaVista Software Inc. <source@mvista.com>
* - New creation, NEC VR4122 and VR4131 are supported.
* - Added support for NEC VR4111 and VR4121.
*
- * Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Yoichi Yuasa <yuasa@linux-mips.org>
* - Added support for NEC VR4133.
*/
#include <linux/kernel.h>
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index ad0e8e3409d9..8ba7d04a5ec5 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -2,8 +2,8 @@
* cmu.c, Clock Mask Unit routines for the NEC VR4100 series.
*
* Copyright (C) 2001-2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copuright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copuright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,11 +21,11 @@
*/
/*
* Changes:
- * MontaVista Software Inc. <yyuasa@mvista.com> or <source@mvista.com>
+ * MontaVista Software Inc. <source@mvista.com>
* - New creation, NEC VR4122 and VR4131 are supported.
* - Added support for NEC VR4111 and VR4121.
*
- * Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Yoichi Yuasa <yuasa@linux-mips.org>
* - Added support for NEC VR4133.
*/
#include <linux/init.h>
diff --git a/arch/mips/vr41xx/common/giu.c b/arch/mips/vr41xx/common/giu.c
index 2b272f1496fe..22cc6f2100a1 100644
--- a/arch/mips/vr41xx/common/giu.c
+++ b/arch/mips/vr41xx/common/giu.c
@@ -1,7 +1,7 @@
/*
* NEC VR4100 series GIU platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 3f23d9fda662..6d39e222b170 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -2,8 +2,8 @@
* icu.c, Interrupt Control Unit routines for the NEC VR4100 series.
*
* Copyright (C) 2001-2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2003-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2003-2006 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,11 +21,11 @@
*/
/*
* Changes:
- * MontaVista Software Inc. <yyuasa@mvista.com> or <source@mvista.com>
+ * MontaVista Software Inc. <source@mvista.com>
* - New creation, NEC VR4122 and VR4131 are supported.
* - Added support for NEC VR4111 and VR4121.
*
- * Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Yoichi Yuasa <yuasa@linux-mips.org>
* - Coped with INTASSIGN of NEC VR4133.
*/
#include <linux/errno.h>
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
index c64995342ba8..1386e6f081c8 100644
--- a/arch/mips/vr41xx/common/init.c
+++ b/arch/mips/vr41xx/common/init.c
@@ -1,7 +1,7 @@
/*
* init.c, Common initialization routines for NEC VR4100 series.
*
- * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 9cc389109b19..bef06872f012 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -1,7 +1,7 @@
/*
* Interrupt handing routines for NEC VR4100 series.
*
- * Copyright (C) 2005-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005-2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 028aaf75eb21..692b4e85b7fc 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -1,7 +1,7 @@
/*
* pmu.c, Power Management Unit routines for NEC VR4100 series.
*
- * Copyright (C) 2003-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/rtc.c b/arch/mips/vr41xx/common/rtc.c
index 9f26c14edcac..ebc5dcf0ed8e 100644
--- a/arch/mips/vr41xx/common/rtc.c
+++ b/arch/mips/vr41xx/common/rtc.c
@@ -1,7 +1,7 @@
/*
* NEC VR4100 series RTC platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/siu.c b/arch/mips/vr41xx/common/siu.c
index 654dee6208be..54eae56108fb 100644
--- a/arch/mips/vr41xx/common/siu.c
+++ b/arch/mips/vr41xx/common/siu.c
@@ -1,7 +1,7 @@
/*
* NEC VR4100 series SIU platform device.
*
- * Copyright (C) 2007-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/type.c b/arch/mips/vr41xx/common/type.c
index e0c1ac5e988e..ff841422b638 100644
--- a/arch/mips/vr41xx/common/type.c
+++ b/arch/mips/vr41xx/common/type.c
@@ -1,7 +1,7 @@
/*
* type.c, System type for NEC VR4100 series.
*
- * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/ibm-workpad/setup.c b/arch/mips/vr41xx/ibm-workpad/setup.c
index 9eef297eca1a..3982f378a3e6 100644
--- a/arch/mips/vr41xx/ibm-workpad/setup.c
+++ b/arch/mips/vr41xx/ibm-workpad/setup.c
@@ -1,7 +1,7 @@
/*
* setup.c, Setup for the IBM WorkPad z50.
*
- * Copyright (C) 2002-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2002-2006 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mn10300/include/asm/gdb-stub.h b/arch/mn10300/include/asm/gdb-stub.h
index e5a6368559af..556cce992548 100644
--- a/arch/mn10300/include/asm/gdb-stub.h
+++ b/arch/mn10300/include/asm/gdb-stub.h
@@ -109,7 +109,6 @@ extern asmlinkage int gdbstub_intercept(struct pt_regs *, enum exception_code);
extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void __gdbstub_bug_trap(void);
extern asmlinkage void __gdbstub_pause(void);
-extern asmlinkage void start_kernel(void);
#ifndef CONFIG_MN10300_CACHE_DISABLED
extern asmlinkage void gdbstub_purge_cache(void);
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index a9e2e34f69b0..cb294c244de3 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -38,13 +38,13 @@ extern unsigned long mmu_context_cache[NR_CPUS];
#define enter_lazy_tlb(mm, tsk) do {} while (0)
#ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, task) \
- cpu_set((cpu), (task)->cpu_vm_mask)
-#define cpu_maybe_ran_vm(cpu, task) \
- cpu_test_and_set((cpu), (task)->cpu_vm_mask)
+#define cpu_ran_vm(cpu, mm) \
+ cpumask_set_cpu((cpu), mm_cpumask(mm))
+#define cpu_maybe_ran_vm(cpu, mm) \
+ cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
#else
-#define cpu_ran_vm(cpu, task) do {} while (0)
-#define cpu_maybe_ran_vm(cpu, task) true
+#define cpu_ran_vm(cpu, mm) do {} while (0)
+#define cpu_maybe_ran_vm(cpu, mm) true
#endif /* CONFIG_SMP */
/*
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h
index e58b9a46e1b1..5b43a1d51173 100644
--- a/arch/mn10300/include/asm/pci.h
+++ b/arch/mn10300/include/asm/pci.h
@@ -70,10 +70,6 @@ struct pci_dev;
*/
#define PCI_DMA_BUS_IS_PHYS (1)
-
-/* This is always fine. */
-#define pci_dac_dma_supported(pci_dev, mask) (0)
-
/* Return the index of the PCI controller for device. */
static inline int pci_controller_num(struct pci_dev *dev)
{
@@ -106,7 +102,18 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev,
struct resource *res,
struct pci_bus_region *region);
-#define pcibios_scan_all_fns(a, b) 0
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+ struct resource *root = NULL;
+
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+
+ return root;
+}
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index fef5b434dadc..fad68616af32 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -346,10 +346,12 @@
#define __NR_inotify_init1 333
#define __NR_preadv 334
#define __NR_pwritev 335
+#define __NR_rt_tgsigqueueinfo 336
+#define __NR_perf_counter_open 337
#ifdef __KERNEL__
-#define NR_syscalls 326
+#define NR_syscalls 338
/*
* specify the deprecated syscalls we want to support on this arch
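For reference, the new count follows directly from the entries added just above: the highest syscall number is now __NR_perf_counter_open = 337, and NR_syscalls must be one past it, hence 337 + 1 = 338.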
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 7408a27199f3..e0d2563af4f2 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -722,6 +722,8 @@ ENTRY(sys_call_table)
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev /* 335 */
+ .long sys_rt_tgsigqueueinfo
+ .long sys_perf_counter_open
nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index bcebcefb4ad7..166e8f9ac318 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -61,7 +61,7 @@ SECTIONS
_edata = .; /* End of data section */
}
- .data.init_task : { INIT_TASK(THREAD_SIZE); }
+ .data.init_task : { INIT_TASK_DATA(THREAD_SIZE); }
/* might get freed after init */
. = ALIGN(PAGE_SIZE);
@@ -118,6 +118,7 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
EXIT_CALL
+ *(.discard)
}
STABS_DEBUG
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 9038f39d9d73..06f8d5b5b0f9 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -16,6 +16,8 @@ config PARISC
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
+ select HAVE_PERF_COUNTERS
+ select GENERIC_ATOMIC64 if !64BIT
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 7eeaff944360..8bc9e96699b2 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -222,13 +222,13 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)(i)),(v))))
-#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)(i)),(v))))
+#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v))))
+#define atomic_sub(i,v) ((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
#define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
-#define atomic_add_return(i,v) (__atomic_add_return( ((int)(i)),(v)))
-#define atomic_sub_return(i,v) (__atomic_add_return(-((int)(i)),(v)))
+#define atomic_add_return(i,v) (__atomic_add_return( (i),(v)))
+#define atomic_sub_return(i,v) (__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
#define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
@@ -336,7 +336,11 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-#endif /* CONFIG_64BIT */
+#else /* CONFIG_64BIT */
+
+#include <asm-generic/atomic64.h>
+
+#endif /* !CONFIG_64BIT */
#include <asm-generic/atomic-long.h>
diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h
index 31ad0f05af3d..f7a18f968703 100644
--- a/arch/parisc/include/asm/dma.h
+++ b/arch/parisc/include/asm/dma.h
@@ -1,5 +1,4 @@
-/* $Id: dma.h,v 1.2 1999/04/27 00:46:18 deller Exp $
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+/* asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
index 7d842d699df2..64c7aa590ae5 100644
--- a/arch/parisc/include/asm/pci.h
+++ b/arch/parisc/include/asm/pci.h
@@ -233,7 +233,6 @@ static inline void pcibios_register_hba(struct pci_hba_data *x)
* rp7420/8420 boxes and then revisit this issue.
*/
#define pcibios_assign_all_busses() (1)
-#define pcibios_scan_all_fns(a, b) (0)
#define PCIBIOS_MIN_IO 0x10
#define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */
diff --git a/arch/parisc/include/asm/perf_counter.h b/arch/parisc/include/asm/perf_counter.h
new file mode 100644
index 000000000000..dc9e829f7013
--- /dev/null
+++ b/arch/parisc/include/asm/perf_counter.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_PARISC_PERF_COUNTER_H
+#define __ASM_PARISC_PERF_COUNTER_H
+
+/* parisc only supports software counters through this interface. */
+static inline void set_perf_counter_pending(void) { }
+
+#endif /* __ASM_PARISC_PERF_COUNTER_H */
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 9d64df8754ba..9ce66e9d1c2b 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -18,6 +18,7 @@
#include <asm/types.h>
#include <asm/system.h>
#include <asm/percpu.h>
+
#endif /* __ASSEMBLY__ */
#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
@@ -127,6 +128,8 @@ struct thread_struct {
unsigned long flags;
};
+#define task_pt_regs(tsk) ((struct pt_regs *)&((tsk)->thread.regs))
+
/* Thread struct flags. */
#define PARISC_UAC_NOPRINT (1UL << 0) /* see prctl and unaligned.c */
#define PARISC_UAC_SIGBUS (1UL << 1)
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 21eb45a52629..2e73623feb6b 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -30,7 +30,6 @@ extern void smp_send_all_nop(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#endif /* !ASSEMBLY */
diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h
index ee80c920b464..d91357bca5b4 100644
--- a/arch/parisc/include/asm/system.h
+++ b/arch/parisc/include/asm/system.h
@@ -168,8 +168,8 @@ static inline void set_eiem(unsigned long val)
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
#define __ldcw(a) ({ \
unsigned __ret; \
- __asm__ __volatile__(__LDCW " 0(%1),%0" \
- : "=r" (__ret) : "r" (a)); \
+ __asm__ __volatile__(__LDCW " 0(%2),%0" \
+ : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
__ret; \
})
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 1f6fd4fc05b9..8f1a8100bf2d 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -12,14 +12,12 @@
* N class systems, only one PxTLB inter processor broadcast can be
* active at any one time on the Merced bus. This tlb purge
* synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. We also need to have
- * preemption disabled on uniprocessor machines, and spin_lock does that
- * nicely.
+ * it on all systems not just the N class.
*/
extern spinlock_t pa_tlb_lock;
-#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
-#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
+#define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags)
+#define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags)
extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);
@@ -63,14 +61,16 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
+ unsigned long flags;
+
/* For one page, it's not worth testing the split_tlb variable */
mb();
mtsp(vma->vm_mm->context,1);
- purge_tlb_start();
+ purge_tlb_start(flags);
pdtlb(addr);
pitlb(addr);
- purge_tlb_end();
+ purge_tlb_end(flags);
}
void __flush_tlb_range(unsigned long sid,
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index ef26b009dc5d..f3d3b8b012c4 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -807,8 +807,12 @@
#define __NR_dup3 (__NR_Linux + 312)
#define __NR_pipe2 (__NR_Linux + 313)
#define __NR_inotify_init1 (__NR_Linux + 314)
+#define __NR_preadv (__NR_Linux + 315)
+#define __NR_pwritev (__NR_Linux + 316)
+#define __NR_rt_tgsigqueueinfo (__NR_Linux + 317)
+#define __NR_perf_counter_open (__NR_Linux + 318)
-#define __NR_Linux_syscalls (__NR_inotify_init1 + 1)
+#define __NR_Linux_syscalls (__NR_perf_counter_open + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 837530ea32e7..b6ed34de14e1 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -1,5 +1,4 @@
-/* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -398,12 +397,13 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm);
void clear_user_page_asm(void *page, unsigned long vaddr)
{
+ unsigned long flags;
/* This function is implemented in assembly in pacache.S */
extern void __clear_user_page_asm(void *page, unsigned long vaddr);
- purge_tlb_start();
+ purge_tlb_start(flags);
__clear_user_page_asm(page, vaddr);
- purge_tlb_end();
+ purge_tlb_end(flags);
}
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
@@ -444,20 +444,24 @@ extern void clear_user_page_asm(void *page, unsigned long vaddr);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
+ unsigned long flags;
+
purge_kernel_dcache_page((unsigned long)page);
- purge_tlb_start();
+ purge_tlb_start(flags);
pdtlb_kernel(page);
- purge_tlb_end();
+ purge_tlb_end(flags);
clear_user_page_asm(page, vaddr);
}
EXPORT_SYMBOL(clear_user_page);
void flush_kernel_dcache_page_addr(void *addr)
{
+ unsigned long flags;
+
flush_kernel_dcache_page_asm(addr);
- purge_tlb_start();
+ purge_tlb_start(flags);
pdtlb_kernel(addr);
- purge_tlb_end();
+ purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
@@ -490,8 +494,10 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
+ unsigned long flags;
+
mtsp(sid, 1);
- purge_tlb_start();
+ purge_tlb_start(flags);
if (split_tlb) {
while (npages--) {
pdtlb(start);
@@ -504,7 +510,7 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
start += PAGE_SIZE;
}
}
- purge_tlb_end();
+ purge_tlb_end(flags);
}
}
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index bd1f7f1ff74e..d228d8237879 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -170,23 +170,27 @@ static void __init pagezero_memconfig(void)
static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
- pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+ pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
unsigned long bytecnt;
unsigned long temp; /* 64-bit scratch value */
long status; /* PDC return value status */
struct parisc_device *dev;
+ pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+ if (!pa_pdc_cell)
+ panic("couldn't allocate memory for PDC_PAT_CELL!");
+
/* return cell module (PA or Processor view) */
status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
- PA_VIEW, &pa_pdc_cell);
+ PA_VIEW, pa_pdc_cell);
if (status != PDC_OK) {
/* no more cell modules or error */
return status;
}
- temp = pa_pdc_cell.cba;
- dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
+ temp = pa_pdc_cell->cba;
+ dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
if (!dev) {
return PDC_OK;
}
@@ -203,8 +207,8 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
/* save generic info returned from the call */
/* REVISIT: who is the consumer of this? not sure yet... */
- dev->mod_info = pa_pdc_cell.mod_info; /* pass to PAT_GET_ENTITY() */
- dev->pmod_loc = pa_pdc_cell.mod_location;
+ dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
+ dev->pmod_loc = pa_pdc_cell->mod_location;
register_parisc_device(dev); /* advertise device */
@@ -216,14 +220,14 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
case PAT_ENTITY_PROC:
printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
- pa_pdc_cell.mod[0]);
+ pa_pdc_cell->mod[0]);
break;
case PAT_ENTITY_MEM:
printk(KERN_DEBUG
"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
- pa_pdc_cell.mod[0], pa_pdc_cell.mod[1],
- pa_pdc_cell.mod[2]);
+ pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
+ pa_pdc_cell->mod[2]);
break;
case PAT_ENTITY_CA:
printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
@@ -243,23 +247,26 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
print_ranges:
pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
IO_VIEW, &io_pdc_cell);
- printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell.mod[1]);
- for (i = 0; i < pa_pdc_cell.mod[1]; i++) {
+ printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
+ for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
printk(KERN_DEBUG
" PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
- i, pa_pdc_cell.mod[2 + i * 3], /* type */
- pa_pdc_cell.mod[3 + i * 3], /* start */
- pa_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
+ i, pa_pdc_cell->mod[2 + i * 3], /* type */
+ pa_pdc_cell->mod[3 + i * 3], /* start */
+ pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
printk(KERN_DEBUG
" IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
- i, io_pdc_cell.mod[2 + i * 3], /* type */
- io_pdc_cell.mod[3 + i * 3], /* start */
- io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
+ i, io_pdc_cell->mod[2 + i * 3], /* type */
+ io_pdc_cell->mod[3 + i * 3], /* start */
+ io_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
}
printk(KERN_DEBUG "\n");
break;
}
#endif /* DEBUG_PAT */
+
+ kfree(pa_pdc_cell);
+
return PDC_OK;
}
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 8007f1e65729..330f536a9324 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
if (CHECK_IRQ_PER_CPU(irq)) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
- cpumask_setall(&irq_desc[irq].affinity);
+ cpumask_setall(irq_desc[irq].affinity);
return -EINVAL;
}
@@ -138,13 +138,13 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
if (cpu_dest < 0)
return -1;
- cpumask_copy(&irq_desc[irq].affinity, dest);
+ cpumask_copy(irq_desc[irq].affinity, dest);
return 0;
}
#endif
-static struct hw_interrupt_type cpu_interrupt_type = {
+static struct irq_chip cpu_interrupt_type = {
.typename = "CPU",
.startup = cpu_startup_irq,
.shutdown = cpu_disable_irq,
@@ -299,7 +299,7 @@ int txn_alloc_irq(unsigned int bits_wide)
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
- cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu));
+ cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
@@ -356,7 +356,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
irq = eirr_to_irq(eirr_val);
#ifdef CONFIG_SMP
- cpumask_copy(&dest, &irq_desc[irq].affinity);
+ cpumask_copy(&dest, irq_desc[irq].affinity);
if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 7d927eac932b..c07f618ff7da 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -90,12 +90,14 @@ static inline int map_pte_uncached(pte_t * pte,
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
+ unsigned long flags;
+
if (!pte_none(*pte))
printk(KERN_ERR "map_pte_uncached: page already exists\n");
set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
- purge_tlb_start();
+ purge_tlb_start(flags);
pdtlb_kernel(orig_vaddr);
- purge_tlb_end();
+ purge_tlb_end(flags);
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
(*paddr_ptr) += PAGE_SIZE;
@@ -168,11 +170,13 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
+ unsigned long flags;
pte_t page = *pte;
+
pte_clear(&init_mm, vaddr, pte);
- purge_tlb_start();
+ purge_tlb_start(flags);
pdtlb_kernel(orig_vaddr);
- purge_tlb_end();
+ purge_tlb_end(flags);
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
pte++;
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 6936386c9861..f7064abc3bb6 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -1,5 +1,4 @@
-/* $Id: pci.c,v 1.6 2000/01/29 00:12:05 grundler Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index e09d0f7fb6b0..c8fb61ed32f4 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -1,5 +1,4 @@
-/* $Id: processor.c,v 1.1 2002/07/20 16:27:06 rhirst Exp $
- *
+/*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
@@ -121,22 +120,28 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
if (is_pdc_pat()) {
ulong status;
unsigned long bytecnt;
- pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+ pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
struct pdc_pat_cpu_num cpu_info;
#endif
+ pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+ if (!pa_pdc_cell)
+ panic("couldn't allocate memory for PDC_PAT_CELL!");
+
status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
- dev->mod_index, PA_VIEW, &pa_pdc_cell);
+ dev->mod_index, PA_VIEW, pa_pdc_cell);
BUG_ON(PDC_OK != status);
/* verify it's the same as what do_pat_inventory() found */
- BUG_ON(dev->mod_info != pa_pdc_cell.mod_info);
- BUG_ON(dev->pmod_loc != pa_pdc_cell.mod_location);
+ BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
+ BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);
+
+ txn_addr = pa_pdc_cell->mod[0]; /* id_eid for IO sapic */
- txn_addr = pa_pdc_cell.mod[0]; /* id_eid for IO sapic */
+ kfree(pa_pdc_cell);
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 82131ca8e05c..cb71f3dac995 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -1,5 +1,4 @@
-/* $Id: setup.c,v 1.8 2000/02/02 04:42:38 prumpf Exp $
- *
+/*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 1adb40c81669..92a0acaa0d12 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -174,68 +174,6 @@ asmlinkage long sys32_sched_rr_get_interval(pid_t pid,
return ret;
}
-/*** copied from mips64 ***/
-/*
- * Ooo, nasty. We need here to frob 32-bit unsigned longs to
- * 64-bit unsigned longs.
- */
-
-static inline int
-get_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
-{
- n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
- if (ufdset) {
- unsigned long odd;
-
- if (!access_ok(VERIFY_WRITE, ufdset, n*sizeof(u32)))
- return -EFAULT;
-
- odd = n & 1UL;
- n &= ~1UL;
- while (n) {
- unsigned long h, l;
- __get_user(l, ufdset);
- __get_user(h, ufdset+1);
- ufdset += 2;
- *fdset++ = h << 32 | l;
- n -= 2;
- }
- if (odd)
- __get_user(*fdset, ufdset);
- } else {
- /* Tricky, must clear full unsigned long in the
- * kernel fdset at the end, this makes sure that
- * actually happens.
- */
- memset(fdset, 0, ((n + 1) & ~1)*sizeof(u32));
- }
- return 0;
-}
-
-static inline void
-set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
-{
- unsigned long odd;
- n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
-
- if (!ufdset)
- return;
-
- odd = n & 1UL;
- n &= ~1UL;
- while (n) {
- unsigned long h, l;
- l = *fdset++;
- h = l >> 32;
- __put_user(l, ufdset);
- __put_user(h, ufdset+1);
- ufdset += 2;
- n -= 2;
- }
- if (odd)
- __put_user(*fdset, ufdset);
-}
-
struct msgbuf32 {
int mtype;
char mtext[1];
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 03b9a01bc16c..cf145eb026b3 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -413,6 +413,10 @@
ENTRY_SAME(dup3)
ENTRY_SAME(pipe2)
ENTRY_SAME(inotify_init1)
+ ENTRY_COMP(preadv) /* 315 */
+ ENTRY_COMP(pwritev)
+ ENTRY_COMP(rt_tgsigqueueinfo)
+ ENTRY_SAME(perf_counter_open)
/* Nothing yet */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index d4dd05674c62..a79c6f9e7e2c 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -56,9 +56,9 @@ static unsigned long clocktick __read_mostly; /* timer cycles per tick */
*/
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
- unsigned long now;
+ unsigned long now, now2;
unsigned long next_tick;
- unsigned long cycles_elapsed, ticks_elapsed;
+ unsigned long cycles_elapsed, ticks_elapsed = 1;
unsigned long cycles_remainder;
unsigned int cpu = smp_processor_id();
struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
@@ -71,44 +71,24 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
/* Initialize next_tick to the expected tick time. */
next_tick = cpuinfo->it_value;
- /* Get current interval timer.
- * CR16 reads as 64 bits in CPU wide mode.
- * CR16 reads as 32 bits in CPU narrow mode.
- */
+ /* Get current cycle counter (Control Register 16). */
now = mfctl(16);
cycles_elapsed = now - next_tick;
- if ((cycles_elapsed >> 5) < cpt) {
+ if ((cycles_elapsed >> 6) < cpt) {
/* use "cheap" math (add/subtract) instead
* of the more expensive div/mul method
*/
cycles_remainder = cycles_elapsed;
- ticks_elapsed = 1;
while (cycles_remainder > cpt) {
cycles_remainder -= cpt;
ticks_elapsed++;
}
} else {
+ /* TODO: Reduce this to one fdiv op */
cycles_remainder = cycles_elapsed % cpt;
- ticks_elapsed = 1 + cycles_elapsed / cpt;
- }
-
- /* Can we differentiate between "early CR16" (aka Scenario 1) and
- * "long delay" (aka Scenario 3)? I don't think so.
- *
- * We expected timer_interrupt to be delivered at least a few hundred
- * cycles after the IT fires. But it's arbitrary how much time passes
- * before we call it "late". I've picked one second.
- */
- if (unlikely(ticks_elapsed > HZ)) {
- /* Scenario 3: very long delay? bad in any case */
- printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
- " cycles %lX rem %lX "
- " next/now %lX/%lX\n",
- cpu,
- cycles_elapsed, cycles_remainder,
- next_tick, now );
+ ticks_elapsed += cycles_elapsed / cpt;
}
/* convert from "division remainder" to "remainder of clock tick" */
@@ -122,18 +102,56 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
cpuinfo->it_value = next_tick;
- /* Skip one clocktick on purpose if we are likely to miss next_tick.
- * We want to avoid the new next_tick being less than CR16.
- * If that happened, itimer wouldn't fire until CR16 wrapped.
- * We'll catch the tick we missed on the tick after that.
+ /* Program the IT when to deliver the next interrupt.
+ * Only bottom 32-bits of next_tick are writable in CR16!
*/
- if (!(cycles_remainder >> 13))
- next_tick += cpt;
-
- /* Program the IT when to deliver the next interrupt. */
- /* Only bottom 32-bits of next_tick are written to cr16. */
mtctl(next_tick, 16);
+ /* Skip one clocktick on purpose if we missed next_tick.
+ * The new CR16 must be "later" than the current CR16, otherwise
+ * itimer would not fire until CR16 wrapped - e.g. 4 seconds
+ * later on a 1GHz processor. We'll account for the missed
+ * tick on the next timer interrupt.
+ *
+ * "next_tick - now" will always give the difference regardless
+ * if one or the other wrapped. If "now" is "bigger" we'll end up
+ * with a very large unsigned number.
+ */
+ now2 = mfctl(16);
+ if (next_tick - now2 > cpt)
+ mtctl(next_tick+cpt, 16);
+
+#if 1
+/*
+ * GGG: DEBUG code for how many cycles programming CR16 used.
+ */
+ if (unlikely(now2 - now > 0x3000)) /* 12K cycles */
+ printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
+ " cyc %lX rem %lX "
+ " next/now %lX/%lX\n",
+ cpu, now2 - now, cycles_elapsed, cycles_remainder,
+ next_tick, now );
+#endif
+
+ /* Can we differentiate between "early CR16" (aka Scenario 1) and
+ * "long delay" (aka Scenario 3)? I don't think so.
+ *
+ * Timer_interrupt will be delivered at least a few hundred cycles
+ * after the IT fires. But it's arbitrary how much time passes
+ * before we call it "late". I've picked one second.
+ *
+ * It's important NO printk's are between reading CR16 and
+ * setting up the next value. May introduce huge variance.
+ */
+ if (unlikely(ticks_elapsed > HZ)) {
+ /* Scenario 3: very long delay? bad in any case */
+ printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
+ " cycles %lX rem %lX "
+ " next/now %lX/%lX\n",
+ cpu,
+ cycles_elapsed, cycles_remainder,
+ next_tick, now );
+ }
/* Done mucking with unreliable delivery of interrupts.
* Go do system house keeping.
@@ -173,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
/* clock source code */
-static cycle_t read_cr16(void)
+static cycle_t read_cr16(struct clocksource *cs)
{
return get_cycles();
}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index fd2cc4fd2b65..ccf58341845a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -240,6 +240,7 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
*(.exitcall.exit)
+ *(.discard)
#ifdef CONFIG_64BIT
/* temporary hack until binutils is fixed to not emit these
* for static binaries
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
index 462696d30d3b..ae66d31f9ecf 100644
--- a/arch/parisc/lib/checksum.c
+++ b/arch/parisc/lib/checksum.c
@@ -13,8 +13,6 @@
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
- *
- * $Id: checksum.c,v 1.3 1997/12/01 17:57:34 ralf Exp $
*/
#include <linux/module.h>
#include <linux/types.h>
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index bbda909c866e..abf41f4632a9 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -405,7 +405,7 @@ byte_copy:
unaligned_copy:
/* possibly we are aligned on a word, but not on a double... */
- if (likely(t1 & (sizeof(unsigned int)-1)) == 0) {
+ if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
t2 = src & (sizeof(unsigned int) - 1);
if (unlikely(t2 != 0)) {
diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
index 66c8a9f6a27e..3ca1c6149218 100644
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -40,7 +40,7 @@
* END_DESC
*/
-
+#include <linux/kernel.h>
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index bfb6dd6ab380..c6afbfc95770 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -1,5 +1,4 @@
-/* $Id: fault.c,v 1.5 2000/01/26 16:20:29 jsm Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 4356ceb1e366..b0831d9e35cb 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -370,34 +370,22 @@ static void __init setup_bootmem(void)
void free_initmem(void)
{
- unsigned long addr, init_begin, init_end;
-
- printk(KERN_INFO "Freeing unused kernel memory: ");
+ unsigned long addr;
+ unsigned long init_begin = (unsigned long)__init_begin;
+ unsigned long init_end = (unsigned long)__init_end;
#ifdef CONFIG_DEBUG_KERNEL
/* Attempt to catch anyone trying to execute code here
* by filling the page with BRK insns.
- *
- * If we disable interrupts for all CPUs, then IPI stops working.
- * Kinda breaks the global cache flushing.
*/
- local_irq_disable();
-
- memset(__init_begin, 0x00,
- (unsigned long)__init_end - (unsigned long)__init_begin);
-
- flush_data_cache();
- asm volatile("sync" : : );
- flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
- asm volatile("sync" : : );
-
- local_irq_enable();
+ memset((void *)init_begin, 0x00, init_end - init_begin);
+ flush_icache_range(init_begin, init_end);
#endif
/* align __init_begin and __init_end to page size,
ignoring linker script where we might have tried to save RAM */
- init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
- init_end = PAGE_ALIGN((unsigned long)(__init_end));
+ init_begin = PAGE_ALIGN(init_begin);
+ init_end = PAGE_ALIGN(init_end);
for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
@@ -409,7 +397,8 @@ void free_initmem(void)
/* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
- printk("%luk freed\n", (init_end - init_begin) >> 10);
+ printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
+ (init_end - init_begin) >> 10);
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bf6cedfa05db..61bbffa2fe60 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -46,6 +46,9 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
bool
default y
+config HAVE_LEGACY_PER_CPU_AREA
+ def_bool PPC64
+
config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64
@@ -62,7 +65,6 @@ config HAVE_LATENCYTOP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
bool
- depends on PPC64
default y
config LOCKDEP_SUPPORT
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 2f50acd11a60..3d80c3e9cf60 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -36,3 +36,13 @@ zImage.pseries
zconf.h
zlib.h
zutil.h
+fdt.c
+fdt.h
+fdt_ro.c
+fdt_rw.c
+fdt_strerror.c
+fdt_sw.c
+fdt_wip.c
+libfdt.h
+libfdt_internal.h
+
diff --git a/arch/powerpc/boot/dts/amigaone.dts b/arch/powerpc/boot/dts/amigaone.dts
index 26549fca2ed4..49ac36b16dd7 100644
--- a/arch/powerpc/boot/dts/amigaone.dts
+++ b/arch/powerpc/boot/dts/amigaone.dts
@@ -70,8 +70,8 @@
devsel-speed = <0x00000001>;
min-grant = <0>;
max-latency = <0>;
- /* First 64k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */
- ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00010000>;
+ /* First 4k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */
+ ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00001000>;
interrupt-parent = <&i8259>;
#interrupt-cells = <2>;
#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index a8dcb018c4a5..a680165292f2 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -253,6 +253,7 @@
/* Filled in by U-Boot */
clock-frequency = <0>;
status = "disabled";
+ sdhci,1-bit-only;
};
crypto@30000 {
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 2ff798744c1d..7685ffde8821 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -598,8 +598,6 @@ typedef struct risc_timer_pram {
#define CICR_IEN ((uint)0x00000080) /* Int. enable */
#define CICR_SPS ((uint)0x00000001) /* SCC Spread */
-#define IMAP_ADDR (get_immrbase())
-
#define CPM_PIN_INPUT 0
#define CPM_PIN_OUTPUT 1
#define CPM_PIN_PRIMARY 0
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 7d2277cef09a..e3e06e0f7fc0 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -30,4 +30,7 @@ dev_archdata_get_node(const struct dev_archdata *ad)
return ad->of_node;
}
+struct pdev_archdata {
+};
+
#endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887c3c0c..b44aaabdd1a6 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+ if (dma_ops->sync_single_range_for_cpu)
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
size, direction);
}
@@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(dev, dma_handle,
0, size, direction);
}
@@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+ if (dma_ops->sync_sg_for_cpu)
+ dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+ if (dma_ops->sync_sg_for_device)
+ dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+ if (dma_ops->sync_single_range_for_cpu)
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle,
offset, size, direction);
}
@@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 684a73f4324f..a74c4ee6c020 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -22,9 +22,7 @@
#ifdef __KERNEL__
-#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
@@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table;
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+ pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
static inline void *kmap(struct page *page)
{
@@ -79,62 +80,11 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
- unsigned int idx;
- unsigned long vaddr;
-
- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- debug_kmap_atomic(type);
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
- __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
- local_flush_tlb_page(NULL, vaddr);
-
- return (void*) vaddr;
-}
-
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
return kmap_atomic_prot(page, type, kmap_prot);
}
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- return;
- }
-
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
-#endif
- pagefault_enable();
-}
-
static inline struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long) ptr;
@@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
return pte_page(*pte);
}
+
#define flush_cache_kmaps() flush_cache_all()
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 867ab8ed69b3..8b505eaaa38a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags)
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x) mtmsr(x)
-#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x) mtmsr(x)
-#define local_irq_restore(flags) mtmsr(flags)
+#define raw_local_irq_restore(flags) mtmsr(flags)
#endif
-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
__asm__ __volatile__("wrteei 0": : :"memory");
@@ -86,7 +86,7 @@ static inline void local_irq_disable(void)
#endif
}
-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
__asm__ __volatile__("wrteei 1": : :"memory");
@@ -98,7 +98,7 @@ static inline void local_irq_enable(void)
#endif
}
-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
{
unsigned long msr;
msr = mfmsr();
@@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags)
#endif
}
-#define local_save_flags(flags) ((flags) = mfmsr())
-#define local_irq_save(flags) local_irq_save_ptr(&flags)
-#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+#define raw_local_save_flags(flags) ((flags) = mfmsr())
+#define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0)
-#define hard_irq_enable() local_irq_enable()
-#define hard_irq_disable() local_irq_disable()
+#define hard_irq_disable() raw_local_irq_disable()
static inline int irqs_disabled_flags(unsigned long flags)
{
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index dfdf13c9fefd..c9c930ed11d7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,7 +34,8 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
/* We don't currently support large pages. */
-#define KVM_PAGES_PER_HPAGE (1<<31)
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
struct kvm;
struct kvm_run;
@@ -153,7 +154,6 @@ struct kvm_vcpu_arch {
u32 pid;
u32 swap_pid;
- u32 pvr;
u32 ccr0;
u32 ccr1;
u32 dbcr0;
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index d9483c504d2d..36057c821ff4 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -40,7 +40,6 @@ struct pci_dev;
*/
#define pcibios_assign_all_busses() \
(ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
-#define pcibios_scan_all_fns(a, b) 0
static inline void pcibios_set_master(struct pci_dev *dev)
{
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index 8ccd4e155768..0ea0639fcf75 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -61,6 +61,8 @@ struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+#define PERF_COUNTER_INDEX_OFFSET 1
+
/*
* Only override the default definitions in include/linux/perf_counter.h
* if we have hardware PMU support.
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index e05d26fa372f..82b72207c51c 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,8 @@
* generic accessors and iterators here
*/
#define __real_pte(e,p) ((real_pte_t) { \
- (e), pte_val(*((p) + PTRS_PER_PTE)) })
+ (e), ((e) & _PAGE_COMBO) ? \
+ (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define __rpte_to_pte(r) ((r).pte)
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c12339b304..168fce726201 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
unsigned long entry; /* physical address pointer */
unsigned long base; /* physical address pointer */
unsigned long size;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct rtas_args args;
struct device_node *dev; /* virtual address pointer */
};
@@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
(devfn << 8) | (reg & 0xff);
}
+extern void __cpuinit rtas_give_timebase(void);
+extern void __cpuinit rtas_take_timebase(void);
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index c25f73d1d842..435b58a848da 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -146,7 +146,7 @@ extern void smp_generic_take_timebase(void);
extern struct smp_ops_t *smp_ops;
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 054a16d68082..0b3a0183df3a 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -17,11 +17,6 @@ static inline int cpu_to_node(int cpu)
#define parent_node(node) (node)
-static inline cpumask_t node_to_cpumask(int node)
-{
- return numa_cpumask_lookup_table[node];
-}
-
#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
int of_node_to_nid(struct device_node *device);
@@ -36,11 +31,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
}
#endif
-#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
- CPU_MASK_ALL : \
- node_to_cpumask(pcibus_to_node(bus)) \
- )
-
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
@@ -105,8 +95,6 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#ifdef CONFIG_PPC64
#include <asm/smp.h>
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
-#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4dd38f129153..3cadba60a4b6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -191,11 +191,49 @@ transfer_to_handler_cont:
mflr r9
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ lis r12,reenable_mmu@h
+ ori r12,r12,reenable_mmu@l
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r10
+ SYNC
+ RFI
+reenable_mmu: /* re-enable mmu so we can */
+ mfmsr r10
+ lwz r12,_MSR(r1)
+ xor r10,r10,r12
+ andi. r10,r10,MSR_EE /* Did EE change? */
+ beq 1f
+
+ /* Save handler and return address into the 2 unused words
+ * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
+ * else can be recovered from the pt_regs except r3 which for
+ * normal interrupts has been set to pt_regs and for syscalls
+ * is an argument, so we temporarily use ORIG_GPR3 to save it
+ */
+ stw r9,8(r1)
+ stw r11,12(r1)
+ stw r3,ORIG_GPR3(r1)
+ bl trace_hardirqs_off
+ lwz r0,GPR0(r1)
+ lwz r3,ORIG_GPR3(r1)
+ lwz r4,GPR4(r1)
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ lwz r9,8(r1)
+ lwz r11,12(r1)
+1: mtctr r11
+ mtlr r9
+ bctr /* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4: rlwinm r12,r12,0,~_TLF_NAPPING
@@ -251,6 +289,31 @@ _GLOBAL(DoSyscall)
#ifdef SHOW_SYSCALLS
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Return from syscalls can (and generally will) hard enable
+ * interrupts. You aren't supposed to call a syscall with
+ * interrupts disabled in the first place. However, to ensure
+ * that we get it right vs. lockdep if it happens, we force
+ * that hard enable here with appropriate tracing if we see
+ * that we have been called with interrupts off
+ */
+ mfmsr r11
+ andi. r12,r11,MSR_EE
+ bne+ 1f
+ /* We came in with interrupts disabled, we enable them now */
+ bl trace_hardirqs_on
+ mfmsr r11
+ lwz r0,GPR0(r1)
+ lwz r3,GPR3(r1)
+ lwz r4,GPR4(r1)
+ ori r11,r11,MSR_EE
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ mtmsr r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
@@ -275,6 +338,7 @@ ret_from_syscall:
rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
/* disable interrupts so current_thread_info()->flags can't change */
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ /* Note: We don't bother telling lockdep about it */
SYNC
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
@@ -288,6 +352,19 @@ ret_from_syscall:
oris r11,r11,0x1000 /* Set SO bit in CR */
stw r11,_CCR(r1)
syscall_exit_cont:
+ lwz r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* If we are going to return from the syscall with interrupts
+ * off, we trace that here. It shouldn't happen though, but we
+ * want to catch the bugger if it does, right?
+ */
+ andi. r10,r8,MSR_EE
+ bne+ 1f
+ stw r3,GPR3(r1)
+ bl trace_hardirqs_off
+ lwz r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* If the process has its own DBCR0 value, load it up. The internal
debug mode bit tells us that dbcr0 should be loaded. */
@@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
mtlr r4
mtcr r5
lwz r7,_NIP(r1)
- lwz r8,_MSR(r1)
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
@@ -394,7 +470,9 @@ syscall_exit_work:
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq ret_from_except
- /* Re-enable interrupts */
+ /* Re-enable interrupts. There is no need to trace that with
+ * lockdep as we are supposed to have IRQs on at this point
+ */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10)
@@ -705,6 +783,7 @@ ret_from_except:
/* Hard-disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt. */
+ /* Note: We don't bother telling lockdep about it */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC /* Some chip revs have problems here... */
MTMSRD(r10) /* disable interrupts */
@@ -744,11 +823,24 @@ resume_kernel:
beq+ restore
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep thinks irqs are enabled, we need to call
+ * preempt_schedule_irq with IRQs off, so we inform lockdep
+ * now that we -did- turn them off already
+ */
+ bl trace_hardirqs_off
+#endif
1: bl preempt_schedule_irq
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+ */
+ bl trace_hardirqs_on
+#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
@@ -765,6 +857,28 @@ restore:
stw r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */
+
+ lwz r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep doesn't know about the fact that IRQs are temporarily turned
+ * off in this assembly code while peeking at TI_FLAGS() and such. However
+ * we need to inform it if the exception turned interrupts off, and we
+ * are about to turn them back on.
+ *
+ * The problem here sadly is that we don't know whether the exception was
+ * one that turned interrupts off or not. So we always tell lockdep about
+ * turning them on here when we go back to wherever we came from with EE
+ * on, even if that may mean some redundant calls being tracked. Maybe later
+ * we could encode what the exception did somewhere or test the exception
+ * type in the pt_regs but that sounds overkill
+ */
+ andi. r10,r9,MSR_EE
+ beq 1f
+ bl trace_hardirqs_on
+ lwz r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
@@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
- lwz r9,_MSR(r1)
andi. r10,r9,MSR_RI /* check if this exception occurred */
beql nonrecoverable /* at a bad place (MSR:RI = 0) */
@@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
MTMSRD(r10) /* clear the RI bit */
.globl exc_exit_restart
exc_exit_restart:
- lwz r9,_MSR(r1)
lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
mtspr SPRN_SRR0,r12
@@ -1035,11 +1147,18 @@ do_work: /* r10 contains MSR_KERNEL here */
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
+ /* Note: We don't need to inform lockdep that we are enabling
+ * interrupts here. As far as it knows, they are already enabled
+ */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
bl schedule
recheck:
+ /* Note: And we don't tell it we are disabling them again
+ * either. Those disable/enable cycles used to peek at
+ * TI_FLAGS aren't advertised.
+ */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 48469463f89e..fc2132942754 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -1124,9 +1124,8 @@ mmu_off:
RFI
/*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to PAGE_OFFSET. From this point on we can't safely
- * call OF any more.
+ * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
+ * (we keep one for debugging) and on others, we use one 256M BAT.
*/
initial_bats:
lis r11,PAGE_OFFSET@h
@@ -1136,12 +1135,16 @@ initial_bats:
bne 4f
ori r11,r11,4 /* set up BAT registers for 601 */
li r8,0x7f /* valid, block length = 8MB */
- oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
- oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
mtspr SPRN_IBAT0L,r8 /* lower BAT register */
- mtspr SPRN_IBAT1U,r9
- mtspr SPRN_IBAT1L,r10
+ addis r11,r11,0x800000@h
+ addis r8,r8,0x800000@h
+ mtspr SPRN_IBAT1U,r11
+ mtspr SPRN_IBAT1L,r8
+ addis r11,r11,0x800000@h
+ addis r8,r8,0x800000@h
+ mtspr SPRN_IBAT2U,r11
+ mtspr SPRN_IBAT2L,r8
isync
blr
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index fa983a59c4ce..a359cb08e900 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -76,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
dev->dev.archdata.of_node = np;
if (bus_id)
- dev_set_name(&dev->dev, bus_id);
+ dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(dev);
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 5d755ef7ac8f..5a9f5cbd40a4 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -358,6 +358,7 @@ static struct power_pmu power7_pmu = {
.get_constraint = power7_get_constraint,
.get_alternatives = power7_get_alternatives,
.disable_pmc = power7_disable_pmc,
+ .flags = PPMU_ALT_SIPR,
.n_generic = ARRAY_SIZE(power7_generic_events),
.generic_events = power7_generic_events,
.cache_events = &power7_cache_events,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3e7135bbe40f..892a9f2e6d76 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -528,7 +528,7 @@ void show_regs(struct pt_regs * regs)
for (i = 0; i < 32; i++) {
if ((i % REGS_PER_LINE) == 0)
- printk("\n" KERN_INFO "GPR%02d: ", i);
+ printk("\nGPR%02d: ", i);
printk(REG " ", regs->gpr[i]);
if (i == LAST_VOLATILE && !FULL_REGS(regs))
break;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index ee4c7609b649..c434823b8c83 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -38,9 +38,10 @@
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <asm/atomic.h>
+#include <asm/time.h>
struct rtas_t rtas = {
- .lock = SPIN_LOCK_UNLOCKED
+ .lock = __RAW_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);
@@ -67,6 +68,28 @@ unsigned long rtas_rmo_buf;
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);
+/* RTAS uses home-made raw locking instead of spin_lock_irqsave
+ * because RTAS calls can be made from really nasty contexts,
+ * such as having the timebase stopped, which would lock up with
+ * normal locks and spinlock debugging enabled
+ */
+static unsigned long lock_rtas(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ __raw_spin_lock_flags(&rtas.lock, flags);
+ return flags;
+}
+
+static void unlock_rtas(unsigned long flags)
+{
+ __raw_spin_unlock(&rtas.lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
/*
* call_rtas_display_status and call_rtas_display_status_delay
* are designed only for very early low-level debugging, which
@@ -79,7 +102,7 @@ static void call_rtas_display_status(char c)
if (!rtas.base)
return;
- spin_lock_irqsave(&rtas.lock, s);
+ s = lock_rtas();
args->token = 10;
args->nargs = 1;
@@ -89,7 +112,7 @@ static void call_rtas_display_status(char c)
enter_rtas(__pa(args));
- spin_unlock_irqrestore(&rtas.lock, s);
+ unlock_rtas(s);
}
static void call_rtas_display_status_delay(char c)
@@ -411,8 +434,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
return -1;
- /* Gotta do something different here, use global lock for now... */
- spin_lock_irqsave(&rtas.lock, s);
+ s = lock_rtas();
rtas_args = &rtas.args;
rtas_args->token = token;
@@ -439,8 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
outputs[i] = rtas_args->rets[i+1];
ret = (nret > 0)? rtas_args->rets[0]: 0;
- /* Gotta do something different here, use global lock for now... */
- spin_unlock_irqrestore(&rtas.lock, s);
+ unlock_rtas(s);
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -837,7 +858,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
buff_copy = get_errorlog_buffer();
- spin_lock_irqsave(&rtas.lock, flags);
+ flags = lock_rtas();
rtas.args = args;
enter_rtas(__pa(&rtas.args));
@@ -848,7 +869,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
if (args.rets[0] == -1)
errbuf = __fetch_rtas_last_error(buff_copy);
- spin_unlock_irqrestore(&rtas.lock, flags);
+ unlock_rtas(flags);
if (buff_copy) {
if (errbuf)
@@ -951,3 +972,33 @@ int __init early_init_dt_scan_rtas(unsigned long node,
/* break now */
return 1;
}
+
+static raw_spinlock_t timebase_lock;
+static u64 timebase = 0;
+
+void __cpuinit rtas_give_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ hard_irq_disable();
+ __raw_spin_lock(&timebase_lock);
+ rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
+ timebase = get_tb();
+ __raw_spin_unlock(&timebase_lock);
+
+ while (timebase)
+ barrier();
+ rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
+ local_irq_restore(flags);
+}
+
+void __cpuinit rtas_take_timebase(void)
+{
+ while (!timebase)
+ barrier();
+ __raw_spin_lock(&timebase_lock);
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ __raw_spin_unlock(&timebase_lock);
+}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 02fed27af7f6..a5e0bc727f03 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -434,7 +434,7 @@ void __init smp_setup_cpu_maps(void)
j, cpu, intserv[j]);
cpu_set(cpu, cpu_present_map);
set_hard_smp_processor_id(cpu, intserv[j]);
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_possible(cpu, true);
cpu++;
}
}
@@ -480,7 +480,7 @@ void __init smp_setup_cpu_maps(void)
maxcpus);
for (cpu = 0; cpu < maxcpus; cpu++)
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_possible(cpu, true);
out:
of_node_put(dn);
}
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d154248cf40..e1e3059cf34b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -119,6 +119,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
*/
notrace void __init machine_init(unsigned long dt_ptr)
{
+ lockdep_init();
+
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 65484b2200b3..d4f318721e65 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -68,7 +68,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
-static volatile unsigned int cpu_callin_map[NR_CPUS];
+/* Can't be static due to PowerMac hackery */
+volatile unsigned int cpu_callin_map[NR_CPUS];
int smt_enabled_at_boot = 1;
@@ -188,11 +189,11 @@ void arch_send_call_function_single_ipi(int cpu)
smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
unsigned int cpu;
- for_each_cpu_mask(cpu, mask)
+ for_each_cpu(cpu, mask)
smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 0362a891e54e..acb74a17bbbf 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -219,7 +219,7 @@ void udbg_init_pas_realmode(void)
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
#include <platforms/44x/44x.h>
-static int udbg_44x_as1_flush(void)
+static void udbg_44x_as1_flush(void)
{
if (udbg_comport) {
while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8ef8a14abc95..7fca9355fd3d 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -40,6 +40,7 @@ SECTIONS
/* Sections to be discarded. */
/DISCARD/ : {
*(.exitcall.exit)
+ *(.discard)
EXIT_DATA
}
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 0cef809cec21..f4d1b55aa70b 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -138,7 +138,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
}
-static int kvmppc_44x_init(void)
+static int __init kvmppc_44x_init(void)
{
int r;
@@ -149,7 +149,7 @@ static int kvmppc_44x_init(void)
return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
}
-static void kvmppc_44x_exit(void)
+static void __exit kvmppc_44x_exit(void)
{
kvmppc_booke_exit();
}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 4a16f472cc18..ff3cb63b8117 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -30,6 +30,7 @@
#include "timing.h"
#include "44x_tlb.h"
+#include "trace.h"
#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
@@ -263,7 +264,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
/* XXX set tlb_44x_index to stlb_index? */
- KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler);
+ trace_kvm_stlb_inval(stlb_index);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
@@ -365,8 +366,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
/* Insert shadow mapping into hardware TLB. */
kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
kvmppc_44x_tlbwe(victim, &stlbe);
- KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
- stlbe.word2, handler);
+ trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
+ stlbe.word2);
}
/* For a particular guest TLB entry, invalidate the corresponding host TLB
@@ -485,8 +486,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
}
- KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
- tlbe->word1, tlbe->word2, handler);
+ trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
+ tlbe->word2);
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
return EMULATE_DONE;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 5a152a52796f..c29926846613 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -2,8 +2,7 @@
# KVM configuration
#
-config HAVE_KVM_IRQCHIP
- bool
+source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -59,17 +58,6 @@ config KVM_E500
If unsure, say N.
-config KVM_TRACE
- bool "KVM trace support"
- depends on KVM && MARKERS && SYSFS
- select RELAY
- select DEBUG_FS
- default n
- ---help---
- This option allows reading a trace of kvm-related events through
- relayfs. Note the ABI is not considered stable and will be
- modified in future updates.
-
source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 459c7ee580f7..37655fe19f2f 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -8,7 +8,9 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
-common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o)
+CFLAGS_44x_tlb.o := -I.
+CFLAGS_e500_tlb.o := -I.
+CFLAGS_emulate.o := -I.
kvm-objs := $(common-objs-y) powerpc.o emulate.o
obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 642e4204cf25..e7bf4d029484 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -520,7 +520,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return kvmppc_core_vcpu_translate(vcpu, tr);
}
-int kvmppc_booke_init(void)
+int __init kvmppc_booke_init(void)
{
unsigned long ivor[16];
unsigned long max_ivor = 0;
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index d8067fd81cdd..64949eef43f1 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -60,9 +60,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
kvmppc_e500_tlb_setup(vcpu_e500);
- /* Use the same core vertion as host's */
- vcpu->arch.pvr = mfspr(SPRN_PVR);
-
return 0;
}
@@ -132,7 +129,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
-static int kvmppc_e500_init(void)
+static int __init kvmppc_e500_init(void)
{
int r, i;
unsigned long ivor[3];
@@ -160,7 +157,7 @@ static int kvmppc_e500_init(void)
return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE);
}
-static void kvmppc_e500_exit(void)
+static void __init kvmppc_e500_exit(void)
{
kvmppc_booke_exit();
}
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 3f760414b9f8..be95b8d8e3b7 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -180,6 +180,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_MMUCSR0:
vcpu->arch.gpr[rt] = 0; break;
+ case SPRN_MMUCFG:
+ vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break;
+
/* extra exceptions */
case SPRN_IVOR32:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 0e773fc2d5e4..fb1e1dc11ba5 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -22,6 +22,7 @@
#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
+#include "trace.h"
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
@@ -224,9 +225,8 @@ static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
stlbe->mas1 = 0;
- KVMTRACE_5D(STLB_INVAL, &vcpu_e500->vcpu, index_of(tlbsel, esel),
- stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7,
- handler);
+ trace_kvm_stlb_inval(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
+ stlbe->mas3, stlbe->mas7);
}
static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -269,7 +269,7 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
- tsized = (vcpu_e500->mas4 >> 8) & 0xf;
+ tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
@@ -309,7 +309,7 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500->shadow_pages[tlbsel][esel] = new_page;
/* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
- stlbe->mas1 = MAS1_TSIZE(BOOKE_PAGESZ_4K)
+ stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
stlbe->mas2 = (gvaddr & MAS2_EPN)
| e500_shadow_mas2_attrib(gtlbe->mas2,
@@ -319,9 +319,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500->vcpu.arch.msr & MSR_PR);
stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;
- KVMTRACE_5D(STLB_WRITE, &vcpu_e500->vcpu, index_of(tlbsel, esel),
- stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7,
- handler);
+ trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
+ stlbe->mas3, stlbe->mas7);
}
/* XXX only map the one-one case, for now use TLB0 */
@@ -535,9 +534,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
gtlbe->mas3 = vcpu_e500->mas3;
gtlbe->mas7 = vcpu_e500->mas7;
- KVMTRACE_5D(GTLB_WRITE, vcpu, vcpu_e500->mas0,
- gtlbe->mas1, gtlbe->mas2, gtlbe->mas3, gtlbe->mas7,
- handler);
+ trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
+ gtlbe->mas3, gtlbe->mas7);
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
@@ -545,7 +543,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
case 0:
/* TLB0 */
gtlbe->mas1 &= ~MAS1_TSIZE(~0);
- gtlbe->mas1 |= MAS1_TSIZE(BOOKE_PAGESZ_4K);
+ gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
stlbsel = 0;
sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);
@@ -679,14 +677,14 @@ void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
/* Insert large initial mapping for guest. */
tlbe = &vcpu_e500->guest_tlb[1][0];
- tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_256M);
+ tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
tlbe->mas2 = 0;
tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
tlbe->mas7 = 0;
/* 4K map for serial output. Used by kernel wrapper. */
tlbe = &vcpu_e500->guest_tlb[1][1];
- tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_4K);
+ tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
tlbe->mas7 = 0;
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 45b064b76906..d28e3010a5e2 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -16,7 +16,7 @@
#define __KVM_E500_TLB_H__
#include <linux/kvm_host.h>
-#include <asm/mmu-fsl-booke.h>
+#include <asm/mmu-book3e.h>
#include <asm/tlb.h>
#include <asm/kvm_e500.h>
@@ -59,7 +59,7 @@ extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
/* TLB helper functions */
static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
{
- return (tlbe->mas1 >> 8) & 0xf;
+ return (tlbe->mas1 >> 7) & 0x1f;
}
static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
@@ -70,7 +70,7 @@ static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
static inline u64 get_tlb_bytes(const struct tlbe *tlbe)
{
unsigned int pgsize = get_tlb_size(tlbe);
- return 1ULL << 10 << (pgsize << 1);
+ return 1ULL << 10 << pgsize;
}
static inline gva_t get_tlb_end(const struct tlbe *tlbe)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index a561d6e8da1c..7737146af3fb 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -29,6 +29,7 @@
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
+#include "trace.h"
#define OP_TRAP 3
@@ -187,7 +188,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_SRR1:
vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
case SPRN_PVR:
- vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+ vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break;
+ case SPRN_PIR:
+ vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyways.
@@ -417,7 +420,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
}
- KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated, entryexit);
+ trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);
if (advance)
vcpu->arch.pc += 4; /* Advance past emulated instruction. */
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2cf915e51e7e..0341391eff12 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -31,6 +31,9 @@
#include "timing.h"
#include "../mm/mmu_decl.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
return gfn;
@@ -122,13 +125,17 @@ struct kvm *kvm_arch_create_vm(void)
static void kvmppc_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_free(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_free(vcpu);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+
+ atomic_set(&kvm->online_vcpus, 0);
+ mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
new file mode 100644
index 000000000000..67f219de0455
--- /dev/null
+++ b/arch/powerpc/kvm/trace.h
@@ -0,0 +1,104 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoint for guest mode entry.
+ */
+TRACE_EVENT(kvm_ppc_instr,
+ TP_PROTO(unsigned int inst, unsigned long pc, unsigned int emulate),
+ TP_ARGS(inst, pc, emulate),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, inst )
+ __field( unsigned long, pc )
+ __field( unsigned int, emulate )
+ ),
+
+ TP_fast_assign(
+ __entry->inst = inst;
+ __entry->pc = pc;
+ __entry->emulate = emulate;
+ ),
+
+ TP_printk("inst %u pc 0x%lx emulate %u\n",
+ __entry->inst, __entry->pc, __entry->emulate)
+);
+
+TRACE_EVENT(kvm_stlb_inval,
+ TP_PROTO(unsigned int stlb_index),
+ TP_ARGS(stlb_index),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, stlb_index )
+ ),
+
+ TP_fast_assign(
+ __entry->stlb_index = stlb_index;
+ ),
+
+ TP_printk("stlb_index %u", __entry->stlb_index)
+);
+
+TRACE_EVENT(kvm_stlb_write,
+ TP_PROTO(unsigned int victim, unsigned int tid, unsigned int word0,
+ unsigned int word1, unsigned int word2),
+ TP_ARGS(victim, tid, word0, word1, word2),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, victim )
+ __field( unsigned int, tid )
+ __field( unsigned int, word0 )
+ __field( unsigned int, word1 )
+ __field( unsigned int, word2 )
+ ),
+
+ TP_fast_assign(
+ __entry->victim = victim;
+ __entry->tid = tid;
+ __entry->word0 = word0;
+ __entry->word1 = word1;
+ __entry->word2 = word2;
+ ),
+
+ TP_printk("victim %u tid %u w0 %u w1 %u w2 %u",
+ __entry->victim, __entry->tid, __entry->word0,
+ __entry->word1, __entry->word2)
+);
+
+TRACE_EVENT(kvm_gtlb_write,
+ TP_PROTO(unsigned int gtlb_index, unsigned int tid, unsigned int word0,
+ unsigned int word1, unsigned int word2),
+ TP_ARGS(gtlb_index, tid, word0, word1, word2),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, gtlb_index )
+ __field( unsigned int, tid )
+ __field( unsigned int, word0 )
+ __field( unsigned int, word1 )
+ __field( unsigned int, word2 )
+ ),
+
+ TP_fast_assign(
+ __entry->gtlb_index = gtlb_index;
+ __entry->tid = tid;
+ __entry->word0 = word0;
+ __entry->word1 = word1;
+ __entry->word2 = word2;
+ ),
+
+ TP_printk("gtlb_index %u tid %u w0 %u w1 %u w2 %u",
+ __entry->gtlb_index, __entry->tid, __entry->word0,
+ __entry->word1, __entry->word2)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 2d2192e48de7..3e68363405b7 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
new file mode 100644
index 000000000000..c2186c74c85a
--- /dev/null
+++ b/arch/powerpc/mm/highmem.c
@@ -0,0 +1,77 @@
+/*
+ * highmem.c: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ *
+ * Reworked for PowerPC by various contributors. Moved from
+ * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+{
+ unsigned int idx;
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ debug_kmap_atomic(type);
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+ __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
+ local_flush_tlb_page(NULL, vaddr);
+
+ return (void*) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
+ pagefault_enable();
+ return;
+ }
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_page(NULL, vaddr);
+#endif
+ pagefault_enable();
+}
+EXPORT_SYMBOL(kunmap_atomic);
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 98cd1dc2ae75..6e9b69c99856 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -31,7 +31,7 @@ struct stab_entry {
#define NR_STAB_CACHE_ENTRIES 8
static DEFINE_PER_CPU(long, stab_cache_ptr);
-static DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
+static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);
/*
* Create a segment table entry for the given esid/vsid pair.
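
The per-cpu change above moves the array dimension into the type argument of DEFINE_PER_CPU(), so the variable name is a plain identifier. A short sketch of declaring and touching such a per-cpu array with the standard per_cpu()/get_cpu() accessors (the names FOO_ENTRIES, foo_cache and foo_ptr are placeholders, not from this patch):

#include <linux/percpu.h>
#include <linux/smp.h>

#define FOO_ENTRIES 8

/* The array dimension travels with the type, not the name. */
static DEFINE_PER_CPU(long [FOO_ENTRIES], foo_cache);
static DEFINE_PER_CPU(long, foo_ptr);

static void foo_push(long val)
{
	int cpu = get_cpu();			/* pin to this CPU */
	long idx = per_cpu(foo_ptr, cpu);

	if (idx < FOO_ENTRIES) {
		per_cpu(foo_cache, cpu)[idx] = val;
		per_cpu(foo_ptr, cpu) = idx + 1;
	}
	put_cpu();
}
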
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 42e09a9f77e2..0362c88f47d7 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
#include <asm/machdep.h>
#include <asm/prom.h>
@@ -65,7 +66,6 @@ define_machine(warp) {
static u32 post_info;
-/* I am not sure this is the best place for this... */
static int __init warp_post_info(void)
{
struct device_node *np;
@@ -194,9 +194,9 @@ static int pika_setup_leds(void)
return 0;
}
-static void pika_setup_critical_temp(struct i2c_client *client)
+static void pika_setup_critical_temp(struct device_node *np,
+ struct i2c_client *client)
{
- struct device_node *np;
int irq, rc;
/* Do this before enabling critical temp interrupt since we
@@ -208,14 +208,7 @@ static void pika_setup_critical_temp(struct i2c_client *client)
i2c_smbus_write_byte_data(client, 2, 65); /* Thigh */
i2c_smbus_write_byte_data(client, 3, 0); /* Tlow */
- np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
- if (np == NULL) {
- printk(KERN_ERR __FILE__ ": Unable to find ad7414\n");
- return;
- }
-
irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
if (irq == NO_IRQ) {
printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n");
return;
@@ -244,32 +237,24 @@ static inline void pika_dtm_check_fan(void __iomem *fpga)
static int pika_dtm_thread(void __iomem *fpga)
{
- struct i2c_adapter *adap;
+ struct device_node *np;
struct i2c_client *client;
- /* We loop in case either driver was compiled as a module and
- * has not been insmoded yet.
- */
- while (!(adap = i2c_get_adapter(0))) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- }
-
- while (1) {
- list_for_each_entry(client, &adap->clients, list)
- if (client->addr == 0x4a)
- goto found_it;
+ np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
+ if (np == NULL)
+ return -ENOENT;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ client = of_find_i2c_device_by_node(np);
+ if (client == NULL) {
+ of_node_put(np);
+ return -ENOENT;
}
-found_it:
- pika_setup_critical_temp(client);
+ pika_setup_critical_temp(np, client);
- i2c_put_adapter(adap);
+ of_node_put(np);
- printk(KERN_INFO "PIKA DTM thread running.\n");
+ printk(KERN_INFO "Warp DTM thread running.\n");
while (!kthread_should_stop()) {
int val;
@@ -291,7 +276,6 @@ found_it:
return 0;
}
-
static int __init pika_dtm_start(void)
{
struct task_struct *dtm_thread;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 77f90b356356..60ed9c067b1d 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -285,6 +285,7 @@ static struct of_device_id mpc85xx_ids[] = {
{ .type = "qe", },
{ .compatible = "fsl,qe", },
{ .compatible = "gianfar", },
+ { .compatible = "fsl,rapidio-delta", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index cc0b0db8a6f3..62c592ede641 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -52,20 +52,19 @@ smp_85xx_kick_cpu(int nr)
pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
- local_irq_save(flags);
-
np = of_get_cpu_node(nr, NULL);
cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
if (cpu_rel_addr == NULL) {
printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
- local_irq_restore(flags);
return;
}
/* Map the spin table */
bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+ local_irq_save(flags);
+
out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
@@ -73,10 +72,10 @@ smp_85xx_kick_cpu(int nr)
while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
mdelay(1);
- iounmap(bptr_vaddr);
-
local_irq_restore(flags);
+ iounmap(bptr_vaddr);
+
pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
}
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index d0e8443b12c6..747d8fb3ab82 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -102,10 +102,11 @@ static struct of_device_id __initdata socrates_of_bus_ids[] = {
{},
};
-static void __init socrates_init(void)
+static int __init socrates_publish_devices(void)
{
- of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL);
+ return of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL);
}
+machine_device_initcall(socrates, socrates_publish_devices);
/*
* Called very early, device-tree isn't unflattened
@@ -124,7 +125,6 @@ define_machine(socrates) {
.name = "Socrates",
.probe = socrates_probe,
.setup_arch = socrates_setup_arch,
- .init = socrates_init,
.init_IRQ = socrates_pic_init,
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index ee01532786e4..1b426050a2f9 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -32,7 +32,6 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
-#include <linux/of_platform.h>
/* A few bit definitions needed for fixups on some boards */
#define MPC85xx_L2CTL_L2E 0x80000000 /* L2 enable */
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 9046803c8276..bc97fada48c6 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -36,7 +36,6 @@
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
-#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
@@ -140,31 +139,6 @@ static void __devinit smp_cell_setup_cpu(int cpu)
mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
}
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase = 0;
-
-static void __devinit cell_give_timebase(void)
-{
- spin_lock(&timebase_lock);
- rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
- timebase = get_tb();
- spin_unlock(&timebase_lock);
-
- while (timebase)
- barrier();
- rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-static void __devinit cell_take_timebase(void)
-{
- while (!timebase)
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase >> 32, timebase & 0xffffffff);
- timebase = 0;
- spin_unlock(&timebase_lock);
-}
-
static void __devinit smp_cell_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -224,8 +198,8 @@ void __init smp_init_cell(void)
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = cell_give_timebase;
- smp_ops->take_timebase = cell_take_timebase;
+ smp_ops->give_timebase = rtas_give_timebase;
+ smp_ops->take_timebase = rtas_take_timebase;
}
DBG(" <- smp_init_cell()\n");
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index 10a4a4d063b6..02cafecc90e3 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -26,7 +26,6 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
-#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
@@ -42,40 +41,12 @@ static void __devinit smp_chrp_setup_cpu(int cpu_nr)
mpic_setup_this_cpu();
}
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned int timebase_upper = 0, timebase_lower = 0;
-
-void __devinit smp_chrp_give_timebase(void)
-{
- spin_lock(&timebase_lock);
- rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
- timebase_upper = get_tbu();
- timebase_lower = get_tbl();
- spin_unlock(&timebase_lock);
-
- while (timebase_upper || timebase_lower)
- barrier();
- rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-void __devinit smp_chrp_take_timebase(void)
-{
- while (!(timebase_upper || timebase_lower))
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase_upper, timebase_lower);
- timebase_upper = 0;
- timebase_lower = 0;
- spin_unlock(&timebase_lock);
- printk("CPU %i taken timebase\n", smp_processor_id());
-}
-
/* CHRP with openpic */
struct smp_ops_t chrp_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_chrp_kick_cpu,
.setup_cpu = smp_chrp_setup_cpu,
- .give_timebase = smp_chrp_give_timebase,
- .take_timebase = smp_chrp_take_timebase,
+ .give_timebase = rtas_give_timebase,
+ .take_timebase = rtas_take_timebase,
};
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 153051eb6d93..a4619347aa7e 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -71,20 +71,25 @@ static void pas_restart(char *cmd)
}
#ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(timebase_lock);
+static raw_spinlock_t timebase_lock;
static unsigned long timebase;
static void __devinit pas_give_timebase(void)
{
- spin_lock(&timebase_lock);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ hard_irq_disable();
+ __raw_spin_lock(&timebase_lock);
mtspr(SPRN_TBCTL, TBCTL_FREEZE);
isync();
timebase = get_tb();
- spin_unlock(&timebase_lock);
+ __raw_spin_unlock(&timebase_lock);
while (timebase)
barrier();
mtspr(SPRN_TBCTL, TBCTL_RESTART);
+ local_irq_restore(flags);
}
static void __devinit pas_take_timebase(void)
@@ -92,10 +97,10 @@ static void __devinit pas_take_timebase(void)
while (!timebase)
smp_rmb();
- spin_lock(&timebase_lock);
+ __raw_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- spin_unlock(&timebase_lock);
+ __raw_spin_unlock(&timebase_lock);
}
struct smp_ops_t pas_smp_ops = {
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 86f69a4eb49b..c20522656367 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -103,11 +103,6 @@ unsigned long smu_cmdbuf_abs;
EXPORT_SYMBOL(smu_cmdbuf_abs);
#endif
-#ifdef CONFIG_SMP
-extern struct smp_ops_t psurge_smp_ops;
-extern struct smp_ops_t core99_smp_ops;
-#endif /* CONFIG_SMP */
-
static void pmac_show_cpuinfo(struct seq_file *m)
{
struct device_node *np;
@@ -341,34 +336,6 @@ static void __init pmac_setup_arch(void)
ROOT_DEV = DEFAULT_ROOT_DEVICE;
#endif
-#ifdef CONFIG_SMP
- /* Check for Core99 */
- ic = of_find_node_by_name(NULL, "uni-n");
- if (!ic)
- ic = of_find_node_by_name(NULL, "u3");
- if (!ic)
- ic = of_find_node_by_name(NULL, "u4");
- if (ic) {
- of_node_put(ic);
- smp_ops = &core99_smp_ops;
- }
-#ifdef CONFIG_PPC32
- else {
- /*
- * We have to set bits in cpu_possible_map here since the
- * secondary CPU(s) aren't in the device tree, and
- * setup_per_cpu_areas only allocates per-cpu data for
- * CPUs in the cpu_possible_map.
- */
- int cpu;
-
- for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
- cpu_set(cpu, cpu_possible_map);
- smp_ops = &psurge_smp_ops;
- }
-#endif
-#endif /* CONFIG_SMP */
-
#ifdef CONFIG_ADB
if (strstr(cmd_line, "adb_sync")) {
extern int __adb_probe_sync;
@@ -512,6 +479,14 @@ static void __init pmac_init_early(void)
#ifdef CONFIG_PPC64
iommu_init_early_dart();
#endif
+
+ /* SMP Init has to be done early as we need to patch up
+ * cpu_possible_map before interrupt stacks are allocated
+ * or kaboom...
+ */
+#ifdef CONFIG_SMP
+ pmac_setup_smp();
+#endif
}
static int __init pmac_declare_of_platform_devices(void)
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index cf1dbe758890..6d4da7b46b41 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -64,10 +64,11 @@
extern void __secondary_start_pmac_0(void);
extern int pmac_pfunc_base_install(void);
-#ifdef CONFIG_PPC32
+static void (*pmac_tb_freeze)(int freeze);
+static u64 timebase;
+static int tb_req;
-/* Sync flag for HW tb sync */
-static volatile int sec_tb_reset = 0;
+#ifdef CONFIG_PPC32
/*
* Powersurge (old powermac SMP) support.
@@ -294,6 +295,9 @@ static int __init smp_psurge_probe(void)
psurge_quad_init();
/* All released cards using this HW design have 4 CPUs */
ncpus = 4;
+ /* Not sure how timebase sync works on those, let's use SW */
+ smp_ops->give_timebase = smp_generic_give_timebase;
+ smp_ops->take_timebase = smp_generic_take_timebase;
} else {
iounmap(quad_base);
if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
@@ -308,18 +312,15 @@ static int __init smp_psurge_probe(void)
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
- /*
- * This is necessary because OF doesn't know about the
+ /* This is necessary because OF doesn't know about the
* secondary cpu(s), and thus there aren't nodes in the
* device tree for them, and smp_setup_cpu_maps hasn't
- * set their bits in cpu_possible_map and cpu_present_map.
+ * set their bits in cpu_present_map.
*/
if (ncpus > NR_CPUS)
ncpus = NR_CPUS;
- for (i = 1; i < ncpus ; ++i) {
+ for (i = 1; i < ncpus ; ++i)
cpu_set(i, cpu_present_map);
- set_hard_smp_processor_id(i, i);
- }
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
@@ -329,8 +330,14 @@ static int __init smp_psurge_probe(void)
static void __init smp_psurge_kick_cpu(int nr)
{
unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
- unsigned long a;
- int i;
+ unsigned long a, flags;
+ int i, j;
+
+ /* Defining this here is evil ... but I prefer hiding that
+ * crap to avoid giving people ideas that they can do the
+ * same.
+ */
+ extern volatile unsigned int cpu_callin_map[NR_CPUS];
/* may need to flush here if secondary bats aren't setup */
for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
@@ -339,47 +346,52 @@ static void __init smp_psurge_kick_cpu(int nr)
if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
+ /* This is going to freeze the timebase, so we disable interrupts */
+ local_irq_save(flags);
+
out_be32(psurge_start, start);
mb();
psurge_set_ipi(nr);
+
/*
* We can't use udelay here because the timebase is now frozen.
*/
for (i = 0; i < 2000; ++i)
- barrier();
+ asm volatile("nop" : : : "memory");
psurge_clr_ipi(nr);
- if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
-}
-
-/*
- * With the dual-cpu powersurge board, the decrementers and timebases
- * of both cpus are frozen after the secondary cpu is started up,
- * until we give the secondary cpu another interrupt. This routine
- * uses this to get the timebases synchronized.
- * -- paulus.
- */
-static void __init psurge_dual_sync_tb(int cpu_nr)
-{
- int t;
-
- set_dec(tb_ticks_per_jiffy);
- /* XXX fixme */
- set_tb(0, 0);
-
- if (cpu_nr > 0) {
+ /*
+ * Also, because the timebase is frozen, we must not return to the
+ * caller, which would try to do udelays etc... Instead, we wait -here-
+ * for the CPU to call in.
+ */
+ for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
+ for (j = 1; j < 10000; j++)
+ asm volatile("nop" : : : "memory");
+ asm volatile("sync" : : : "memory");
+ }
+ if (!cpu_callin_map[nr])
+ goto stuck;
+
+ /* And we do the TB sync here too for standard dual CPU cards */
+ if (psurge_type == PSURGE_DUAL) {
+ while(!tb_req)
+ barrier();
+ tb_req = 0;
+ mb();
+ timebase = get_tb();
+ mb();
+ while (timebase)
+ barrier();
mb();
- sec_tb_reset = 1;
- return;
}
+ stuck:
+ /* now interrupt the secondary, restarting both TBs */
+ if (psurge_type == PSURGE_DUAL)
+ psurge_set_ipi(1);
- /* wait for the secondary to have reset its TB before proceeding */
- for (t = 10000000; t > 0 && !sec_tb_reset; --t)
- ;
-
- /* now interrupt the secondary, starting both TBs */
- psurge_set_ipi(1);
+ if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}
static struct irqaction psurge_irqaction = {
@@ -390,36 +402,35 @@ static struct irqaction psurge_irqaction = {
static void __init smp_psurge_setup_cpu(int cpu_nr)
{
+ if (cpu_nr != 0)
+ return;
- if (cpu_nr == 0) {
- /* If we failed to start the second CPU, we should still
- * send it an IPI to start the timebase & DEC or we might
- * have them stuck.
- */
- if (num_online_cpus() < 2) {
- if (psurge_type == PSURGE_DUAL)
- psurge_set_ipi(1);
- return;
- }
- /* reset the entry point so if we get another intr we won't
- * try to startup again */
- out_be32(psurge_start, 0x100);
- if (setup_irq(30, &psurge_irqaction))
- printk(KERN_ERR "Couldn't get primary IPI interrupt");
- }
-
- if (psurge_type == PSURGE_DUAL)
- psurge_dual_sync_tb(cpu_nr);
+ /* reset the entry point so if we get another intr we won't
+ * try to startup again */
+ out_be32(psurge_start, 0x100);
+ if (setup_irq(30, &psurge_irqaction))
+ printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
void __init smp_psurge_take_timebase(void)
{
- /* Dummy implementation */
+ if (psurge_type != PSURGE_DUAL)
+ return;
+
+ tb_req = 1;
+ mb();
+ while (!timebase)
+ barrier();
+ mb();
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ mb();
+ set_dec(tb_ticks_per_jiffy/2);
}
void __init smp_psurge_give_timebase(void)
{
- /* Dummy implementation */
+ /* Nothing to do here */
}
/* PowerSurge-style Macs */
@@ -437,9 +448,6 @@ struct smp_ops_t psurge_smp_ops = {
* Core 99 and later support
*/
-static void (*pmac_tb_freeze)(int freeze);
-static u64 timebase;
-static int tb_req;
static void smp_core99_give_timebase(void)
{
@@ -478,7 +486,6 @@ static void __devinit smp_core99_take_timebase(void)
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
mb();
- set_dec(tb_ticks_per_jiffy/2);
local_irq_restore(flags);
}
@@ -920,3 +927,34 @@ struct smp_ops_t core99_smp_ops = {
# endif
#endif
};
+
+void __init pmac_setup_smp(void)
+{
+ struct device_node *np;
+
+ /* Check for Core99 */
+ np = of_find_node_by_name(NULL, "uni-n");
+ if (!np)
+ np = of_find_node_by_name(NULL, "u3");
+ if (!np)
+ np = of_find_node_by_name(NULL, "u4");
+ if (np) {
+ of_node_put(np);
+ smp_ops = &core99_smp_ops;
+ }
+#ifdef CONFIG_PPC32
+ else {
+ /* We have to set bits in cpu_possible_map here since the
+ * secondary CPU(s) aren't in the device tree. Various
+ * things won't be initialized for CPUs not in the possible
+ * map, so we really need to fix it up here.
+ */
+ int cpu;
+
+ for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
+ cpu_set(cpu, cpu_possible_map);
+ smp_ops = &psurge_smp_ops;
+ }
+#endif /* CONFIG_PPC32 */
+}
+
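
The tb_req/timebase handshake used above (for both the PowerSurge and Core99 paths) is the usual frozen-timebase synchronization dance: the secondary raises a request, the primary publishes the frozen timebase value, and the primary only restarts the timebase once the secondary has consumed it. A stripped-down sketch of that protocol, for illustration only (the freeze/restart step is platform-specific and is left as a comment; get_tb()/set_tb() come from asm/time.h):

#include <asm/time.h>		/* get_tb(), set_tb() */

static int tb_req;		/* secondary asks for the timebase */
static u64 timebase;		/* 0 means "not yet published"     */

/* Primary CPU, entered while the timebase is frozen: */
static void sketch_give_timebase(void)
{
	while (!tb_req)			/* wait for the secondary's request */
		barrier();
	tb_req = 0;
	mb();
	timebase = get_tb();		/* publish the frozen value */
	mb();
	while (timebase)		/* wait for the secondary to consume it */
		barrier();
	mb();
	/* platform-specific step: restart/unfreeze the timebase here */
}

/* Secondary CPU: */
static void sketch_take_timebase(void)
{
	tb_req = 1;
	mb();
	while (!timebase)
		barrier();
	mb();
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;			/* ack: the primary may now restart */
	mb();
}
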
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index f6e04bcc70ef..51ffde40af2b 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -37,7 +37,7 @@
*/
#define MSG_COUNT 4
-static DEFINE_PER_CPU(unsigned int, ps3_ipi_virqs[MSG_COUNT]);
+static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs);
static void do_message_pass(int target, int msg)
{
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1a231c389ba0..1f8f6cfb94f7 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -35,7 +35,6 @@
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
-#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
@@ -118,31 +117,6 @@ static void __devinit smp_xics_setup_cpu(int cpu)
}
#endif /* CONFIG_XICS */
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase = 0;
-
-static void __devinit pSeries_give_timebase(void)
-{
- spin_lock(&timebase_lock);
- rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
- timebase = get_tb();
- spin_unlock(&timebase_lock);
-
- while (timebase)
- barrier();
- rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-static void __devinit pSeries_take_timebase(void)
-{
- while (!timebase)
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase >> 32, timebase & 0xffffffff);
- timebase = 0;
- spin_unlock(&timebase_lock);
-}
-
static void __devinit smp_pSeries_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -209,8 +183,8 @@ static void __init smp_init_pseries(void)
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = pSeries_give_timebase;
- smp_ops->take_timebase = pSeries_take_timebase;
+ smp_ops->give_timebase = rtas_give_timebase;
+ smp_ops->take_timebase = rtas_take_timebase;
}
pr_debug(" <- smp_init_pSeries()\n");
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 9c3af5045495..d46de1f0f3ee 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -279,28 +279,29 @@ static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
}
#ifdef CONFIG_PPC_DCR
-static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
+static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node,
+ struct mpic_reg_bank *rb,
unsigned int offset, unsigned int size)
{
const u32 *dbasep;
- dbasep = of_get_property(mpic->irqhost->of_node, "dcr-reg", NULL);
+ dbasep = of_get_property(node, "dcr-reg", NULL);
- rb->dhost = dcr_map(mpic->irqhost->of_node, *dbasep + offset, size);
+ rb->dhost = dcr_map(node, *dbasep + offset, size);
BUG_ON(!DCR_MAP_OK(rb->dhost));
}
-static inline void mpic_map(struct mpic *mpic, phys_addr_t phys_addr,
- struct mpic_reg_bank *rb, unsigned int offset,
- unsigned int size)
+static inline void mpic_map(struct mpic *mpic, struct device_node *node,
+ phys_addr_t phys_addr, struct mpic_reg_bank *rb,
+ unsigned int offset, unsigned int size)
{
if (mpic->flags & MPIC_USES_DCR)
- _mpic_map_dcr(mpic, rb, offset, size);
+ _mpic_map_dcr(mpic, node, rb, offset, size);
else
_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
}
#else /* CONFIG_PPC_DCR */
-#define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
+#define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
#endif /* !CONFIG_PPC_DCR */
@@ -1052,11 +1053,10 @@ struct mpic * __init mpic_alloc(struct device_node *node,
int intvec_top;
u64 paddr = phys_addr;
- mpic = alloc_bootmem(sizeof(struct mpic));
+ mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
if (mpic == NULL)
return NULL;
-
- memset(mpic, 0, sizeof(struct mpic));
+
mpic->name = name;
mpic->hc_irq = mpic_irq_chip;
@@ -1152,8 +1152,8 @@ struct mpic * __init mpic_alloc(struct device_node *node,
}
/* Map the global registers */
- mpic_map(mpic, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
- mpic_map(mpic, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
+ mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+ mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
/* Reset */
if (flags & MPIC_WANTS_RESET) {
@@ -1194,7 +1194,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* Map the per-CPU registers */
for (i = 0; i < mpic->num_cpus; i++) {
- mpic_map(mpic, paddr, &mpic->cpuregs[i],
+ mpic_map(mpic, node, paddr, &mpic->cpuregs[i],
MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
0x1000);
}
@@ -1202,7 +1202,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* Initialize main ISU if none provided */
if (mpic->isu_size == 0) {
mpic->isu_size = mpic->num_sources;
- mpic_map(mpic, paddr, &mpic->isus[0],
+ mpic_map(mpic, node, paddr, &mpic->isus[0],
MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
}
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
@@ -1256,8 +1256,10 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
BUG_ON(isu_num >= MPIC_MAX_ISU);
- mpic_map(mpic, paddr, &mpic->isus[isu_num], 0,
+ mpic_map(mpic, mpic->irqhost->of_node,
+ paddr, &mpic->isus[isu_num], 0,
MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
+
if ((isu_first + mpic->isu_size) > mpic->num_sources)
mpic->num_sources = isu_first + mpic->isu_size;
}
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index b28b0e512d67..237e3654f48c 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -112,6 +112,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
{
unsigned long flags;
u8 mcn_shift = 0, dev_shift = 0;
+ u32 ret;
spin_lock_irqsave(&qe_lock, flags);
if (cmd == QE_RESET) {
@@ -139,11 +140,13 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
}
/* wait for the QE_CR_FLG to clear */
- while(in_be32(&qe_immr->cp.cecr) & QE_CR_FLG)
- cpu_relax();
+ ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+ 100, 0);
+ /* On timeout (e.g. failure), the expression will be false (ret == 0),
+ otherwise it will be true (ret == 1). */
spin_unlock_irqrestore(&qe_lock, flags);
- return 0;
+ return ret == 1;
}
EXPORT_SYMBOL(qe_issue_cmd);
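
The spin_event_timeout() call above bounds what used to be an open-ended busy wait on QE_CR_FLG. For illustration, the same bounded-polling pattern written out by hand (a generic sketch, not the powerpc helper itself; poll_ready() and the microsecond budget are placeholders):

#include <linux/delay.h>

/* Illustrative only: poll a hardware-ready predicate for at most
 * 'timeout_us' microseconds, returning 1 on success and 0 on timeout.
 */
static int wait_for_ready(int (*poll_ready)(void), unsigned long timeout_us)
{
	while (timeout_us--) {
		if (poll_ready())
			return 1;
		udelay(1);
	}
	return poll_ready();	/* one last check after the deadline */
}
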
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 4aba83b31596..2bc479ab3a66 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -250,8 +250,9 @@ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
const u8 *temp_key = key;
u32 *flags = &tfm->crt_flags;
- if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) {
@@ -411,9 +412,9 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
- DES_KEY_SIZE))) {
-
- *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ DES_KEY_SIZE)) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index 807997f7414b..4943654ed7fd 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -125,4 +125,32 @@ struct chsc_cpd_info {
#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
+#ifdef __KERNEL__
+
+struct css_general_char {
+ u64 : 12;
+ u32 dynio : 1; /* bit 12 */
+ u32 : 28;
+ u32 aif : 1; /* bit 41 */
+ u32 : 3;
+ u32 mcss : 1; /* bit 45 */
+ u32 fcs : 1; /* bit 46 */
+ u32 : 1;
+ u32 ext_mb : 1; /* bit 48 */
+ u32 : 7;
+ u32 aif_tdd : 1; /* bit 56 */
+ u32 : 1;
+ u32 qebsm : 1; /* bit 58 */
+ u32 : 8;
+ u32 aif_osa : 1; /* bit 67 */
+ u32 : 14;
+ u32 cib : 1; /* bit 82 */
+ u32 : 5;
+ u32 fcx : 1; /* bit 88 */
+ u32 : 7;
+}__attribute__((packed));
+
+extern struct css_general_char css_general_characteristics;
+
+#endif /* __KERNEL__ */
#endif
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 619bf94b11f1..e85679af54dd 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -15,228 +15,7 @@
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
-/**
- * struct cmd_scsw - command-mode subchannel status word
- * @key: subchannel key
- * @sctl: suspend control
- * @eswf: esw format
- * @cc: deferred condition code
- * @fmt: format
- * @pfch: prefetch
- * @isic: initial-status interruption control
- * @alcc: address-limit checking control
- * @ssi: suppress-suspended interruption
- * @zcc: zero condition code
- * @ectl: extended control
- * @pno: path not operational
- * @res: reserved
- * @fctl: function control
- * @actl: activity control
- * @stctl: status control
- * @cpa: channel program address
- * @dstat: device status
- * @cstat: subchannel status
- * @count: residual count
- */
-struct cmd_scsw {
- __u32 key : 4;
- __u32 sctl : 1;
- __u32 eswf : 1;
- __u32 cc : 2;
- __u32 fmt : 1;
- __u32 pfch : 1;
- __u32 isic : 1;
- __u32 alcc : 1;
- __u32 ssi : 1;
- __u32 zcc : 1;
- __u32 ectl : 1;
- __u32 pno : 1;
- __u32 res : 1;
- __u32 fctl : 3;
- __u32 actl : 7;
- __u32 stctl : 5;
- __u32 cpa;
- __u32 dstat : 8;
- __u32 cstat : 8;
- __u32 count : 16;
-} __attribute__ ((packed));
-
-/**
- * struct tm_scsw - transport-mode subchannel status word
- * @key: subchannel key
- * @eswf: esw format
- * @cc: deferred condition code
- * @fmt: format
- * @x: IRB-format control
- * @q: interrogate-complete
- * @ectl: extended control
- * @pno: path not operational
- * @fctl: function control
- * @actl: activity control
- * @stctl: status control
- * @tcw: TCW address
- * @dstat: device status
- * @cstat: subchannel status
- * @fcxs: FCX status
- * @schxs: subchannel-extended status
- */
-struct tm_scsw {
- u32 key:4;
- u32 :1;
- u32 eswf:1;
- u32 cc:2;
- u32 fmt:3;
- u32 x:1;
- u32 q:1;
- u32 :1;
- u32 ectl:1;
- u32 pno:1;
- u32 :1;
- u32 fctl:3;
- u32 actl:7;
- u32 stctl:5;
- u32 tcw;
- u32 dstat:8;
- u32 cstat:8;
- u32 fcxs:8;
- u32 schxs:8;
-} __attribute__ ((packed));
-
-/**
- * union scsw - subchannel status word
- * @cmd: command-mode SCSW
- * @tm: transport-mode SCSW
- */
-union scsw {
- struct cmd_scsw cmd;
- struct tm_scsw tm;
-} __attribute__ ((packed));
-
-int scsw_is_tm(union scsw *scsw);
-u32 scsw_key(union scsw *scsw);
-u32 scsw_eswf(union scsw *scsw);
-u32 scsw_cc(union scsw *scsw);
-u32 scsw_ectl(union scsw *scsw);
-u32 scsw_pno(union scsw *scsw);
-u32 scsw_fctl(union scsw *scsw);
-u32 scsw_actl(union scsw *scsw);
-u32 scsw_stctl(union scsw *scsw);
-u32 scsw_dstat(union scsw *scsw);
-u32 scsw_cstat(union scsw *scsw);
-int scsw_is_solicited(union scsw *scsw);
-int scsw_is_valid_key(union scsw *scsw);
-int scsw_is_valid_eswf(union scsw *scsw);
-int scsw_is_valid_cc(union scsw *scsw);
-int scsw_is_valid_ectl(union scsw *scsw);
-int scsw_is_valid_pno(union scsw *scsw);
-int scsw_is_valid_fctl(union scsw *scsw);
-int scsw_is_valid_actl(union scsw *scsw);
-int scsw_is_valid_stctl(union scsw *scsw);
-int scsw_is_valid_dstat(union scsw *scsw);
-int scsw_is_valid_cstat(union scsw *scsw);
-int scsw_cmd_is_valid_key(union scsw *scsw);
-int scsw_cmd_is_valid_sctl(union scsw *scsw);
-int scsw_cmd_is_valid_eswf(union scsw *scsw);
-int scsw_cmd_is_valid_cc(union scsw *scsw);
-int scsw_cmd_is_valid_fmt(union scsw *scsw);
-int scsw_cmd_is_valid_pfch(union scsw *scsw);
-int scsw_cmd_is_valid_isic(union scsw *scsw);
-int scsw_cmd_is_valid_alcc(union scsw *scsw);
-int scsw_cmd_is_valid_ssi(union scsw *scsw);
-int scsw_cmd_is_valid_zcc(union scsw *scsw);
-int scsw_cmd_is_valid_ectl(union scsw *scsw);
-int scsw_cmd_is_valid_pno(union scsw *scsw);
-int scsw_cmd_is_valid_fctl(union scsw *scsw);
-int scsw_cmd_is_valid_actl(union scsw *scsw);
-int scsw_cmd_is_valid_stctl(union scsw *scsw);
-int scsw_cmd_is_valid_dstat(union scsw *scsw);
-int scsw_cmd_is_valid_cstat(union scsw *scsw);
-int scsw_cmd_is_solicited(union scsw *scsw);
-int scsw_tm_is_valid_key(union scsw *scsw);
-int scsw_tm_is_valid_eswf(union scsw *scsw);
-int scsw_tm_is_valid_cc(union scsw *scsw);
-int scsw_tm_is_valid_fmt(union scsw *scsw);
-int scsw_tm_is_valid_x(union scsw *scsw);
-int scsw_tm_is_valid_q(union scsw *scsw);
-int scsw_tm_is_valid_ectl(union scsw *scsw);
-int scsw_tm_is_valid_pno(union scsw *scsw);
-int scsw_tm_is_valid_fctl(union scsw *scsw);
-int scsw_tm_is_valid_actl(union scsw *scsw);
-int scsw_tm_is_valid_stctl(union scsw *scsw);
-int scsw_tm_is_valid_dstat(union scsw *scsw);
-int scsw_tm_is_valid_cstat(union scsw *scsw);
-int scsw_tm_is_valid_fcxs(union scsw *scsw);
-int scsw_tm_is_valid_schxs(union scsw *scsw);
-int scsw_tm_is_solicited(union scsw *scsw);
-
-#define SCSW_FCTL_CLEAR_FUNC 0x1
-#define SCSW_FCTL_HALT_FUNC 0x2
-#define SCSW_FCTL_START_FUNC 0x4
-
-#define SCSW_ACTL_SUSPENDED 0x1
-#define SCSW_ACTL_DEVACT 0x2
-#define SCSW_ACTL_SCHACT 0x4
-#define SCSW_ACTL_CLEAR_PEND 0x8
-#define SCSW_ACTL_HALT_PEND 0x10
-#define SCSW_ACTL_START_PEND 0x20
-#define SCSW_ACTL_RESUME_PEND 0x40
-
-#define SCSW_STCTL_STATUS_PEND 0x1
-#define SCSW_STCTL_SEC_STATUS 0x2
-#define SCSW_STCTL_PRIM_STATUS 0x4
-#define SCSW_STCTL_INTER_STATUS 0x8
-#define SCSW_STCTL_ALERT_STATUS 0x10
-
-#define DEV_STAT_ATTENTION 0x80
-#define DEV_STAT_STAT_MOD 0x40
-#define DEV_STAT_CU_END 0x20
-#define DEV_STAT_BUSY 0x10
-#define DEV_STAT_CHN_END 0x08
-#define DEV_STAT_DEV_END 0x04
-#define DEV_STAT_UNIT_CHECK 0x02
-#define DEV_STAT_UNIT_EXCEP 0x01
-
-#define SCHN_STAT_PCI 0x80
-#define SCHN_STAT_INCORR_LEN 0x40
-#define SCHN_STAT_PROG_CHECK 0x20
-#define SCHN_STAT_PROT_CHECK 0x10
-#define SCHN_STAT_CHN_DATA_CHK 0x08
-#define SCHN_STAT_CHN_CTRL_CHK 0x04
-#define SCHN_STAT_INTF_CTRL_CHK 0x02
-#define SCHN_STAT_CHAIN_CHECK 0x01
-
-/*
- * architectured values for first sense byte
- */
-#define SNS0_CMD_REJECT 0x80
-#define SNS_CMD_REJECT SNS0_CMD_REJEC
-#define SNS0_INTERVENTION_REQ 0x40
-#define SNS0_BUS_OUT_CHECK 0x20
-#define SNS0_EQUIPMENT_CHECK 0x10
-#define SNS0_DATA_CHECK 0x08
-#define SNS0_OVERRUN 0x04
-#define SNS0_INCOMPL_DOMAIN 0x01
-
-/*
- * architectured values for second sense byte
- */
-#define SNS1_PERM_ERR 0x80
-#define SNS1_INV_TRACK_FORMAT 0x40
-#define SNS1_EOC 0x20
-#define SNS1_MESSAGE_TO_OPER 0x10
-#define SNS1_NO_REC_FOUND 0x08
-#define SNS1_FILE_PROTECTED 0x04
-#define SNS1_WRITE_INHIBITED 0x02
-#define SNS1_INPRECISE_END 0x01
-
-/*
- * architectured values for third sense byte
- */
-#define SNS2_REQ_INH_WRITE 0x80
-#define SNS2_CORRECTABLE 0x40
-#define SNS2_FIRST_LOG_ERR 0x20
-#define SNS2_ENV_DATA_PRESENT 0x10
-#define SNS2_INPRECISE_END 0x04
+#include <asm/scsw.h>
/**
* struct ccw1 - channel command word
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a27d0d5a6f86..78e07a622b45 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1,7 +1,7 @@
/*
* asm-s390/kvm_host.h - definition for kernel virtual machines on s390
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -40,7 +40,11 @@ struct sca_block {
struct sca_entry cpu[64];
} __attribute__((packed));
-#define KVM_PAGES_PER_HPAGE 256
+#define KVM_NR_PAGE_SIZES 2
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
+#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
#define CPUSTAT_HOST 0x80000000
#define CPUSTAT_WAIT 0x10000000
@@ -99,7 +103,9 @@ struct kvm_s390_sie_block {
__u8 reservedd0[48]; /* 0x00d0 */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
- __u8 reserved188[120]; /* 0x0188 */
+ __u8 reserved188[24]; /* 0x0188 */
+ __u32 fac; /* 0x01a0 */
+ __u8 reserved1a4[92]; /* 0x01a4 */
} __attribute__((packed));
struct kvm_vcpu_stat {
@@ -180,8 +186,9 @@ struct kvm_s390_interrupt_info {
};
/* for local_interrupt.action_flags */
-#define ACTION_STORE_ON_STOP 1
-#define ACTION_STOP_ON_STOP 2
+#define ACTION_STORE_ON_STOP (1<<0)
+#define ACTION_STOP_ON_STOP (1<<1)
+#define ACTION_RELOADVCPU_ON_STOP (1<<2)
struct kvm_s390_local_interrupt {
spinlock_t lock;
@@ -225,8 +232,6 @@ struct kvm_vm_stat {
};
struct kvm_arch{
- unsigned long guest_origin;
- unsigned long guest_memsize;
struct sca_block *sca;
debug_info_t *dbf;
struct kvm_s390_float_interrupt float_int;
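
The KVM_HPAGE_* macros above derive the s390 huge-page geometry from the base page size, with each level adding 8 bits of shift. With the usual 4 KiB base page (PAGE_SHIFT == 12), level 2 works out as follows (a worked example, not additional kernel code):

/*
 *   KVM_HPAGE_SHIFT(2)     = 12 + (2 - 1) * 8 = 20
 *   KVM_HPAGE_SIZE(2)      = 1UL << 20        = 1 MiB
 *   KVM_PAGES_PER_HPAGE(2) = 1 MiB / 4 KiB    = 256
 *
 * which matches the old flat KVM_PAGES_PER_HPAGE value of 256 that the
 * patch removes.
 */
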
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 408d60b4f75b..f7ad8719d02d 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -1,37 +1,21 @@
#ifndef __ARCH_S390_PERCPU__
#define __ARCH_S390_PERCPU__
-#include <linux/compiler.h>
-#include <asm/lowcore.h>
-
/*
* s390 uses its own implementation for per cpu data, the offset of
* the cpu local data area is cached in the cpu's lowcore memory.
- * For 64 bit module code s390 forces the use of a GOT slot for the
- * address of the per cpu variable. This is needed because the module
- * may be more than 4G above the per cpu area.
*/
-#if defined(__s390x__) && defined(MODULE)
-
-#define SHIFT_PERCPU_PTR(ptr,offset) (({ \
- extern int simple_identifier_##var(void); \
- unsigned long *__ptr; \
- asm ( "larl %0, %1@GOTENT" \
- : "=a" (__ptr) : "X" (ptr) ); \
- (typeof(ptr))((*__ptr) + (offset)); }))
-
-#else
-
-#define SHIFT_PERCPU_PTR(ptr, offset) (({ \
- extern int simple_identifier_##var(void); \
- unsigned long __ptr; \
- asm ( "" : "=a" (__ptr) : "0" (ptr) ); \
- (typeof(ptr)) (__ptr + (offset)); }))
+#define __my_cpu_offset S390_lowcore.percpu_offset
+/*
+ * For 64 bit module code, the module may be more than 4G above the
+ * per cpu area, use weak definitions to force the compiler to
+ * generate external references.
+ */
+#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
+#define ARCH_NEEDS_WEAK_PER_CPU
#endif
-#define __my_cpu_offset S390_lowcore.percpu_offset
-
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */
diff --git a/drivers/s390/cio/scsw.c b/arch/s390/include/asm/scsw.h
index f8da25ab576d..de389cb54d28 100644
--- a/drivers/s390/cio/scsw.c
+++ b/arch/s390/include/asm/scsw.h
@@ -1,15 +1,182 @@
/*
* Helper functions for scsw access.
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
+#ifndef _ASM_S390_SCSW_H_
+#define _ASM_S390_SCSW_H_
+
#include <linux/types.h>
-#include <linux/module.h>
+#include <asm/chsc.h>
#include <asm/cio.h>
-#include "css.h"
-#include "chsc.h"
+
+/**
+ * struct cmd_scsw - command-mode subchannel status word
+ * @key: subchannel key
+ * @sctl: suspend control
+ * @eswf: esw format
+ * @cc: deferred condition code
+ * @fmt: format
+ * @pfch: prefetch
+ * @isic: initial-status interruption control
+ * @alcc: address-limit checking control
+ * @ssi: suppress-suspended interruption
+ * @zcc: zero condition code
+ * @ectl: extended control
+ * @pno: path not operational
+ * @res: reserved
+ * @fctl: function control
+ * @actl: activity control
+ * @stctl: status control
+ * @cpa: channel program address
+ * @dstat: device status
+ * @cstat: subchannel status
+ * @count: residual count
+ */
+struct cmd_scsw {
+ __u32 key : 4;
+ __u32 sctl : 1;
+ __u32 eswf : 1;
+ __u32 cc : 2;
+ __u32 fmt : 1;
+ __u32 pfch : 1;
+ __u32 isic : 1;
+ __u32 alcc : 1;
+ __u32 ssi : 1;
+ __u32 zcc : 1;
+ __u32 ectl : 1;
+ __u32 pno : 1;
+ __u32 res : 1;
+ __u32 fctl : 3;
+ __u32 actl : 7;
+ __u32 stctl : 5;
+ __u32 cpa;
+ __u32 dstat : 8;
+ __u32 cstat : 8;
+ __u32 count : 16;
+} __attribute__ ((packed));
+
+/**
+ * struct tm_scsw - transport-mode subchannel status word
+ * @key: subchannel key
+ * @eswf: esw format
+ * @cc: deferred condition code
+ * @fmt: format
+ * @x: IRB-format control
+ * @q: interrogate-complete
+ * @ectl: extended control
+ * @pno: path not operational
+ * @fctl: function control
+ * @actl: activity control
+ * @stctl: status control
+ * @tcw: TCW address
+ * @dstat: device status
+ * @cstat: subchannel status
+ * @fcxs: FCX status
+ * @schxs: subchannel-extended status
+ */
+struct tm_scsw {
+ u32 key:4;
+ u32 :1;
+ u32 eswf:1;
+ u32 cc:2;
+ u32 fmt:3;
+ u32 x:1;
+ u32 q:1;
+ u32 :1;
+ u32 ectl:1;
+ u32 pno:1;
+ u32 :1;
+ u32 fctl:3;
+ u32 actl:7;
+ u32 stctl:5;
+ u32 tcw;
+ u32 dstat:8;
+ u32 cstat:8;
+ u32 fcxs:8;
+ u32 schxs:8;
+} __attribute__ ((packed));
+
+/**
+ * union scsw - subchannel status word
+ * @cmd: command-mode SCSW
+ * @tm: transport-mode SCSW
+ */
+union scsw {
+ struct cmd_scsw cmd;
+ struct tm_scsw tm;
+} __attribute__ ((packed));
+
+#define SCSW_FCTL_CLEAR_FUNC 0x1
+#define SCSW_FCTL_HALT_FUNC 0x2
+#define SCSW_FCTL_START_FUNC 0x4
+
+#define SCSW_ACTL_SUSPENDED 0x1
+#define SCSW_ACTL_DEVACT 0x2
+#define SCSW_ACTL_SCHACT 0x4
+#define SCSW_ACTL_CLEAR_PEND 0x8
+#define SCSW_ACTL_HALT_PEND 0x10
+#define SCSW_ACTL_START_PEND 0x20
+#define SCSW_ACTL_RESUME_PEND 0x40
+
+#define SCSW_STCTL_STATUS_PEND 0x1
+#define SCSW_STCTL_SEC_STATUS 0x2
+#define SCSW_STCTL_PRIM_STATUS 0x4
+#define SCSW_STCTL_INTER_STATUS 0x8
+#define SCSW_STCTL_ALERT_STATUS 0x10
+
+#define DEV_STAT_ATTENTION 0x80
+#define DEV_STAT_STAT_MOD 0x40
+#define DEV_STAT_CU_END 0x20
+#define DEV_STAT_BUSY 0x10
+#define DEV_STAT_CHN_END 0x08
+#define DEV_STAT_DEV_END 0x04
+#define DEV_STAT_UNIT_CHECK 0x02
+#define DEV_STAT_UNIT_EXCEP 0x01
+
+#define SCHN_STAT_PCI 0x80
+#define SCHN_STAT_INCORR_LEN 0x40
+#define SCHN_STAT_PROG_CHECK 0x20
+#define SCHN_STAT_PROT_CHECK 0x10
+#define SCHN_STAT_CHN_DATA_CHK 0x08
+#define SCHN_STAT_CHN_CTRL_CHK 0x04
+#define SCHN_STAT_INTF_CTRL_CHK 0x02
+#define SCHN_STAT_CHAIN_CHECK 0x01
+
+/*
+ * architectured values for first sense byte
+ */
+#define SNS0_CMD_REJECT 0x80
+#define SNS_CMD_REJECT SNS0_CMD_REJECT
+#define SNS0_INTERVENTION_REQ 0x40
+#define SNS0_BUS_OUT_CHECK 0x20
+#define SNS0_EQUIPMENT_CHECK 0x10
+#define SNS0_DATA_CHECK 0x08
+#define SNS0_OVERRUN 0x04
+#define SNS0_INCOMPL_DOMAIN 0x01
+
+/*
+ * architectured values for second sense byte
+ */
+#define SNS1_PERM_ERR 0x80
+#define SNS1_INV_TRACK_FORMAT 0x40
+#define SNS1_EOC 0x20
+#define SNS1_MESSAGE_TO_OPER 0x10
+#define SNS1_NO_REC_FOUND 0x08
+#define SNS1_FILE_PROTECTED 0x04
+#define SNS1_WRITE_INHIBITED 0x02
+#define SNS1_INPRECISE_END 0x01
+
+/*
+ * architectured values for third sense byte
+ */
+#define SNS2_REQ_INH_WRITE 0x80
+#define SNS2_CORRECTABLE 0x40
+#define SNS2_FIRST_LOG_ERR 0x20
+#define SNS2_ENV_DATA_PRESENT 0x10
+#define SNS2_INPRECISE_END 0x04
/**
* scsw_is_tm - check for transport mode scsw
@@ -18,11 +185,10 @@
* Return non-zero if the specified scsw is a transport mode scsw, zero
* otherwise.
*/
-int scsw_is_tm(union scsw *scsw)
+static inline int scsw_is_tm(union scsw *scsw)
{
return css_general_characteristics.fcx && (scsw->tm.x == 1);
}
-EXPORT_SYMBOL(scsw_is_tm);
/**
* scsw_key - return scsw key field
@@ -31,14 +197,13 @@ EXPORT_SYMBOL(scsw_is_tm);
* Return the value of the key field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_key(union scsw *scsw)
+static inline u32 scsw_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.key;
else
return scsw->cmd.key;
}
-EXPORT_SYMBOL(scsw_key);
/**
* scsw_eswf - return scsw eswf field
@@ -47,14 +212,13 @@ EXPORT_SYMBOL(scsw_key);
* Return the value of the eswf field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_eswf(union scsw *scsw)
+static inline u32 scsw_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.eswf;
else
return scsw->cmd.eswf;
}
-EXPORT_SYMBOL(scsw_eswf);
/**
* scsw_cc - return scsw cc field
@@ -63,14 +227,13 @@ EXPORT_SYMBOL(scsw_eswf);
* Return the value of the cc field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_cc(union scsw *scsw)
+static inline u32 scsw_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cc;
else
return scsw->cmd.cc;
}
-EXPORT_SYMBOL(scsw_cc);
/**
* scsw_ectl - return scsw ectl field
@@ -79,14 +242,13 @@ EXPORT_SYMBOL(scsw_cc);
* Return the value of the ectl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_ectl(union scsw *scsw)
+static inline u32 scsw_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.ectl;
else
return scsw->cmd.ectl;
}
-EXPORT_SYMBOL(scsw_ectl);
/**
* scsw_pno - return scsw pno field
@@ -95,14 +257,13 @@ EXPORT_SYMBOL(scsw_ectl);
* Return the value of the pno field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_pno(union scsw *scsw)
+static inline u32 scsw_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.pno;
else
return scsw->cmd.pno;
}
-EXPORT_SYMBOL(scsw_pno);
/**
* scsw_fctl - return scsw fctl field
@@ -111,14 +272,13 @@ EXPORT_SYMBOL(scsw_pno);
* Return the value of the fctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_fctl(union scsw *scsw)
+static inline u32 scsw_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.fctl;
else
return scsw->cmd.fctl;
}
-EXPORT_SYMBOL(scsw_fctl);
/**
* scsw_actl - return scsw actl field
@@ -127,14 +287,13 @@ EXPORT_SYMBOL(scsw_fctl);
* Return the value of the actl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_actl(union scsw *scsw)
+static inline u32 scsw_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.actl;
else
return scsw->cmd.actl;
}
-EXPORT_SYMBOL(scsw_actl);
/**
* scsw_stctl - return scsw stctl field
@@ -143,14 +302,13 @@ EXPORT_SYMBOL(scsw_actl);
* Return the value of the stctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_stctl(union scsw *scsw)
+static inline u32 scsw_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.stctl;
else
return scsw->cmd.stctl;
}
-EXPORT_SYMBOL(scsw_stctl);
/**
* scsw_dstat - return scsw dstat field
@@ -159,14 +317,13 @@ EXPORT_SYMBOL(scsw_stctl);
* Return the value of the dstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_dstat(union scsw *scsw)
+static inline u32 scsw_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.dstat;
else
return scsw->cmd.dstat;
}
-EXPORT_SYMBOL(scsw_dstat);
/**
* scsw_cstat - return scsw cstat field
@@ -175,14 +332,13 @@ EXPORT_SYMBOL(scsw_dstat);
* Return the value of the cstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_cstat(union scsw *scsw)
+static inline u32 scsw_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cstat;
else
return scsw->cmd.cstat;
}
-EXPORT_SYMBOL(scsw_cstat);
/**
* scsw_cmd_is_valid_key - check key field validity
@@ -191,11 +347,10 @@ EXPORT_SYMBOL(scsw_cstat);
* Return non-zero if the key field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_key(union scsw *scsw)
+static inline int scsw_cmd_is_valid_key(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_key);
/**
* scsw_cmd_is_valid_sctl - check fctl field validity
@@ -204,11 +359,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_key);
* Return non-zero if the fctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_sctl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_sctl(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
/**
* scsw_cmd_is_valid_eswf - check eswf field validity
@@ -217,11 +371,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
* Return non-zero if the eswf field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_eswf(union scsw *scsw)
+static inline int scsw_cmd_is_valid_eswf(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
/**
* scsw_cmd_is_valid_cc - check cc field validity
@@ -230,12 +383,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
* Return non-zero if the cc field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_cc(union scsw *scsw)
+static inline int scsw_cmd_is_valid_cc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
/**
* scsw_cmd_is_valid_fmt - check fmt field validity
@@ -244,11 +396,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
* Return non-zero if the fmt field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_fmt(union scsw *scsw)
+static inline int scsw_cmd_is_valid_fmt(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
/**
* scsw_cmd_is_valid_pfch - check pfch field validity
@@ -257,11 +408,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
* Return non-zero if the pfch field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_pfch(union scsw *scsw)
+static inline int scsw_cmd_is_valid_pfch(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
/**
* scsw_cmd_is_valid_isic - check isic field validity
@@ -270,11 +420,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
* Return non-zero if the isic field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_isic(union scsw *scsw)
+static inline int scsw_cmd_is_valid_isic(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
/**
* scsw_cmd_is_valid_alcc - check alcc field validity
@@ -283,11 +432,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
* Return non-zero if the alcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_alcc(union scsw *scsw)
+static inline int scsw_cmd_is_valid_alcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
/**
* scsw_cmd_is_valid_ssi - check ssi field validity
@@ -296,11 +444,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
* Return non-zero if the ssi field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_ssi(union scsw *scsw)
+static inline int scsw_cmd_is_valid_ssi(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
/**
* scsw_cmd_is_valid_zcc - check zcc field validity
@@ -309,12 +456,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
* Return non-zero if the zcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_zcc(union scsw *scsw)
+static inline int scsw_cmd_is_valid_zcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
/**
* scsw_cmd_is_valid_ectl - check ectl field validity
@@ -323,13 +469,12 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
* Return non-zero if the ectl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_ectl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_ectl(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
/**
* scsw_cmd_is_valid_pno - check pno field validity
@@ -338,7 +483,7 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
* Return non-zero if the pno field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_pno(union scsw *scsw)
+static inline int scsw_cmd_is_valid_pno(union scsw *scsw)
{
return (scsw->cmd.fctl != 0) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
@@ -346,7 +491,6 @@ int scsw_cmd_is_valid_pno(union scsw *scsw)
((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
/**
* scsw_cmd_is_valid_fctl - check fctl field validity
@@ -355,12 +499,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
* Return non-zero if the fctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_fctl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_fctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
/**
* scsw_cmd_is_valid_actl - check actl field validity
@@ -369,12 +512,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
* Return non-zero if the actl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_actl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_actl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
/**
* scsw_cmd_is_valid_stctl - check stctl field validity
@@ -383,12 +525,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
* Return non-zero if the stctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_stctl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_stctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
/**
* scsw_cmd_is_valid_dstat - check dstat field validity
@@ -397,12 +538,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
* Return non-zero if the dstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_dstat(union scsw *scsw)
+static inline int scsw_cmd_is_valid_dstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
/**
* scsw_cmd_is_valid_cstat - check cstat field validity
@@ -411,12 +551,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
* Return non-zero if the cstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_cstat(union scsw *scsw)
+static inline int scsw_cmd_is_valid_cstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
/**
* scsw_tm_is_valid_key - check key field validity
@@ -425,11 +564,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
* Return non-zero if the key field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_key(union scsw *scsw)
+static inline int scsw_tm_is_valid_key(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_key);
/**
* scsw_tm_is_valid_eswf - check eswf field validity
@@ -438,11 +576,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_key);
* Return non-zero if the eswf field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_eswf(union scsw *scsw)
+static inline int scsw_tm_is_valid_eswf(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
/**
* scsw_tm_is_valid_cc - check cc field validity
@@ -451,12 +588,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
* Return non-zero if the cc field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_cc(union scsw *scsw)
+static inline int scsw_tm_is_valid_cc(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_cc);
/**
* scsw_tm_is_valid_fmt - check fmt field validity
@@ -465,11 +601,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_cc);
* Return non-zero if the fmt field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_fmt(union scsw *scsw)
+static inline int scsw_tm_is_valid_fmt(union scsw *scsw)
{
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
/**
* scsw_tm_is_valid_x - check x field validity
@@ -478,11 +613,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
* Return non-zero if the x field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_x(union scsw *scsw)
+static inline int scsw_tm_is_valid_x(union scsw *scsw)
{
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_x);
/**
* scsw_tm_is_valid_q - check q field validity
@@ -491,11 +625,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_x);
* Return non-zero if the q field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_q(union scsw *scsw)
+static inline int scsw_tm_is_valid_q(union scsw *scsw)
{
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_q);
/**
* scsw_tm_is_valid_ectl - check ectl field validity
@@ -504,13 +637,12 @@ EXPORT_SYMBOL(scsw_tm_is_valid_q);
* Return non-zero if the ectl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_ectl(union scsw *scsw)
+static inline int scsw_tm_is_valid_ectl(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
/**
* scsw_tm_is_valid_pno - check pno field validity
@@ -519,7 +651,7 @@ EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
* Return non-zero if the pno field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_pno(union scsw *scsw)
+static inline int scsw_tm_is_valid_pno(union scsw *scsw)
{
return (scsw->tm.fctl != 0) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
@@ -527,7 +659,6 @@ int scsw_tm_is_valid_pno(union scsw *scsw)
((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
}
-EXPORT_SYMBOL(scsw_tm_is_valid_pno);
/**
* scsw_tm_is_valid_fctl - check fctl field validity
@@ -536,12 +667,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_pno);
* Return non-zero if the fctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_fctl(union scsw *scsw)
+static inline int scsw_tm_is_valid_fctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
/**
* scsw_tm_is_valid_actl - check actl field validity
@@ -550,12 +680,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
* Return non-zero if the actl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_actl(union scsw *scsw)
+static inline int scsw_tm_is_valid_actl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1 */
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_actl);
/**
* scsw_tm_is_valid_stctl - check stctl field validity
@@ -564,12 +693,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_actl);
* Return non-zero if the stctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_stctl(union scsw *scsw)
+static inline int scsw_tm_is_valid_stctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1 */
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
/**
* scsw_tm_is_valid_dstat - check dstat field validity
@@ -578,12 +706,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
* Return non-zero if the dstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_dstat(union scsw *scsw)
+static inline int scsw_tm_is_valid_dstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
/**
* scsw_tm_is_valid_cstat - check cstat field validity
@@ -592,12 +719,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
* Return non-zero if the cstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_cstat(union scsw *scsw)
+static inline int scsw_tm_is_valid_cstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
/**
* scsw_tm_is_valid_fcxs - check fcxs field validity
@@ -606,11 +732,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
* Return non-zero if the fcxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_fcxs(union scsw *scsw)
+static inline int scsw_tm_is_valid_fcxs(union scsw *scsw)
{
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
/**
* scsw_tm_is_valid_schxs - check schxs field validity
@@ -619,14 +744,13 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
* Return non-zero if the schxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_schxs(union scsw *scsw)
+static inline int scsw_tm_is_valid_schxs(union scsw *scsw)
{
return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_PROT_CHECK |
SCHN_STAT_CHN_DATA_CHK));
}
-EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
/**
* scsw_is_valid_actl - check actl field validity
@@ -636,14 +760,13 @@ EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_actl(union scsw *scsw)
+static inline int scsw_is_valid_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_actl(scsw);
else
return scsw_cmd_is_valid_actl(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_actl);
/**
* scsw_is_valid_cc - check cc field validity
@@ -653,14 +776,13 @@ EXPORT_SYMBOL(scsw_is_valid_actl);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_cc(union scsw *scsw)
+static inline int scsw_is_valid_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cc(scsw);
else
return scsw_cmd_is_valid_cc(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_cc);
/**
* scsw_is_valid_cstat - check cstat field validity
@@ -670,14 +792,13 @@ EXPORT_SYMBOL(scsw_is_valid_cc);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_cstat(union scsw *scsw)
+static inline int scsw_is_valid_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cstat(scsw);
else
return scsw_cmd_is_valid_cstat(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_cstat);
/**
* scsw_is_valid_dstat - check dstat field validity
@@ -687,14 +808,13 @@ EXPORT_SYMBOL(scsw_is_valid_cstat);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_dstat(union scsw *scsw)
+static inline int scsw_is_valid_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_dstat(scsw);
else
return scsw_cmd_is_valid_dstat(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_dstat);
/**
* scsw_is_valid_ectl - check ectl field validity
@@ -704,14 +824,13 @@ EXPORT_SYMBOL(scsw_is_valid_dstat);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_ectl(union scsw *scsw)
+static inline int scsw_is_valid_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_ectl(scsw);
else
return scsw_cmd_is_valid_ectl(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_ectl);
/**
* scsw_is_valid_eswf - check eswf field validity
@@ -721,14 +840,13 @@ EXPORT_SYMBOL(scsw_is_valid_ectl);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_eswf(union scsw *scsw)
+static inline int scsw_is_valid_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_eswf(scsw);
else
return scsw_cmd_is_valid_eswf(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_eswf);
/**
* scsw_is_valid_fctl - check fctl field validity
@@ -738,14 +856,13 @@ EXPORT_SYMBOL(scsw_is_valid_eswf);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_fctl(union scsw *scsw)
+static inline int scsw_is_valid_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_fctl(scsw);
else
return scsw_cmd_is_valid_fctl(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_fctl);
/**
* scsw_is_valid_key - check key field validity
@@ -755,14 +872,13 @@ EXPORT_SYMBOL(scsw_is_valid_fctl);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_key(union scsw *scsw)
+static inline int scsw_is_valid_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_key(scsw);
else
return scsw_cmd_is_valid_key(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_key);
/**
* scsw_is_valid_pno - check pno field validity
@@ -772,14 +888,13 @@ EXPORT_SYMBOL(scsw_is_valid_key);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_pno(union scsw *scsw)
+static inline int scsw_is_valid_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_pno(scsw);
else
return scsw_cmd_is_valid_pno(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_pno);
/**
* scsw_is_valid_stctl - check stctl field validity
@@ -789,14 +904,13 @@ EXPORT_SYMBOL(scsw_is_valid_pno);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_stctl(union scsw *scsw)
+static inline int scsw_is_valid_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_stctl(scsw);
else
return scsw_cmd_is_valid_stctl(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_stctl);
/**
* scsw_cmd_is_solicited - check for solicited scsw
@@ -805,12 +919,11 @@ EXPORT_SYMBOL(scsw_is_valid_stctl);
* Return non-zero if the command mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
-int scsw_cmd_is_solicited(union scsw *scsw)
+static inline int scsw_cmd_is_solicited(union scsw *scsw)
{
return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
-EXPORT_SYMBOL(scsw_cmd_is_solicited);
/**
* scsw_tm_is_solicited - check for solicited scsw
@@ -819,12 +932,11 @@ EXPORT_SYMBOL(scsw_cmd_is_solicited);
* Return non-zero if the transport mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
-int scsw_tm_is_solicited(union scsw *scsw)
+static inline int scsw_tm_is_solicited(union scsw *scsw)
{
return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
-EXPORT_SYMBOL(scsw_tm_is_solicited);
/**
* scsw_is_solicited - check for solicited scsw
@@ -833,11 +945,12 @@ EXPORT_SYMBOL(scsw_tm_is_solicited);
* Return non-zero if the transport or command mode scsw indicates that the
* associated status condition is solicited, zero if it is unsolicited.
*/
-int scsw_is_solicited(union scsw *scsw)
+static inline int scsw_is_solicited(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_solicited(scsw);
else
return scsw_cmd_is_solicited(scsw);
}
-EXPORT_SYMBOL(scsw_is_solicited);
+
+#endif /* _ASM_S390_SCSW_H_ */
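With the helpers above converted from exported functions into static inlines, a user of the header simply includes it; there is no module export left to link against. A minimal sketch of a caller follows (the wrapper name is hypothetical, everything else is taken from the header as shown above):

#include <asm/scsw.h>

/* Return the condition code, or -1 if the scsw does not carry a valid one. */
static int my_scsw_cc(union scsw *scsw)
{
	if (!scsw_is_valid_cc(scsw))
		return -1;
	return scsw_is_tm(scsw) ? scsw->tm.cc : scsw->cmd.cc;
}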
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 72137bc907ac..8ebcf8836aec 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -87,7 +87,7 @@ extern struct mutex smp_cpu_state_mutex;
extern int smp_cpu_polarization[];
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index cc21e3e20fd7..e4e7552d8151 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -88,6 +88,20 @@ int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
-extern u64 sched_clock_base_cc;
+extern u64 sched_clock_base;
+
+/**
+ * get_clock_monotonic - returns current time in clock rate units
+ *
+ * The caller must ensure that preemption is disabled,
+ * because the clock and sched_clock_base are changed via
+ * stop_machine. With preemption enabled, the value
+ * returned by this function is not guaranteed to
+ * be monotonic.
+ */
+static inline unsigned long long get_clock_monotonic(void)
+{
+ return get_clock_xt() - sched_clock_base;
+}
#endif
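The comment on get_clock_monotonic() above requires preemption to be disabled around the read. A minimal sketch of a caller honouring that requirement (the function name is hypothetical):

#include <linux/preempt.h>
#include <asm/timex.h>

static unsigned long long sample_monotonic(void)
{
	unsigned long long clk;

	/* sched_clock_base may be changed via stop_machine; keep
	 * preemption off across the read to preserve monotonicity. */
	preempt_disable();
	clk = get_clock_monotonic();
	preempt_enable();
	return clk;
}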
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 5e0ad618dc45..6e7211abd950 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,7 +9,6 @@ const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
extern cpumask_t cpu_core_map[NR_CPUS];
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
int topology_set_cpu_management(int fc);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index f9b144049dc9..4088aecdd894 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -52,7 +52,7 @@ static void __init reset_tod_clock(void)
if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
disabled_wait(0);
- sched_clock_base_cc = TOD_UNIX_EPOCH;
+ sched_clock_base = TOD_UNIX_EPOCH;
}
#ifdef CONFIG_SHARED_KERNEL
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ec6882348520..d5e6ee3b2c4f 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -535,7 +535,7 @@ startup:basr %r13,0 # get base
b 0(%r13)
.align 4
4: .long startup_continue
-5: .long sched_clock_base_cc
+5: .long sched_clock_base
.align 8
6: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2270730f5354..c058a4d1edfb 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -129,11 +129,11 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
udelay(10);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
int cpu;
- for_each_cpu_mask(cpu, mask)
+ for_each_cpu(cpu, mask)
smp_ext_bitcall(cpu, ec_call_function);
}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4c8e9c47c81..6f0d86fb2aaa 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -59,7 +59,8 @@
#define TICK_SIZE tick
-u64 sched_clock_base_cc = -1; /* Force to data section. */
+u64 sched_clock_base = -1; /* Force to data section. */
+EXPORT_SYMBOL(sched_clock_base);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace sched_clock(void)
{
- return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
+ return (get_clock_monotonic() * 125) >> 9;
}
/*
@@ -277,7 +278,7 @@ void __init time_init(void)
tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
clocksource_tod.cycle_last = now;
clocksource_tod.raw_time = xtime;
- tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
+ tod_to_timeval(sched_clock_base - TOD_UNIX_EPOCH, &ts);
set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -315,7 +316,7 @@ static unsigned long long adjust_time(unsigned long long old,
delta = -delta;
adjust.offset = -ticks * (1000000 / HZ);
}
- sched_clock_base_cc += delta;
+ sched_clock_base += delta;
if (adjust.offset != 0) {
pr_notice("The ETR interface has adjusted the clock "
"by %li microseconds\n", adjust.offset);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a53db23ee092..98867dfea469 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -161,6 +161,7 @@ SECTIONS
/DISCARD/ : {
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
/* Debugging sections. */
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 3e260b7e37b2..bf164fc21864 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -1,11 +1,7 @@
#
# KVM configuration
#
-config HAVE_KVM
- bool
-
-config HAVE_KVM_IRQCHIP
- bool
+source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -38,9 +34,6 @@ config KVM
If unsure, say N.
-config KVM_TRACE
- bool
-
# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/virtio/Kconfig
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index ed60f3a74a85..03c716a0f01f 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,7 +1,7 @@
/*
* gaccess.h - access guest memory
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -16,13 +16,14 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
+#include "kvm-s390.h"
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
unsigned long guestaddr)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
- unsigned long origin = vcpu->kvm->arch.guest_origin;
- unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+ unsigned long origin = vcpu->arch.sie_block->gmsor;
+ unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
if (guestaddr < 2 * PAGE_SIZE)
guestaddr += prefix;
@@ -158,8 +159,8 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
const void *from, unsigned long n)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
- unsigned long origin = vcpu->kvm->arch.guest_origin;
- unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+ unsigned long origin = vcpu->arch.sie_block->gmsor;
+ unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
goto slowpath;
@@ -209,8 +210,8 @@ static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc, unsigned long n)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
- unsigned long origin = vcpu->kvm->arch.guest_origin;
- unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+ unsigned long origin = vcpu->arch.sie_block->gmsor;
+ unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
goto slowpath;
@@ -244,8 +245,8 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
unsigned long guestdest,
const void *from, unsigned long n)
{
- unsigned long origin = vcpu->kvm->arch.guest_origin;
- unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+ unsigned long origin = vcpu->arch.sie_block->gmsor;
+ unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
if (guestdest + n > memsize)
return -EFAULT;
@@ -262,8 +263,8 @@ static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc,
unsigned long n)
{
- unsigned long origin = vcpu->kvm->arch.guest_origin;
- unsigned long memsize = vcpu->kvm->arch.guest_memsize;
+ unsigned long origin = vcpu->arch.sie_block->gmsor;
+ unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
if (guestsrc + n > memsize)
return -EFAULT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 98997ccba501..ba9d8a7bc1ac 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,7 +1,7 @@
/*
* intercept.c - in-kernel handling for sie intercepts
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -128,7 +128,7 @@ static int handle_noop(struct kvm_vcpu *vcpu)
static int handle_stop(struct kvm_vcpu *vcpu)
{
- int rc;
+ int rc = 0;
vcpu->stat.exit_stop_request++;
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -141,12 +141,18 @@ static int handle_stop(struct kvm_vcpu *vcpu)
rc = -ENOTSUPP;
}
+ if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
+ vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
+ rc = SIE_INTERCEPT_RERUNVCPU;
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
+ }
+
if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
rc = -ENOTSUPP;
- } else
- rc = 0;
+ }
+
spin_unlock_bh(&vcpu->arch.local_int.lock);
return rc;
}
@@ -158,9 +164,9 @@ static int handle_validity(struct kvm_vcpu *vcpu)
vcpu->stat.exit_validity++;
if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
- <= vcpu->kvm->arch.guest_memsize - 2*PAGE_SIZE)){
+ <= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
rc = fault_in_pages_writeable((char __user *)
- vcpu->kvm->arch.guest_origin +
+ vcpu->arch.sie_block->gmsor +
vcpu->arch.sie_block->prefix,
2*PAGE_SIZE);
if (rc)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c18b21d6991c..07ced89740d7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1,7 +1,7 @@
/*
* s390host.c -- hosting zSeries kernel virtual machines
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -10,6 +10,7 @@
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
* Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Christian Ehrhardt <ehrhardt@de.ibm.com>
*/
#include <linux/compiler.h>
@@ -25,6 +26,7 @@
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
+#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -69,6 +71,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
+static unsigned long long *facilities;
/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
@@ -208,13 +211,17 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_destroy(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_destroy(vcpu);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+
+ atomic_set(&kvm->online_vcpus, 0);
+ mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
@@ -276,18 +283,13 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->gbea = 1;
}
-/* The current code can have up to 256 pages for virtio */
-#define VIRTIODESCSPACE (256ul * 4096ul)
-
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
- vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
- vcpu->kvm->arch.guest_origin +
- VIRTIODESCSPACE - 1ul;
- vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
+ set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
vcpu->arch.sie_block->ecb = 2;
vcpu->arch.sie_block->eca = 0xC1002001U;
+ vcpu->arch.sie_block->fac = (int) (long) facilities;
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
(unsigned long) vcpu);
@@ -316,8 +318,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
BUG_ON(!kvm->arch.sca);
if (!kvm->arch.sca->cpu[id].sda)
kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
- else
- BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
@@ -487,9 +487,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu_load(vcpu);
+rerun_vcpu:
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+ kvm_s390_vcpu_set_mem(vcpu);
+
/* verify, that memory has been registered */
- if (!vcpu->kvm->arch.guest_memsize) {
+ if (!vcpu->arch.sie_block->gmslm) {
vcpu_put(vcpu);
+ VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
return -EINVAL;
}
@@ -506,6 +512,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
break;
case KVM_EXIT_UNKNOWN:
+ case KVM_EXIT_INTR:
case KVM_EXIT_S390_RESET:
break;
default:
@@ -519,8 +526,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
rc = kvm_handle_sie_intercept(vcpu);
} while (!signal_pending(current) && !rc);
- if (signal_pending(current) && !rc)
+ if (rc == SIE_INTERCEPT_RERUNVCPU)
+ goto rerun_vcpu;
+
+ if (signal_pending(current) && !rc) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
rc = -EINTR;
+ }
if (rc == -ENOTSUPP) {
/* intercept cannot be handled in-kernel, prepare kvm-run */
@@ -673,6 +685,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
int user_alloc)
{
int i;
+ struct kvm_vcpu *vcpu;
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
@@ -681,7 +694,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
vmas. It is okay to mmap() and munmap() stuff in this slot after
doing this call at any time */
- if (mem->slot || kvm->arch.guest_memsize)
+ if (mem->slot)
return -EINVAL;
if (mem->guest_phys_addr)
@@ -696,36 +709,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (!user_alloc)
return -EINVAL;
- /* lock all vcpus */
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (!kvm->vcpus[i])
+ /* request update of sie control block for all available vcpus */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
continue;
- if (!mutex_trylock(&kvm->vcpus[i]->mutex))
- goto fail_out;
- }
-
- kvm->arch.guest_origin = mem->userspace_addr;
- kvm->arch.guest_memsize = mem->memory_size;
-
- /* update sie control blocks, and unlock all vcpus */
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm->vcpus[i]->arch.sie_block->gmsor =
- kvm->arch.guest_origin;
- kvm->vcpus[i]->arch.sie_block->gmslm =
- kvm->arch.guest_memsize +
- kvm->arch.guest_origin +
- VIRTIODESCSPACE - 1ul;
- mutex_unlock(&kvm->vcpus[i]->mutex);
- }
+ kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
}
return 0;
-
-fail_out:
- for (; i >= 0; i--)
- mutex_unlock(&kvm->vcpus[i]->mutex);
- return -EINVAL;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
@@ -739,11 +730,29 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
static int __init kvm_s390_init(void)
{
- return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+ int ret;
+ ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+ if (ret)
+ return ret;
+
+ /*
+ * guests can ask for up to 255+1 double words, so we need a full page
+ * to hold the maximum amount of facilities. On the other hand, we
+ * only set facilities that are known to work in KVM.
+ */
+ facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
+ if (!facilities) {
+ kvm_exit();
+ return -ENOMEM;
+ }
+ stfle(facilities, 1);
+ facilities[0] &= 0xff00fff3f0700000ULL;
+ return 0;
}
static void __exit kvm_s390_exit(void)
{
+ free_page((unsigned long) facilities);
kvm_exit();
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 748fee872323..ec5eee7c25d8 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -1,7 +1,7 @@
/*
* kvm_s390.h - definition for kvm on s390
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -9,6 +9,7 @@
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
+ * Christian Ehrhardt <ehrhardt@de.ibm.com>
*/
#ifndef ARCH_S390_KVM_S390_H
@@ -18,8 +19,13 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+/* The current code can have up to 256 pages for virtio */
+#define VIRTIODESCSPACE (256ul * 4096ul)
+
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
+/* negative values are error codes, positive values are internal conditions */
+#define SIE_INTERCEPT_RERUNVCPU (1<<0)
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
@@ -50,6 +56,30 @@ int kvm_s390_inject_vm(struct kvm *kvm,
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt *s390int);
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
+
+static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sie_block->gmslm
+ - vcpu->arch.sie_block->gmsor
+ - VIRTIODESCSPACE + 1ul;
+}
+
+static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
+{
+ struct kvm_memory_slot *mem;
+
+ down_read(&vcpu->kvm->slots_lock);
+ mem = &vcpu->kvm->memslots[0];
+
+ vcpu->arch.sie_block->gmsor = mem->userspace_addr;
+ vcpu->arch.sie_block->gmslm =
+ mem->userspace_addr +
+ (mem->npages << PAGE_SHIFT) +
+ VIRTIODESCSPACE - 1ul;
+
+ up_read(&vcpu->kvm->slots_lock);
+}
/* implemented in priv.c */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
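A quick check of the arithmetic behind the two new inlines (the symbols are the ones defined just above): kvm_s390_vcpu_set_mem() programs gmsor = mem->userspace_addr and gmslm = userspace_addr + (npages << PAGE_SHIFT) + VIRTIODESCSPACE - 1, so kvm_s390_vcpu_get_memsize() returns gmslm - gmsor - VIRTIODESCSPACE + 1 = npages << PAGE_SHIFT, i.e. the size of the single registered memory slot, without consulting the kvm->arch.guest_origin/guest_memsize values that the rest of the series stops using.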
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 93ecd06e1a74..d426aac8095d 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -158,7 +158,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
vcpu->stat.instruction_stfl++;
/* only pass the facility bits, which we can handle */
- facility_list &= 0xfe00fff3;
+ facility_list &= 0xff00fff3;
rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&facility_list, sizeof(facility_list));
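Worth noting about the new mask: 0xff00fff3 above is exactly the upper word of the 64-bit stfle mask 0xff00fff3f0700000ULL set up in kvm_s390_init(), so the STFL handler and the facility page now advertise a consistent set of facility bits to the guest.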
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 36678835034d..bdbd88ddef82 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -1,7 +1,7 @@
/*
* sigp.c - handling interprocessor communication
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -9,6 +9,7 @@
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
+ * Christian Ehrhardt <ehrhardt@de.ibm.com>
*/
#include <linux/kvm.h>
@@ -107,46 +108,57 @@ unlock:
return rc;
}
-static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
+static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
- struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
- struct kvm_s390_local_interrupt *li;
struct kvm_s390_interrupt_info *inti;
- int rc;
-
- if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
inti = kzalloc(sizeof(*inti), GFP_KERNEL);
if (!inti)
return -ENOMEM;
-
inti->type = KVM_S390_SIGP_STOP;
- spin_lock(&fi->lock);
- li = fi->local_int[cpu_addr];
- if (li == NULL) {
- rc = 3; /* not operational */
- kfree(inti);
- goto unlock;
- }
spin_lock_bh(&li->lock);
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
- if (store)
- li->action_bits |= ACTION_STORE_ON_STOP;
- li->action_bits |= ACTION_STOP_ON_STOP;
+ li->action_bits |= action;
if (waitqueue_active(&li->wq))
wake_up_interruptible(&li->wq);
spin_unlock_bh(&li->lock);
- rc = 0; /* order accepted */
+
+ return 0; /* order accepted */
+}
+
+static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
+{
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_local_interrupt *li;
+ int rc;
+
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return 3; /* not operational */
+
+ spin_lock(&fi->lock);
+ li = fi->local_int[cpu_addr];
+ if (li == NULL) {
+ rc = 3; /* not operational */
+ goto unlock;
+ }
+
+ rc = __inject_sigp_stop(li, action);
+
unlock:
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
return rc;
}
+int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ return __inject_sigp_stop(li, action);
+}
+
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
int rc;
@@ -177,9 +189,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
/* make sure that the new value is valid memory */
address = address & 0x7fffe000u;
if ((copy_from_guest(vcpu, &tmp,
- (u64) (address + vcpu->kvm->arch.guest_origin) , 1)) ||
+ (u64) (address + vcpu->arch.sie_block->gmsor) , 1)) ||
(copy_from_guest(vcpu, &tmp, (u64) (address +
- vcpu->kvm->arch.guest_origin + PAGE_SIZE), 1))) {
+ vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
*reg |= SIGP_STAT_INVALID_PARAMETER;
return 1; /* invalid parameter */
}
@@ -261,11 +273,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
break;
case SIGP_STOP:
vcpu->stat.instruction_sigp_stop++;
- rc = __sigp_stop(vcpu, cpu_addr, 0);
+ rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
break;
case SIGP_STOP_STORE_STATUS:
vcpu->stat.instruction_sigp_stop++;
- rc = __sigp_stop(vcpu, cpu_addr, 1);
+ rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP);
break;
case SIGP_SET_ARCH:
vcpu->stat.instruction_sigp_arch++;
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 3f5f680726ed..97c1eca83cc2 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -36,9 +36,11 @@ static void __udelay_disabled(unsigned long usecs)
cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
__ctl_load(cr0 , 0, 0);
mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
+ lockdep_off();
trace_hardirqs_on();
__load_psw_mask(mask);
local_irq_disable();
+ lockdep_on();
__ctl_load(cr0_saved, 0, 0);
local_tick_enable(clock_saved);
set_clock_comparator(S390_lowcore.clock_comparator);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index e2bdd7b94fd9..0fb99b0eac5b 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -10,6 +10,7 @@ config SUPERH
select EMBEDDED
select HAVE_CLK
select HAVE_IDE
+ select HAVE_LMB
select HAVE_OPROFILE
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IOREMAP_PROT if MMU
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 8ece0b5bd028..39224b57c6ef 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -61,10 +61,6 @@ config EARLY_PRINTK
select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using
the kernel command line option to toggle back and forth.
-config DEBUG_BOOTMEM
- depends on DEBUG_KERNEL
- bool "Debug BOOTMEM initialization"
-
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL && SUPERH32
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c
index 9c3a33210d61..180455642a43 100644
--- a/arch/sh/boards/mach-se/7206/io.c
+++ b/arch/sh/boards/mach-se/7206/io.c
@@ -50,7 +50,7 @@ unsigned char se7206_inb_p(unsigned long port)
unsigned short se7206_inw(unsigned long port)
{
- return *port2adr(port);;
+ return *port2adr(port);
}
void se7206_outb(unsigned char value, unsigned long port)
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 9cd04bd558b8..8fed45a2fb85 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -19,10 +19,13 @@
#include <linux/smc91x.h>
#include <linux/gpio.h>
#include <linux/input.h>
+#include <linux/usb/r8a66597.h>
#include <video/sh_mobile_lcdc.h>
#include <media/sh_mobile_ceu.h>
#include <asm/io.h>
#include <asm/heartbeat.h>
+#include <asm/sh_eth.h>
+#include <asm/clock.h>
#include <asm/sh_keysc.h>
#include <cpu/sh7724.h>
#include <mach-se/mach/se7724.h>
@@ -272,6 +275,62 @@ static struct platform_device keysc_device = {
},
};
+/* SH Eth */
+static struct resource sh_eth_resources[] = {
+ [0] = {
+ .start = SH_ETH_ADDR,
+ .end = SH_ETH_ADDR + 0x1FC,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 91,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+ },
+};
+
+struct sh_eth_plat_data sh_eth_plat = {
+ .phy = 0x1f, /* SMSC LAN8187 */
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+};
+
+static struct platform_device sh_eth_device = {
+ .name = "sh-eth",
+ .id = 0,
+ .dev = {
+ .platform_data = &sh_eth_plat,
+ },
+ .num_resources = ARRAY_SIZE(sh_eth_resources),
+ .resource = sh_eth_resources,
+};
+
+static struct r8a66597_platdata sh7724_usb0_host_data = {
+};
+
+static struct resource sh7724_usb0_host_resources[] = {
+ [0] = {
+ .start = 0xa4d80000,
+ .end = 0xa4d800ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 65,
+ .end = 65,
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+};
+
+static struct platform_device sh7724_usb0_host_device = {
+ .name = "r8a66597_hcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = NULL, /* do not use dma */
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &sh7724_usb0_host_data,
+ },
+ .num_resources = ARRAY_SIZE(sh7724_usb0_host_resources),
+ .resource = sh7724_usb0_host_resources,
+};
+
static struct platform_device *ms7724se_devices[] __initdata = {
&heartbeat_device,
&smc91x_eth_device,
@@ -280,11 +339,62 @@ static struct platform_device *ms7724se_devices[] __initdata = {
&ceu0_device,
&ceu1_device,
&keysc_device,
+ &sh_eth_device,
+ &sh7724_usb0_host_device,
};
+#define EEPROM_OP 0xBA206000
+#define EEPROM_ADR 0xBA206004
+#define EEPROM_DATA 0xBA20600C
+#define EEPROM_STAT 0xBA206010
+#define EEPROM_STRT 0xBA206014
+static int __init sh_eth_is_eeprom_ready(void)
+{
+ int t = 10000;
+
+ while (t--) {
+ if (!ctrl_inw(EEPROM_STAT))
+ return 1;
+ cpu_relax();
+ }
+
+ printk(KERN_ERR "ms7724se cannot access eeprom\n");
+ return 0;
+}
+
+static void __init sh_eth_init(void)
+{
+ int i;
+ u16 mac[3];
+
+ /* check EEPROM status */
+ if (!sh_eth_is_eeprom_ready())
+ return;
+
+ /* read MAC addr from EEPROM */
+ for (i = 0 ; i < 3 ; i++) {
+ ctrl_outw(0x0, EEPROM_OP); /* read */
+ ctrl_outw(i*2, EEPROM_ADR);
+ ctrl_outw(0x1, EEPROM_STRT);
+ if (!sh_eth_is_eeprom_ready())
+ return;
+
+ mac[i] = ctrl_inw(EEPROM_DATA);
+ mac[i] = ((mac[i] & 0xFF) << 8) | (mac[i] >> 8); /* swap */
+ }
+
+ /* reset sh-eth */
+ ctrl_outl(0x1, SH_ETH_ADDR + 0x0);
+
+ /* set MAC addr */
+ ctrl_outl(((mac[0] << 16) | (mac[1])), SH_ETH_MAHR);
+ ctrl_outl((mac[2]), SH_ETH_MALR);
+}
+
#define SW4140 0xBA201000
#define FPGA_OUT 0xBA200400
#define PORT_HIZA 0xA4050158
+#define PORT_MSELCRB 0xA4050182
#define SW41_A 0x0100
#define SW41_B 0x0200
@@ -294,6 +404,7 @@ static struct platform_device *ms7724se_devices[] __initdata = {
#define SW41_F 0x2000
#define SW41_G 0x4000
#define SW41_H 0x8000
+
static int __init devices_setup(void)
{
u16 sw = ctrl_inw(SW4140); /* select camera, monitor */
@@ -302,9 +413,16 @@ static int __init devices_setup(void)
ctrl_outw(ctrl_inw(FPGA_OUT) &
~((1 << 1) | /* LAN */
(1 << 6) | /* VIDEO DAC */
- (1 << 12)), /* USB0 */
+ (1 << 12) | /* USB0 */
+ (1 << 14)), /* RMII */
FPGA_OUT);
+ /* turn on USB clocks, use external clock */
+ ctrl_outw((ctrl_inw(PORT_MSELCRB) & ~0xc000) | 0x8000, PORT_MSELCRB);
+
+ /* enable USB0 port */
+ ctrl_outw(0x0600, 0xa40501d4);
+
/* enable IRQ 0,1,2 */
gpio_request(GPIO_FN_INTC_IRQ0, NULL);
gpio_request(GPIO_FN_INTC_IRQ1, NULL);
@@ -374,7 +492,7 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_VIO0_CLK, NULL);
gpio_request(GPIO_FN_VIO0_FLD, NULL);
gpio_request(GPIO_FN_VIO0_HD, NULL);
- platform_resource_setup_memory(&ceu0_device, "ceu", 4 << 20);
+ platform_resource_setup_memory(&ceu0_device, "ceu0", 4 << 20);
/* enable CEU1 */
gpio_request(GPIO_FN_VIO1_D7, NULL);
@@ -389,7 +507,7 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_VIO1_HD, NULL);
gpio_request(GPIO_FN_VIO1_VD, NULL);
gpio_request(GPIO_FN_VIO1_CLK, NULL);
- platform_resource_setup_memory(&ceu1_device, "ceu", 4 << 20);
+ platform_resource_setup_memory(&ceu1_device, "ceu1", 4 << 20);
/* KEYSC */
gpio_request(GPIO_FN_KEYOUT5_IN5, NULL);
@@ -404,6 +522,28 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_KEYOUT1, NULL);
gpio_request(GPIO_FN_KEYOUT0, NULL);
+ /*
+ * enable SH-Eth
+ *
+ * please remove J33 pin from your board !!
+ *
+ * the ms7724 board should not use the GPIO_FN_LNKSTA pin,
+ * so PTX5 is configured as an input pin here instead
+ */
+ gpio_request(GPIO_FN_RMII_RXD0, NULL);
+ gpio_request(GPIO_FN_RMII_RXD1, NULL);
+ gpio_request(GPIO_FN_RMII_TXD0, NULL);
+ gpio_request(GPIO_FN_RMII_TXD1, NULL);
+ gpio_request(GPIO_FN_RMII_REF_CLK, NULL);
+ gpio_request(GPIO_FN_RMII_TX_EN, NULL);
+ gpio_request(GPIO_FN_RMII_RX_ER, NULL);
+ gpio_request(GPIO_FN_RMII_CRS_DV, NULL);
+ gpio_request(GPIO_FN_MDIO, NULL);
+ gpio_request(GPIO_FN_MDC, NULL);
+ gpio_request(GPIO_PTX5, NULL);
+ gpio_direction_input(GPIO_PTX5);
+ sh_eth_init();
+
if (sw & SW41_B) {
/* SVGA */
lcdc_info.ch[0].lcd_cfg.xres = 800;
@@ -437,7 +577,7 @@ static int __init devices_setup(void)
}
return platform_add_devices(ms7724se_devices,
- ARRAY_SIZE(ms7724se_devices));
+ ARRAY_SIZE(ms7724se_devices));
}
device_initcall(devices_setup);
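For illustration of the MAHR/MALR packing done in sh_eth_init() above (the EEPROM words are made-up values): if the three byte-swapped words read back as mac[] = { 0x0002, 0xcb01, 0x2345 }, then MAHR is written with (0x0002 << 16) | 0xcb01 = 0x0002cb01 and MALR with 0x2345, i.e. the station address 00:02:cb:01:23:45.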
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index da627d22c009..b18cfd39cac6 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -309,7 +309,7 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttySC0,115200 earlyprintk=serial ip=on root=/dev/nfs ip=dhcp"
+CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 earlyprintk=serial ip=on root=/dev/nfs ip=dhcp"
#
# Bus options
@@ -858,7 +858,35 @@ CONFIG_VIDEO_SH_MOBILE_CEU=y
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_SH_MOBILE_LCDC=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -870,6 +898,27 @@ CONFIG_VIDEO_SH_MOBILE_CEU=y
# Console display driver support
#
CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
+# CONFIG_FONT_8x16 is not set
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_MINI_4x6=y
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_LOGO_SUPERH_MONO is not set
+CONFIG_LOGO_SUPERH_VGA16=y
+# CONFIG_LOGO_SUPERH_CLUT224 is not set
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index 3840270283e4..3ee783a0a075 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.30
-# Thu Jun 18 16:09:05 2009
+# Mon Jun 29 16:28:43 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -88,10 +91,12 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_COUNTERS=y
#
# Performance Counters
#
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
@@ -107,6 +112,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -119,7 +128,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -584,7 +593,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -624,7 +632,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_AX88796 is not set
# CONFIG_STNIC is not set
-# CONFIG_SH_ETH is not set
+CONFIG_SH_ETH=y
CONFIG_SMC91X=y
# CONFIG_ENC28J60 is not set
# CONFIG_ETHOC is not set
@@ -801,6 +809,11 @@ CONFIG_SPI_BITBANG=y
#
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_GPIO_SYSFS is not set
@@ -851,6 +864,8 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@@ -1196,6 +1211,7 @@ CONFIG_RTC_DRV_PCF8563=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1260,6 +1276,7 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
CONFIG_FILE_LOCKING=y
diff --git a/arch/sh/include/asm/hwblk.h b/arch/sh/include/asm/hwblk.h
new file mode 100644
index 000000000000..51a46f496639
--- /dev/null
+++ b/arch/sh/include/asm/hwblk.h
@@ -0,0 +1,61 @@
+#ifndef __ASM_SH_HWBLK_H
+#define __ASM_SH_HWBLK_H
+
+#include <asm/clock.h>
+#include <asm/io.h>
+
+#define HWBLK_AREA_FLAG_PARENT (1 << 0) /* valid parent */
+
+#define HWBLK_AREA(_flags, _parent) \
+{ \
+ .flags = _flags, \
+ .parent = _parent, \
+}
+
+struct hwblk_area {
+ unsigned long cnt;
+ unsigned char parent;
+ unsigned char flags;
+};
+
+#define HWBLK(_mstp, _bit, _area) \
+{ \
+ .mstp = (void __iomem *)_mstp, \
+ .bit = _bit, \
+ .area = _area, \
+}
+
+struct hwblk {
+ void __iomem *mstp;
+ unsigned char bit;
+ unsigned char area;
+ unsigned long cnt;
+};
+
+struct hwblk_info {
+ struct hwblk_area *areas;
+ int nr_areas;
+ struct hwblk *hwblks;
+ int nr_hwblks;
+};
+
+/* Should be defined by processor-specific code */
+int arch_hwblk_init(void);
+int arch_hwblk_sleep_mode(void);
+
+int hwblk_register(struct hwblk_info *info);
+int hwblk_init(void);
+
+/* allow clocks to enable and disable hardware blocks */
+#define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags) \
+{ \
+ .name = _name, \
+ .id = _id, \
+ .parent = _parent, \
+ .arch_flags = _hwblk, \
+ .flags = _flags, \
+}
+
+int sh_hwblk_clk_register(struct clk *clks, int nr);
+
+#endif /* __ASM_SH_HWBLK_H */
diff --git a/arch/sh/include/asm/lmb.h b/arch/sh/include/asm/lmb.h
new file mode 100644
index 000000000000..9b437f657ffa
--- /dev/null
+++ b/arch/sh/include/asm/lmb.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH_LMB_H
+#define __ASM_SH_LMB_H
+
+#define LMB_REAL_LIMIT 0
+
+#endif /* __ASM_SH_LMB_H */
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index d3633f513ebc..4163950cd1c6 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -10,7 +10,6 @@
or architectures with incomplete PCI setup by the loader */
#define pcibios_assign_all_busses() 1
-#define pcibios_scan_all_fns(a, b) 0
/*
* A board can define one or more PCI channels that represent built-in (or
diff --git a/arch/sh/include/asm/perf_counter.h b/arch/sh/include/asm/perf_counter.h
index a8153c2aa6fa..d8e6bb9c0ccc 100644
--- a/arch/sh/include/asm/perf_counter.h
+++ b/arch/sh/include/asm/perf_counter.h
@@ -2,6 +2,8 @@
#define __ASM_SH_PERF_COUNTER_H
/* SH only supports software counters through this interface. */
-#define set_perf_counter_pending() do { } while (0)
+static inline void set_perf_counter_pending(void) {}
+
+#define PERF_COUNTER_INDEX_OFFSET 0
#endif /* __ASM_SH_PERF_COUNTER_H */
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index ca64f43abe67..53ef26ced75f 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -44,7 +44,6 @@ void plat_send_ipi(unsigned int cpu, unsigned int message);
void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#else
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
index b1b995370e79..5c8ea28ff7a4 100644
--- a/arch/sh/include/asm/suspend.h
+++ b/arch/sh/include/asm/suspend.h
@@ -10,6 +10,15 @@ struct swsusp_arch_regs {
struct pt_regs user_regs;
unsigned long bank1_regs[8];
};
+
+void sh_mobile_call_standby(unsigned long mode);
+
+#ifdef CONFIG_CPU_IDLE
+void sh_mobile_setup_cpuidle(void);
+#else
+static inline void sh_mobile_setup_cpuidle(void) {}
+#endif
+
#endif
/* flags passed to assembly suspend code */
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
index 5bc34681d994..6f83f2cc45c1 100644
--- a/arch/sh/include/asm/syscall_32.h
+++ b/arch/sh/include/asm/syscall_32.h
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/err.h>
#include <asm/ptrace.h>
/* The system call number is given by the user in R3 */
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index b69ee850906d..b2180f8752ce 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -31,7 +31,6 @@
#define cpu_to_node(cpu) ((void)(cpu),0)
#define parent_node(node) ((void)(node),0)
-#define node_to_cpumask(node) ((void)node, cpu_online_map)
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h
index 738ea43c5038..48560407cbe1 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7722.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h
@@ -221,4 +221,18 @@ enum {
GPIO_FN_KEYOUT3, GPIO_FN_KEYOUT4_IN6, GPIO_FN_KEYOUT5_IN5,
};
+enum {
+ HWBLK_UNKNOWN = 0,
+ HWBLK_TLB, HWBLK_IC, HWBLK_OC, HWBLK_URAM, HWBLK_XYMEM,
+ HWBLK_INTC, HWBLK_DMAC, HWBLK_SHYWAY, HWBLK_HUDI,
+ HWBLK_UBC, HWBLK_TMU, HWBLK_CMT, HWBLK_RWDT, HWBLK_FLCTL,
+ HWBLK_SCIF0, HWBLK_SCIF1, HWBLK_SCIF2, HWBLK_SIO,
+ HWBLK_SIOF0, HWBLK_SIOF1, HWBLK_IIC, HWBLK_RTC,
+ HWBLK_TPU, HWBLK_IRDA, HWBLK_SDHI, HWBLK_SIM, HWBLK_KEYSC,
+ HWBLK_TSIF, HWBLK_USBF, HWBLK_2DG, HWBLK_SIU, HWBLK_VOU,
+ HWBLK_JPU, HWBLK_BEU, HWBLK_CEU, HWBLK_VEU, HWBLK_VPU,
+ HWBLK_LCDC,
+ HWBLK_NR,
+};
+
#endif /* __ASM_SH7722_H__ */
diff --git a/arch/sh/include/mach-se/mach/se7724.h b/arch/sh/include/mach-se/mach/se7724.h
index 74164b60d0db..29514a39d0f5 100644
--- a/arch/sh/include/mach-se/mach/se7724.h
+++ b/arch/sh/include/mach-se/mach/se7724.h
@@ -20,6 +20,11 @@
*/
#include <asm/addrspace.h>
+/* SH Eth */
+#define SH_ETH_ADDR (0xA4600000)
+#define SH_ETH_MAHR (SH_ETH_ADDR + 0x1C0)
+#define SH_ETH_MALR (SH_ETH_ADDR + 0x1C8)
+
#define PA_LED (0xba203000) /* 8bit LED */
#define IRQ_MODE (0xba200010)
#define IRQ0_SR (0xba200014)
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index eecad7cbd61e..3d6b9312dc47 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -19,4 +19,4 @@ obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o
-obj-y += irq/ init.o clock.o
+obj-y += irq/ init.o clock.o hwblk.o
diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c
new file mode 100644
index 000000000000..7c3a73deff24
--- /dev/null
+++ b/arch/sh/kernel/cpu/hwblk.c
@@ -0,0 +1,130 @@
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <asm/clock.h>
+
+static DEFINE_SPINLOCK(hwblk_lock);
+
+static void hwblk_area_inc(struct hwblk_info *info, int area)
+{
+ struct hwblk_area *hap = info->areas + area;
+
+ hap->cnt++;
+ if (hap->cnt == 1)
+ if (hap->flags & HWBLK_AREA_FLAG_PARENT)
+ hwblk_area_inc(info, hap->parent);
+}
+
+static void hwblk_area_dec(struct hwblk_info *info, int area)
+{
+ struct hwblk_area *hap = info->areas + area;
+
+ if (hap->cnt == 1)
+ if (hap->flags & HWBLK_AREA_FLAG_PARENT)
+ hwblk_area_dec(info, hap->parent);
+ hap->cnt--;
+}
+
+static void hwblk_enable(struct hwblk_info *info, int hwblk)
+{
+ struct hwblk *hp = info->hwblks + hwblk;
+ unsigned long tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwblk_lock, flags);
+
+ hp->cnt++;
+ if (hp->cnt == 1) {
+ hwblk_area_inc(info, hp->area);
+
+ tmp = __raw_readl(hp->mstp);
+ tmp &= ~(1 << hp->bit);
+ __raw_writel(tmp, hp->mstp);
+ }
+
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+static void hwblk_disable(struct hwblk_info *info, int hwblk)
+{
+ struct hwblk *hp = info->hwblks + hwblk;
+ unsigned long tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwblk_lock, flags);
+
+ if (hp->cnt == 1) {
+ hwblk_area_dec(info, hp->area);
+
+ tmp = __raw_readl(hp->mstp);
+ tmp |= 1 << hp->bit;
+ __raw_writel(tmp, hp->mstp);
+ }
+ hp->cnt--;
+
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+static struct hwblk_info *hwblk_info;
+
+int __init hwblk_register(struct hwblk_info *info)
+{
+ hwblk_info = info;
+ return 0;
+}
+
+int __init __weak arch_hwblk_init(void)
+{
+ return 0;
+}
+
+int __weak arch_hwblk_sleep_mode(void)
+{
+ return SUSP_SH_SLEEP;
+}
+
+int __init hwblk_init(void)
+{
+ return arch_hwblk_init();
+}
+
+/* allow clocks to enable and disable hardware blocks */
+static int sh_hwblk_clk_enable(struct clk *clk)
+{
+ if (!hwblk_info)
+ return -ENOENT;
+
+ hwblk_enable(hwblk_info, clk->arch_flags);
+ return 0;
+}
+
+static void sh_hwblk_clk_disable(struct clk *clk)
+{
+ if (hwblk_info)
+ hwblk_disable(hwblk_info, clk->arch_flags);
+}
+
+static struct clk_ops sh_hwblk_clk_ops = {
+ .enable = sh_hwblk_clk_enable,
+ .disable = sh_hwblk_clk_disable,
+ .recalc = followparent_recalc,
+};
+
+int __init sh_hwblk_clk_register(struct clk *clks, int nr)
+{
+ struct clk *clkp;
+ int ret = 0;
+ int k;
+
+ for (k = 0; !ret && (k < nr); k++) {
+ clkp = clks + k;
+ clkp->ops = &sh_hwblk_clk_ops;
+ ret |= clk_register(clkp);
+ }
+
+ return ret;
+}
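To connect the pieces above: a processor-specific file supplies the power-area and block tables and hands them to hwblk_register() from arch_hwblk_init(). A minimal sketch with made-up table contents (the real SH7722 tables appear further down):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/hwblk.h>

/* Hypothetical tables: one root power area and one block in MSTPCR0. */
static struct hwblk_area my_areas[] = {
	HWBLK_AREA(0, 0),
};

static struct hwblk my_hwblks[] = {
	HWBLK(0xa4150030, 15, 0),	/* MSTPCR0, bit 15 */
};

static struct hwblk_info my_hwblk_info = {
	.areas		= my_areas,
	.nr_areas	= ARRAY_SIZE(my_areas),
	.hwblks		= my_hwblks,
	.nr_hwblks	= ARRAY_SIZE(my_hwblks),
};

int __init arch_hwblk_init(void)
{
	return hwblk_register(&my_hwblk_info);
}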
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index ebdd391d5f42..3cafda696378 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -25,7 +25,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o
clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o
-clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o
clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o
clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o
clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 40f859354f79..1fa9e1dd1cc8 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -22,6 +22,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7722.h>
/* SH7722 registers */
#define FRQCR 0xa4150000
@@ -140,35 +142,37 @@ struct clk div6_clks[] = {
SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
};
-#define MSTP(_str, _parent, _reg, _bit, _flags) \
- SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags)
+#define R_CLK &r_clk
+#define P_CLK &div4_clks[DIV4_P]
+#define B_CLK &div4_clks[DIV4_B]
+#define U_CLK &div4_clks[DIV4_U]
static struct clk mstp_clks[] = {
- MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
- MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0),
- MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0),
- MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0),
-
- MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0),
- MSTP("rtc0", &r_clk, MSTPCR1, 8, 0),
-
- MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0),
- MSTP("keysc0", &r_clk, MSTPCR2, 14, 0),
- MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0),
- MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 9, 0),
- MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0),
- MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0),
- MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
- MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0),
- MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0),
- MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
- MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
- MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0),
+ SH_HWBLK_CLK("uram0", -1, U_CLK, HWBLK_URAM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("xymem0", -1, B_CLK, HWBLK_XYMEM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU, 0),
+ SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
+ SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
+ SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
+ SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
+ SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
+ SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
+
+ SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
+ SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
+
+ SH_HWBLK_CLK("sdhi0", -1, P_CLK, HWBLK_SDHI, 0),
+ SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
+ SH_HWBLK_CLK("usbf0", -1, P_CLK, HWBLK_USBF, 0),
+ SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
+ SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
+ SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
+ SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
+ SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
+ SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("lcdc0", -1, P_CLK, HWBLK_LCDC, 0),
};
int __init arch_clk_init(void)
@@ -191,7 +195,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
return ret;
}
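
The hunk above stops registering the SH7722 module-stop clocks by raw register and bit (SH_CLK_MSTP32) and instead keys each clock to a hardware-block ID (SH_HWBLK_CLK), so enabling a clock also counts as a user of that block's power area. Below is a minimal user-space sketch of that indirection; struct blk, mstpcr[] and area_cnt[] are invented for the demo and are not the kernel API.

/*
 * Illustrative model: a clock enable both clears the module-stop bit and
 * counts as a user of the block's power area.
 */
#include <stdio.h>

enum { CORE_AREA, CORE_AREA_BM, SUB_AREA, NR_AREAS };

struct blk { int reg; int bit; int area; };

static unsigned int mstpcr[3] = { ~0u, ~0u, ~0u };	/* all modules stopped */
static int area_cnt[NR_AREAS];

static void blk_enable(const struct blk *b)
{
	mstpcr[b->reg] &= ~(1u << b->bit);	/* clear stop bit: clock runs */
	area_cnt[b->area]++;			/* the area gains an active user */
}

static void blk_disable(const struct blk *b)
{
	mstpcr[b->reg] |= 1u << b->bit;		/* set stop bit: clock gated */
	area_cnt[b->area]--;
}

int main(void)
{
	struct blk lcdc = { 2, 0, CORE_AREA_BM };	/* mirrors HWBLK(MSTPCR2, 0, ...) */

	blk_enable(&lcdc);
	printf("MSTPCR2=%08x, bus-master area users=%d\n", mstpcr[2], area_cnt[CORE_AREA_BM]);
	blk_disable(&lcdc);
	printf("MSTPCR2=%08x, bus-master area users=%d\n", mstpcr[2], area_cnt[CORE_AREA_BM]);
	return 0;
}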
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
new file mode 100644
index 000000000000..00a1c02d82b1
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
@@ -0,0 +1,106 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
+ *
+ * SH7722 hardware block support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7722.h>
+
+/* SH7722 registers */
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+
+/* SH7722 Power Domains */
+enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
+static struct hwblk_area sh7722_hwblk_area[] = {
+ [CORE_AREA] = HWBLK_AREA(0, 0),
+ [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
+ [SUB_AREA] = HWBLK_AREA(0, 0),
+};
+
+/* Table mapping HWBLK to Module Stop Bit and Power Domain */
+static struct hwblk sh7722_hwblk[HWBLK_NR] = {
+ [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
+ [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
+ [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
+ [HWBLK_URAM] = HWBLK(MSTPCR0, 28, CORE_AREA),
+ [HWBLK_XYMEM] = HWBLK(MSTPCR0, 26, CORE_AREA),
+ [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
+ [HWBLK_DMAC] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
+ [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
+ [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
+ [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
+ [HWBLK_TMU] = HWBLK(MSTPCR0, 15, CORE_AREA),
+ [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
+ [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
+ [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA),
+ [HWBLK_SCIF0] = HWBLK(MSTPCR0, 7, CORE_AREA),
+ [HWBLK_SCIF1] = HWBLK(MSTPCR0, 6, CORE_AREA),
+ [HWBLK_SCIF2] = HWBLK(MSTPCR0, 5, CORE_AREA),
+ [HWBLK_SIO] = HWBLK(MSTPCR0, 3, CORE_AREA),
+ [HWBLK_SIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
+ [HWBLK_SIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
+
+ [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA),
+ [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA),
+
+ [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
+ [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
+ [HWBLK_SDHI] = HWBLK(MSTPCR2, 18, CORE_AREA),
+ [HWBLK_SIM] = HWBLK(MSTPCR2, 16, CORE_AREA),
+ [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA),
+ [HWBLK_TSIF] = HWBLK(MSTPCR2, 13, SUB_AREA),
+ [HWBLK_USBF] = HWBLK(MSTPCR2, 11, CORE_AREA),
+ [HWBLK_2DG] = HWBLK(MSTPCR2, 9, CORE_AREA_BM),
+ [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA),
+ [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
+ [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
+ [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
+ [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
+ [HWBLK_VEU] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
+ [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
+ [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
+};
+
+static struct hwblk_info sh7722_hwblk_info = {
+ .areas = sh7722_hwblk_area,
+ .nr_areas = ARRAY_SIZE(sh7722_hwblk_area),
+ .hwblks = sh7722_hwblk,
+ .nr_hwblks = ARRAY_SIZE(sh7722_hwblk),
+};
+
+int arch_hwblk_sleep_mode(void)
+{
+ if (!sh7722_hwblk_area[CORE_AREA].cnt)
+ return SUSP_SH_STANDBY | SUSP_SH_SF;
+
+ if (!sh7722_hwblk_area[CORE_AREA_BM].cnt)
+ return SUSP_SH_SLEEP | SUSP_SH_SF;
+
+ return SUSP_SH_SLEEP;
+}
+
+int __init arch_hwblk_init(void)
+{
+ return hwblk_register(&sh7722_hwblk_info);
+}
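
arch_hwblk_sleep_mode() above picks the deepest idle mode the active hardware blocks still permit: no users in the core area allows full standby with self-refresh, no users in the bus-master area still allows sleep with self-refresh, and otherwise only plain sleep is safe. A small standalone model of that decision follows; the flag values are placeholders, not the real SUSP_SH_* constants.

/*
 * Minimal model of the decision in arch_hwblk_sleep_mode(). The flag
 * values are made up; only the ordering of the checks matters.
 */
#include <stdio.h>

#define SLEEP	0x1
#define STANDBY	0x2
#define SF	0x4	/* keep SDRAM in self-refresh */

static int sleep_mode(int core_users, int core_bm_users)
{
	if (core_users == 0)
		return STANDBY | SF;	/* nothing running: deepest mode */
	if (core_bm_users == 0)
		return SLEEP | SF;	/* no bus masters: memory may self-refresh */
	return SLEEP;			/* a DMA-capable block is active */
}

int main(void)
{
	printf("idle system       -> %#x\n", sleep_mode(0, 0));
	printf("CPU-only users    -> %#x\n", sleep_mode(2, 0));
	printf("bus master active -> %#x\n", sleep_mode(3, 1));
	return 0;
}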
diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile
index 08bfa7c7db29..e8a5111e848a 100644
--- a/arch/sh/kernel/cpu/shmobile/Makefile
+++ b/arch/sh/kernel/cpu/shmobile/Makefile
@@ -4,3 +4,4 @@
# Power Management & Sleep mode
obj-$(CONFIG_PM) += pm.o sleep.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
new file mode 100644
index 000000000000..4afdd975cc66
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -0,0 +1,102 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/cpuidle.c
+ *
+ * Cpuidle support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/cpuidle.h>
+#include <asm/suspend.h>
+#include <asm/uaccess.h>
+#include <asm/hwblk.h>
+
+static unsigned long cpuidle_mode[] = {
+ SUSP_SH_SLEEP, /* regular sleep mode */
+ SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */
+};
+
+static int cpuidle_sleep_enter(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ unsigned long allowed_mode = arch_hwblk_sleep_mode();
+ ktime_t before, after;
+ int requested_state = state - &dev->states[0];
+ int allowed_state;
+ int k;
+
+ /* convert allowed mode to allowed state */
+ for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
+ if (cpuidle_mode[k] == allowed_mode)
+ break;
+
+ allowed_state = k;
+
+ /* take the following into account for sleep mode selection:
+ * - allowed_state: best mode allowed by hardware (clock deps)
+ * - requested_state: best mode allowed by software (latencies)
+ */
+ k = min_t(int, allowed_state, requested_state);
+
+ dev->last_state = &dev->states[k];
+ before = ktime_get();
+ sh_mobile_call_standby(cpuidle_mode[k]);
+ after = ktime_get();
+ return ktime_to_ns(ktime_sub(after, before)) >> 10;
+}
+
+static struct cpuidle_device cpuidle_dev;
+static struct cpuidle_driver cpuidle_driver = {
+ .name = "sh_idle",
+ .owner = THIS_MODULE,
+};
+
+void sh_mobile_setup_cpuidle(void)
+{
+ struct cpuidle_device *dev = &cpuidle_dev;
+ struct cpuidle_state *state;
+ int i;
+
+ cpuidle_register_driver(&cpuidle_driver);
+
+ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+ dev->states[i].name[0] = '\0';
+ dev->states[i].desc[0] = '\0';
+ }
+
+ i = CPUIDLE_DRIVER_STATE_START;
+
+ state = &dev->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
+ strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
+ state->exit_latency = 1;
+ state->target_residency = 1 * 2;
+ state->power_usage = 3;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_SHALLOW;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+
+ dev->safe_state = state;
+
+ state = &dev->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
+ strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN);
+ state->exit_latency = 100;
+ state->target_residency = 1 * 2;
+ state->power_usage = 1;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+
+ dev->state_count = i;
+
+ cpuidle_register_device(dev);
+}
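
cpuidle_sleep_enter() above clamps the state it enters by two limits: the deepest state the hardware-block counters allow (arch_hwblk_sleep_mode()) and the deepest state the cpuidle governor requested based on latency. A standalone sketch of that clamp, with placeholder mode values:

/*
 * Sketch of the clamp in cpuidle_sleep_enter(): hardware constraints and
 * the governor's request each give a deepest-allowed state index, and the
 * shallower of the two is entered.
 */
#include <stdio.h>

static const unsigned long mode[] = {
	0x1,		/* C0: sleep */
	0x1 | 0x4,	/* C1: sleep + self refresh */
};
#define NR_MODES ((int)(sizeof(mode) / sizeof(mode[0])))

static int pick_state(unsigned long allowed_mode, int requested_state)
{
	int k;

	/* convert the allowed mode to a state index, defaulting to 0 */
	for (k = NR_MODES - 1; k > 0; k--)
		if (mode[k] == allowed_mode)
			break;

	return requested_state < k ? requested_state : k;
}

int main(void)
{
	printf("hw allows C1, governor asks C1 -> C%d\n", pick_state(0x5, 1));
	printf("hw allows C0, governor asks C1 -> C%d\n", pick_state(0x1, 1));
	printf("hw allows C1, governor asks C0 -> C%d\n", pick_state(0x5, 0));
	return 0;
}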
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index 8c067adf6830..de078d24ce56 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -1,5 +1,5 @@
/*
- * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c
+ * arch/sh/kernel/cpu/shmobile/pm.c
*
* Power management support code for SuperH Mobile
*
@@ -32,20 +32,17 @@
*
* R-standby mode is unsupported, but will be added in the future
* U-standby mode is low priority since it needs bootloader hacks
- *
- * All modes should be tied in with cpuidle. But before that can
- * happen we need to keep track of enabled hardware blocks so we
- * can avoid entering sleep modes that stop clocks to hardware
- * blocks that are in use even though the cpu core is idle.
*/
+#define ILRAM_BASE 0xe5200000
+
extern const unsigned char sh_mobile_standby[];
extern const unsigned int sh_mobile_standby_size;
-static void sh_mobile_call_standby(unsigned long mode)
+void sh_mobile_call_standby(unsigned long mode)
{
extern void *vbr_base;
- void *onchip_mem = (void *)0xe5200000; /* ILRAM */
+ void *onchip_mem = (void *)ILRAM_BASE;
void (*standby_onchip_mem)(unsigned long) = onchip_mem;
/* Note: Wake up from sleep may generate exceptions!
@@ -55,11 +52,6 @@ static void sh_mobile_call_standby(unsigned long mode)
if (mode & SUSP_SH_SF)
asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory");
- /* Copy the assembly snippet to the otherwise ununsed ILRAM */
- memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
- wmb();
- ctrl_barrier();
-
/* Let assembly snippet in on-chip memory handle the rest */
standby_onchip_mem(mode);
@@ -85,7 +77,15 @@ static struct platform_suspend_ops sh_pm_ops = {
static int __init sh_pm_init(void)
{
+ void *onchip_mem = (void *)ILRAM_BASE;
+
+ /* Copy the assembly snippet to the otherwise unused ILRAM */
+ memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
+ wmb();
+ ctrl_barrier();
+
suspend_set_ops(&sh_pm_ops);
+ sh_mobile_setup_cpuidle();
return 0;
}
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index dd38338553ef..ceb409bf7741 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -30,6 +30,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/lmb.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -233,39 +234,45 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
void __init setup_bootmem_allocator(unsigned long free_pfn)
{
unsigned long bootmap_size;
+ unsigned long bootmap_pages, bootmem_paddr;
+ u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
+ int i;
+
+ bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+ bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
/*
* Find a proper area for the bootmem bitmap. After this
* bootstrap step all allocations (until the page allocator
* is intact) must be done via bootmem_alloc().
*/
- bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
+ bootmem_paddr >> PAGE_SHIFT,
min_low_pfn, max_low_pfn);
- __add_active_range(0, min_low_pfn, max_low_pfn);
- register_bootmem_low_pages();
-
- node_set_online(0);
+ /* Add active regions with valid PFNs. */
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ unsigned long start_pfn, end_pfn;
+ start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+ end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+ __add_active_range(0, start_pfn, end_pfn);
+ }
/*
- * Reserve the kernel text and
- * Reserve the bootmem bitmap. We do this in two steps (first step
- * was init_bootmem()), because this catches the (definitely buggy)
- * case of us accidentally initializing the bootmem allocator with
- * an invalid RAM area.
+ * Add all physical memory to the bootmem map and mark each
+ * area as present.
*/
- reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
- (PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) -
- (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET),
- BOOTMEM_DEFAULT);
+ register_bootmem_low_pages();
- /*
- * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
- */
- if (CONFIG_ZERO_PAGE_OFFSET != 0)
- reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
+ /* Reserve the sections we're already using. */
+ for (i = 0; i < lmb.reserved.cnt; i++)
+ reserve_bootmem(lmb.reserved.region[i].base,
+ lmb_size_bytes(&lmb.reserved, i),
BOOTMEM_DEFAULT);
+ node_set_online(0);
+
sparse_memory_present_with_active_regions(0);
#ifdef CONFIG_BLK_DEV_INITRD
@@ -296,12 +303,37 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
static void __init setup_memory(void)
{
unsigned long start_pfn;
+ u64 base = min_low_pfn << PAGE_SHIFT;
+ u64 size = (max_low_pfn << PAGE_SHIFT) - base;
/*
* Partially used pages are not usable - thus
* we are rounding upwards:
*/
start_pfn = PFN_UP(__pa(_end));
+
+ lmb_add(base, size);
+
+ /*
+ * Reserve the kernel text and
+ * Reserve the bootmem bitmap. We do this in two steps (first step
+ * was init_bootmem()), because this catches the (definitely buggy)
+ * case of us accidentally initializing the bootmem allocator with
+ * an invalid RAM area.
+ */
+ lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+ (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+ (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+ /*
+ * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+ */
+ if (CONFIG_ZERO_PAGE_OFFSET != 0)
+ lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+ lmb_analyze();
+ lmb_dump_all();
+
setup_bootmem_allocator(start_pfn);
}
#else
@@ -402,6 +434,7 @@ void __init setup_arch(char **cmdline_p)
nodes_clear(node_online_map);
/* Setup bootmem with available RAM */
+ lmb_init();
setup_memory();
sparse_init();
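
The rewritten setup_bootmem_allocator() routes all early memory bookkeeping through LMB: regions are added, the kernel image and zero page are reserved, the bootmem bitmap is allocated out of LMB, and the reserved list is then replayed into reserve_bootmem(). The toy allocator below models that add/reserve/allocate flow; the fixed-size region arrays and helper names are simplifications, not the real lmb interface.

/*
 * Toy model of the LMB flow above: add memory, reserve what is already in
 * use, allocate the bootmem bitmap, then replay the reservation list.
 */
#include <stdio.h>
#include <stdint.h>

struct region { uint64_t base, size; };

static struct region memory[4], reserved[8];
static int nr_memory, nr_reserved;

static void mem_add(uint64_t base, uint64_t size)
{
	memory[nr_memory++] = (struct region){ base, size };
}

static void mem_reserve(uint64_t base, uint64_t size)
{
	reserved[nr_reserved++] = (struct region){ base, size };
}

static int overlaps(uint64_t base, uint64_t size, const struct region *r)
{
	return base < r->base + r->size && r->base < base + size;
}

/* carve 'size' bytes from the top of the first region, avoiding reservations */
static uint64_t mem_alloc(uint64_t size, uint64_t align)
{
	uint64_t cand = (memory[0].base + memory[0].size - size) & ~(align - 1);
	int i, clash;

	do {
		clash = 0;
		for (i = 0; i < nr_reserved; i++)
			if (overlaps(cand, size, &reserved[i])) {
				cand = (reserved[i].base - size) & ~(align - 1);
				clash = 1;
			}
	} while (clash);

	mem_reserve(cand, size);
	return cand;
}

int main(void)
{
	int i;

	mem_add(0x08000000, 64 << 20);		/* like lmb_add(base, size) */
	mem_reserve(0x08001000, 4 << 20);	/* kernel image, like lmb_reserve() */

	printf("bootmap at %#llx\n",
	       (unsigned long long)mem_alloc(32 << 10, 4096));

	for (i = 0; i < nr_reserved; i++)	/* replayed into reserve_bootmem() */
		printf("reserve %#llx + %#llx\n",
		       (unsigned long long)reserved[i].base,
		       (unsigned long long)reserved[i].size);
	return 0;
}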
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 9b352a1e3fb4..d2424b068b7b 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -21,6 +21,7 @@
#include <linux/smp.h>
#include <linux/rtc.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
#include <asm/rtc.h>
/* Dummy RTC ops */
@@ -96,6 +97,7 @@ void __init time_init(void)
if (board_time_init)
board_time_init();
+ hwblk_init();
clk_init();
rtc_sh_get_time(&xtime);
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index f53c76acaede..766976d27b21 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -171,6 +171,7 @@ SECTIONS
*/
/DISCARD/ : {
*(.exitcall.exit)
+ *(.discard)
}
STABS_DEBUG
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index cc8ddbdf3d7a..71925946f1e1 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,12 +15,28 @@
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
-#include <linux/marker.h>
+#include <linux/perf_counter.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+ int ret = 0;
+
+#ifdef CONFIG_KPROBES
+ if (!user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, trap))
+ ret = 1;
+ preempt_enable();
+ }
+#endif
+
+ return ret;
+}
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -87,13 +103,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
return;
}
+ mm = tsk->mm;
+
+ if (unlikely(notify_page_fault(regs, lookup_exception_vector())))
+ return;
+
/* Only enable interrupts if they were on before the fault */
- if ((regs->sr & SR_IMASK) != SR_IMASK) {
- trace_hardirqs_on();
+ if ((regs->sr & SR_IMASK) != SR_IMASK)
local_irq_enable();
- }
- mm = tsk->mm;
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt or have no user
@@ -141,10 +160,15 @@ survive:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+ if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- else
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ regs, address);
+ } else {
tsk->min_flt++;
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ regs, address);
+ }
up_read(&mm->mmap_sem);
return;
@@ -245,22 +269,6 @@ do_sigbus:
goto no_context;
}
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
- int ret = 0;
-
-#ifdef CONFIG_KPROBES
- if (!user_mode(regs)) {
- preempt_disable();
- if (kprobe_running() && kprobe_fault_handler(regs, trap))
- ret = 1;
- preempt_enable();
- }
-#endif
-
- return ret;
-}
-
/*
* Called with interrupts disabled.
*/
@@ -273,12 +281,7 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
pmd_t *pmd;
pte_t *pte;
pte_t entry;
- int ret = 0;
-
- if (notify_page_fault(regs, lookup_exception_vector()))
- goto out;
-
- ret = 1;
+ int ret = 1;
/*
* We don't take page faults for P1, P2, and parts of P4, these
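
The fault path above now emits one PERF_COUNT_SW_PAGE_FAULTS event per fault plus exactly one of the major or minor variants depending on VM_FAULT_MAJOR, matching what other architectures already report. A trivial model of that accounting, with plain counters standing in for perf_swcounter_event():

/*
 * Model of the accounting added above: every fault bumps the generic
 * counter, and exactly one of the major/minor counters.
 */
#include <stdio.h>

enum { SW_FAULTS, SW_FAULTS_MAJ, SW_FAULTS_MIN, NR_EVENTS };
static unsigned long events[NR_EVENTS];

#define VM_FAULT_MAJOR 0x1

static void account_fault(int fault_flags)
{
	events[SW_FAULTS]++;			/* every fault */
	if (fault_flags & VM_FAULT_MAJOR)
		events[SW_FAULTS_MAJ]++;	/* had to wait for I/O */
	else
		events[SW_FAULTS_MIN]++;	/* satisfied from memory */
}

int main(void)
{
	account_fault(0);
	account_fault(VM_FAULT_MAJOR);
	account_fault(0);
	printf("faults=%lu maj=%lu min=%lu\n",
	       events[SW_FAULTS], events[SW_FAULTS_MAJ], events[SW_FAULTS_MIN]);
	return 0;
}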
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 095d93bec7cd..9b784fdb947c 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -9,6 +9,7 @@
*/
#include <linux/module.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/pfn.h>
@@ -26,6 +27,15 @@ EXPORT_SYMBOL_GPL(node_data);
void __init setup_memory(void)
{
unsigned long free_pfn = PFN_UP(__pa(_end));
+ u64 base = min_low_pfn << PAGE_SHIFT;
+ u64 size = (max_low_pfn << PAGE_SHIFT) - min_low_pfn;
+
+ lmb_add(base, size);
+
+ /* Reserve the LMB regions used by the kernel, initrd, etc. */
+ lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+ (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) -
+ (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
/*
* Node 0 sets up its pgdat at the first available pfn,
@@ -45,24 +55,23 @@ void __init setup_memory(void)
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
- unsigned long bootmap_pages, bootmap_start, bootmap_size;
- unsigned long start_pfn, free_pfn, end_pfn;
+ unsigned long bootmap_pages;
+ unsigned long start_pfn, end_pfn;
+ unsigned long bootmem_paddr;
/* Don't allow bogus node assignment */
BUG_ON(nid > MAX_NUMNODES || nid == 0);
- /*
- * The free pfn starts at the beginning of the range, and is
- * advanced as necessary for pgdat and node map allocations.
- */
- free_pfn = start_pfn = start >> PAGE_SHIFT;
+ start_pfn = start >> PAGE_SHIFT;
end_pfn = end >> PAGE_SHIFT;
+ lmb_add(start, end - start);
+
__add_active_range(nid, start_pfn, end_pfn);
/* Node-local pgdat */
- NODE_DATA(nid) = pfn_to_kaddr(free_pfn);
- free_pfn += PFN_UP(sizeof(struct pglist_data));
+ NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
+ SMP_CACHE_BYTES, end_pfn));
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
@@ -71,16 +80,17 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* Node-local bootmap */
bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
- bootmap_start = (unsigned long)pfn_to_kaddr(free_pfn);
- bootmap_size = init_bootmem_node(NODE_DATA(nid), free_pfn, start_pfn,
- end_pfn);
+ bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
+ PAGE_SIZE, end_pfn);
+ init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
+ start_pfn, end_pfn);
free_bootmem_with_active_regions(nid, end_pfn);
/* Reserve the pgdat and bootmap space with the bootmem allocator */
reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
sizeof(struct pglist_data), BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT,
+ reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
/* It's up */
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index fcbb6e135cef..3ce40ea34824 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -20,6 +20,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/perf_counter.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -115,6 +116,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
/* Not an IO address, so reenable interrupts */
local_irq_enable();
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -195,10 +198,16 @@ survive:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+
+ if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- else
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ regs, address);
+ } else {
tsk->min_flt++;
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ regs, address);
+ }
/* If we get here, the page fault has been handled. Do the TLB refill
now from the newly-setup PTE, to avoid having to fault again right
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3f8b6a92eabd..4f6ed0f113f0 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -95,9 +95,6 @@ config AUDIT_ARCH
config HAVE_SETUP_PER_CPU_AREA
def_bool y if SPARC64
-config HAVE_DYNAMIC_PER_CPU_AREA
- def_bool y if SPARC64
-
config GENERIC_HARDIRQS_NO__DO_IRQ
bool
def_bool y if SPARC64
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 96041a8d39e8..1ff0fd924756 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -15,7 +15,7 @@ quiet_cmd_elftoaout = ELFTOAOUT $@
ifeq ($(CONFIG_SPARC32),y)
quiet_cmd_piggy = PIGGY $@
- cmd_piggy = $(obj)/piggyback_32 $@ $(obj)/System.map $(ROOT_IMG)
+ cmd_piggy = $(obj)/piggyback_32 $@ System.map $(ROOT_IMG)
quiet_cmd_btfix = BTFIX $@
cmd_btfix = $(OBJDUMP) -x vmlinux | $(obj)/btfixupprep > $@
quiet_cmd_sysmap = SYSMAP $(obj)/System.map
@@ -58,7 +58,7 @@ $(obj)/image: $(obj)/btfix.o FORCE
$(obj)/zImage: $(obj)/image
$(call if_changed,strip)
-$(obj)/tftpboot.img: $(obj)/piggyback $(obj)/System.map $(obj)/image FORCE
+$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_32 System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)
@@ -79,7 +79,7 @@ $(obj)/image: vmlinux FORCE
$(call if_changed,strip)
@echo ' kernel: $@ is ready'
-$(obj)/tftpboot.img: vmlinux $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
+$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)
@echo ' kernel: $@ is ready'
diff --git a/arch/sparc/boot/piggyback_32.c b/arch/sparc/boot/piggyback_32.c
index c9f500c1a8b2..e8dc9adfcd61 100644
--- a/arch/sparc/boot/piggyback_32.c
+++ b/arch/sparc/boot/piggyback_32.c
@@ -70,7 +70,7 @@ void die(char *str)
int main(int argc,char **argv)
{
static char aout_magic[] = { 0x01, 0x03, 0x01, 0x07 };
- unsigned char buffer[1024], *q, *r;
+ char buffer[1024], *q, *r;
unsigned int i, j, k, start, end, offset;
FILE *map;
struct stat s;
@@ -84,7 +84,7 @@ int main(int argc,char **argv)
while (fgets (buffer, 1024, map)) {
if (!strcmp (buffer + 8, " T start\n") || !strcmp (buffer + 16, " T start\n"))
start = strtoul (buffer, NULL, 16);
- else if (!strcmp (buffer + 8, " A end\n") || !strcmp (buffer + 16, " A end\n"))
+ else if (!strcmp (buffer + 8, " A _end\n") || !strcmp (buffer + 16, " A _end\n"))
end = strtoul (buffer, NULL, 16);
}
fclose (map);
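
piggyback_32 scans System.map with fixed-width comparisons: the symbol type letter sits after either 8 or 16 hex digits, and after the fix above the end marker is the "_end" symbol rather than "end". The standalone parser below works in the same spirit, reading the map from stdin; it is a sketch for illustration, not the boot tool itself.

/*
 * Standalone sketch of the System.map scan: pick up "start" and "_end".
 * The buffer is zero-filled so the fixed-offset comparisons never run past
 * initialised data.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buffer[1024] = "";
	unsigned long start = 0, end = 0;

	while (fgets(buffer, sizeof(buffer), stdin)) {
		/* 8 (32-bit map) or 16 (64-bit map) hex digits precede the type */
		if (!strcmp(buffer + 8, " T start\n") ||
		    !strcmp(buffer + 16, " T start\n"))
			start = strtoul(buffer, NULL, 16);
		else if (!strcmp(buffer + 8, " A _end\n") ||
			 !strcmp(buffer + 16, " A _end\n"))
			end = strtoul(buffer, NULL, 16);
	}
	printf("start=%#lx _end=%#lx size=%#lx\n", start, end, end - start);
	return 0;
}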
diff --git a/arch/sparc/boot/piggyback_64.c b/arch/sparc/boot/piggyback_64.c
index de364bfed0bb..c63fd1b6bdd4 100644
--- a/arch/sparc/boot/piggyback_64.c
+++ b/arch/sparc/boot/piggyback_64.c
@@ -46,6 +46,7 @@ int main(int argc,char **argv)
struct stat s;
int image, tail;
+ start = end = 0;
if (stat (argv[3], &s) < 0) die (argv[3]);
map = fopen (argv[2], "r");
if (!map) die(argv[2]);
diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h
index 3702e087df2c..f3b85b6b0b76 100644
--- a/arch/sparc/include/asm/device.h
+++ b/arch/sparc/include/asm/device.h
@@ -32,4 +32,7 @@ dev_archdata_get_node(const struct dev_archdata *ad)
return ad->prom_node;
}
+struct pdev_archdata {
+};
+
#endif /* _ASM_SPARC_DEVICE_H */
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index b41c4c198159..810d9248e23f 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -10,7 +10,6 @@
* or architectures with incomplete PCI setup by the loader.
*/
#define pcibios_assign_all_busses() 0
-#define pcibios_scan_all_fns(a, b) 0
#define PCIBIOS_MIN_IO 0UL
#define PCIBIOS_MIN_MEM 0UL
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 7a1e3566e59c..a32970888287 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -10,7 +10,6 @@
* or architectures with incomplete PCI setup by the loader.
*/
#define pcibios_assign_all_busses() 0
-#define pcibios_scan_all_fns(a, b) 0
#define PCIBIOS_MIN_IO 0UL
#define PCIBIOS_MIN_MEM 0UL
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index becb6bf353a9..f49e11cd4ded 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -36,7 +36,6 @@ extern int sparc64_multi_core;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
/*
* General functions that each host system must provide.
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index e5ea8d332421..0c35499efe36 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -12,22 +12,8 @@ static inline int cpu_to_node(int cpu)
#define parent_node(node) (node)
-static inline cpumask_t node_to_cpumask(int node)
-{
- return numa_cpumask_lookup_table[node];
-}
#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
-/*
- * Returns a pointer to the cpumask of CPUs on Node 'node'.
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node) \
- cpumask_t *v = &(numa_cpumask_lookup_table[node])
-
-#define node_to_cpumask_ptr_next(v, node) \
- v = &(numa_cpumask_lookup_table[node])
-
struct pci_bus;
#ifdef CONFIG_PCI
extern int pcibus_to_node(struct pci_bus *pbus);
@@ -72,8 +58,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define mc_capable() (sparc64_multi_core)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index bd075054942b..f0ee79055409 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
#include <linux/irq.h>
#include <asm/ptrace.h>
@@ -914,25 +913,19 @@ void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
tb->nonresum_qmask);
}
-static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
-{
- unsigned long size = PAGE_ALIGN(qmask + 1);
- void *p = __alloc_bootmem(size, size, 0);
- if (!p) {
- prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
- prom_halt();
- }
-
- *pa_ptr = __pa(p);
-}
-
-static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
+/* Each queue region must be a power of 2 multiple of 64 bytes in
+ * size. The base real address must be aligned to the size of the
+ * region. Thus, an 8KB queue must be 8KB aligned, for example.
+ */
+static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
- void *p = __alloc_bootmem(size, size, 0);
+ unsigned long order = get_order(size);
+ unsigned long p;
+ p = __get_free_pages(GFP_KERNEL, order);
if (!p) {
- prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+ prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();
}
@@ -942,11 +935,11 @@ static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
- void *page;
+ unsigned long page;
BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
- page = alloc_bootmem_pages(PAGE_SIZE);
+ page = get_zeroed_page(GFP_KERNEL);
if (!page) {
prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
prom_halt();
@@ -965,13 +958,13 @@ static void __init sun4v_init_mondo_queues(void)
for_each_possible_cpu(cpu) {
struct trap_per_cpu *tb = &trap_block[cpu];
- alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
- alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
- alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
- alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
- alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
- alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
- tb->nonresum_qmask);
+ alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+ alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+ alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+ alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+ alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+ alloc_one_queue(&tb->nonresum_kernel_buf_pa,
+ tb->nonresum_qmask);
}
}
@@ -999,7 +992,7 @@ void __init init_IRQ(void)
kill_prom_timer();
size = sizeof(struct ino_bucket) * NUM_IVECS;
- ivector_table = alloc_bootmem(size);
+ ivector_table = kzalloc(size, GFP_KERNEL);
if (!ivector_table) {
prom_printf("Fatal error, cannot allocate ivector_table\n");
prom_halt();
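
The new alloc_one_queue() above relies on a property of the page allocator: a block of 2^order pages is naturally aligned to its own size, which satisfies the sun4v requirement that each queue's base address be aligned to the queue size. The arithmetic below re-derives get_order() and the resulting block sizes for a few queue masks; PAGE_SHIFT and the helper are redefined locally for the demo.

/*
 * Arithmetic behind alloc_one_queue(): the queue size is page-aligned and
 * turned into a buddy-allocator order, and a 2^order-page block is always
 * aligned to its own size.
 */
#include <stdio.h>

#define PAGE_SHIFT	13	/* sparc64 uses 8 KB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long qmask[] = { 0x3f, 0x7ff, 0x1fff, 0x3fff };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long size = PAGE_ALIGN(qmask[i] + 1);
		int order = get_order(size);

		printf("qmask %#6lx -> size %5lu -> order %d (block and alignment: %lu)\n",
		       qmask[i], size, order, PAGE_SIZE << order);
	}
	return 0;
}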
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index fa44eaf8d897..6970333b48b8 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1415,19 +1415,6 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
#endif
}
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
-
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
-{
- size_t off = (size_t)pageno << PAGE_SHIFT;
-
- if (off >= pcpur_size)
- return NULL;
-
- return virt_to_page(pcpur_ptrs[cpu] + off);
-}
-
#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
static void __init pcpu_map_range(unsigned long start, unsigned long end,
@@ -1491,25 +1478,26 @@ void __init setup_per_cpu_areas(void)
size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
static struct vm_struct vm;
unsigned long delta, cpu;
- size_t pcpu_unit_size;
+ size_t size_sum, pcpu_unit_size;
size_t ptrs_size;
+ void **ptrs;
- pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
- PERCPU_DYNAMIC_RESERVE);
- dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
+ size_sum = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+ PERCPU_DYNAMIC_RESERVE);
+ dyn_size = size_sum - static_size - PERCPU_MODULE_RESERVE;
- ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
- pcpur_ptrs = alloc_bootmem(ptrs_size);
+ ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(ptrs[0]));
+ ptrs = alloc_bootmem(ptrs_size);
for_each_possible_cpu(cpu) {
- pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
- PCPU_CHUNK_SIZE);
+ ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
+ PCPU_CHUNK_SIZE);
- free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
- PCPU_CHUNK_SIZE - pcpur_size);
+ free_bootmem(__pa(ptrs[cpu] + size_sum),
+ PCPU_CHUNK_SIZE - size_sum);
- memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+ memcpy(ptrs[cpu], __per_cpu_load, static_size);
}
/* allocate address and map */
@@ -1523,14 +1511,14 @@ void __init setup_per_cpu_areas(void)
start += cpu * PCPU_CHUNK_SIZE;
end = start + PCPU_CHUNK_SIZE;
- pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
+ pcpu_map_range(start, end, virt_to_page(ptrs[cpu]));
}
- pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+ pcpu_unit_size = pcpu_setup_first_chunk(static_size,
PERCPU_MODULE_RESERVE, dyn_size,
PCPU_CHUNK_SIZE, vm.addr, NULL);
- free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+ free_bootmem(__pa(ptrs), ptrs_size);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index fcbbd000ec08..d63cf914667d 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -175,6 +175,7 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
STABS_DEBUG
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c
index 5ec17563142e..dd2aadc14af0 100644
--- a/arch/um/drivers/slip_kern.c
+++ b/arch/um/drivers/slip_kern.c
@@ -30,7 +30,6 @@ static void slip_init(struct net_device *dev, void *data)
slip_proto_init(&spri->slip);
- dev->init = NULL;
dev->hard_header_len = 0;
dev->header_ops = NULL;
dev->addr_len = 0;
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c
index f15a6e7654f3..e376284f0fb7 100644
--- a/arch/um/drivers/slirp_kern.c
+++ b/arch/um/drivers/slirp_kern.c
@@ -32,7 +32,6 @@ void slirp_init(struct net_device *dev, void *data)
slip_proto_init(&spri->slip);
- dev->init = NULL;
dev->hard_header_len = 0;
dev->header_ops = NULL;
dev->addr_len = 0;
diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h
index 90fc708b320e..378de4bbf49f 100644
--- a/arch/um/include/asm/dma-mapping.h
+++ b/arch/um/include/asm/dma-mapping.h
@@ -79,14 +79,14 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
}
static inline void
-dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline void
-dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 54f42e8b0105..34d813011b7a 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
unsigned cpu = smp_processor_id();
if(prev != next){
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
if(next != &init_mm)
__switch_mm(&next->context.id);
}
diff --git a/arch/um/include/asm/pci.h b/arch/um/include/asm/pci.h
index 59923199cdc3..b44cf59ede1e 100644
--- a/arch/um/include/asm/pci.h
+++ b/arch/um/include/asm/pci.h
@@ -2,6 +2,5 @@
#define __UM_PCI_H
#define PCI_DMA_BUS_IS_PHYS (1)
-#define pcibios_scan_all_fns(a, b) 0
#endif
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 9975e1ab44fb..2916d6eadffd 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -156,4 +156,6 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+
+ /DISCARD/ : { *(.discard) }
}
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 98351c78bc81..106bf27e2a9a 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -111,7 +111,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
int i;
for (i = 0; i < ncpus; ++i)
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
cpu_clear(me, cpu_online_map);
cpu_set(me, cpu_online_map);
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 11b835248b86..1f8a622cabe1 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -100,4 +100,6 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+
+ /DISCARD/ : { *(.discard) }
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d1430ef6b4f9..14c506cfae22 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -149,9 +149,6 @@ config ARCH_HAS_CACHE_LINE_SIZE
config HAVE_SETUP_PER_CPU_AREA
def_bool y
-config HAVE_DYNAMIC_PER_CPU_AREA
- def_bool y
-
config HAVE_CPUMASK_OF_CPU_MAP
def_bool X86_64_SMP
@@ -1247,6 +1244,11 @@ config ARCH_MEMORY_PROBE
def_bool X86_64
depends on MEMORY_HOTPLUG
+config ILLEGAL_POINTER_VALUE
+ hex
+ default 0 if X86_32
+ default 0xdead000000000000 if X86_64
+
source "mm/Kconfig"
config HIGHPTE
@@ -1913,25 +1915,26 @@ config DMAR_DEFAULT_ON
recommended you say N here while the DMAR code remains
experimental.
-config DMAR_GFX_WA
- def_bool y
- prompt "Support for Graphics workaround"
+config DMAR_BROKEN_GFX_WA
+ def_bool n
+ prompt "Workaround broken graphics drivers (going away soon)"
depends on DMAR
---help---
Current Graphics drivers tend to use physical address
for DMA and avoid using DMA APIs. Setting this config
option permits the IOMMU driver to set a unity map for
all the OS-visible memory. Hence the driver can continue
- to use physical addresses for DMA.
+ to use physical addresses for DMA, at least until this
+ option is removed in the 2.6.32 kernel.
config DMAR_FLOPPY_WA
def_bool y
depends on DMAR
---help---
- Floppy disk drivers are know to bypass DMA API calls
+ Floppy disk drivers are known to bypass DMA API calls
thereby failing to work when IOMMU is enabled. This
workaround will setup a 1:1 mapping for the first
- 16M to make floppy (an ISA device) work.
+ 16MiB to make floppy (an ISA device) work.
config INTR_REMAP
bool "Support for Interrupt Remapping (EXPERIMENTAL)"
diff --git a/arch/x86/boot/video-bios.c b/arch/x86/boot/video-bios.c
index d660be492363..49e0c18833e0 100644
--- a/arch/x86/boot/video-bios.c
+++ b/arch/x86/boot/video-bios.c
@@ -37,14 +37,13 @@ static int set_bios_mode(u8 mode)
ireg.al = mode; /* AH=0x00 Set Video Mode */
intcall(0x10, &ireg, NULL);
-
ireg.ah = 0x0f; /* Get Current Video Mode */
intcall(0x10, &ireg, &oreg);
do_restore = 1; /* Assume video contents were lost */
/* Not all BIOSes are clean with the top bit */
- new_mode = ireg.al & 0x7f;
+ new_mode = oreg.al & 0x7f;
if (new_mode == mode)
return 0; /* Mode change OK */
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
index c700147d6ffb..11e8c6eb80a1 100644
--- a/arch/x86/boot/video-vesa.c
+++ b/arch/x86/boot/video-vesa.c
@@ -31,7 +31,6 @@ static inline void vesa_store_mode_params_graphics(void) {}
static int vesa_probe(void)
{
-#if defined(CONFIG_VIDEO_VESA) || defined(CONFIG_FIRMWARE_EDID)
struct biosregs ireg, oreg;
u16 mode;
addr_t mode_ptr;
@@ -45,12 +44,11 @@ static int vesa_probe(void)
ireg.di = (size_t)&vginfo;
intcall(0x10, &ireg, &oreg);
- if (ireg.ax != 0x004f ||
+ if (oreg.ax != 0x004f ||
vginfo.signature != VESA_MAGIC ||
vginfo.version < 0x0102)
return 0; /* Not present */
-#endif /* CONFIG_VIDEO_VESA || CONFIG_FIRMWARE_EDID */
-#ifdef CONFIG_VIDEO_VESA
+
set_fs(vginfo.video_mode_ptr.seg);
mode_ptr = vginfo.video_mode_ptr.off;
@@ -70,7 +68,7 @@ static int vesa_probe(void)
ireg.di = (size_t)&vminfo;
intcall(0x10, &ireg, &oreg);
- if (ireg.ax != 0x004f)
+ if (oreg.ax != 0x004f)
continue;
if ((vminfo.mode_attr & 0x15) == 0x05) {
@@ -102,9 +100,6 @@ static int vesa_probe(void)
}
return nmodes;
-#else
- return 0;
-#endif /* CONFIG_VIDEO_VESA */
}
static int vesa_set_mode(struct mode_info *mode)
diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
index 8f8d827e254d..819caa1f2008 100644
--- a/arch/x86/boot/video-vga.c
+++ b/arch/x86/boot/video-vga.c
@@ -47,14 +47,6 @@ static u8 vga_set_basic_mode(void)
initregs(&ireg);
-#ifdef CONFIG_VIDEO_400_HACK
- if (adapter >= ADAPTER_VGA) {
- ireg.ax = 0x1202;
- ireg.bx = 0x0030;
- intcall(0x10, &ireg, NULL);
- }
-#endif
-
ax = 0x0f00;
intcall(0x10, &ireg, &oreg);
mode = oreg.al;
@@ -62,11 +54,9 @@ static u8 vga_set_basic_mode(void)
set_fs(0);
rows = rdfs8(0x484); /* rows minus one */
-#ifndef CONFIG_VIDEO_400_HACK
if ((oreg.ax == 0x5003 || oreg.ax == 0x5007) &&
(rows == 0 || rows == 24))
return mode;
-#endif
if (mode != 3 && mode != 7)
mode = 3;
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index bad728b76fc2..d42da3802499 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -221,7 +221,6 @@ static unsigned int mode_menu(void)
}
}
-#ifdef CONFIG_VIDEO_RETAIN
/* Save screen content to the heap */
static struct saved_screen {
int x, y;
@@ -299,10 +298,6 @@ static void restore_screen(void)
ireg.dl = saved.curx;
intcall(0x10, &ireg, NULL);
}
-#else
-#define save_screen() ((void)0)
-#define restore_screen() ((void)0)
-#endif
void set_video(void)
{
diff --git a/arch/x86/boot/video.h b/arch/x86/boot/video.h
index 5bb174a997fc..ff339c5db311 100644
--- a/arch/x86/boot/video.h
+++ b/arch/x86/boot/video.h
@@ -17,19 +17,8 @@
#include <linux/types.h>
-/* Enable autodetection of SVGA adapters and modes. */
-#undef CONFIG_VIDEO_SVGA
-
-/* Enable autodetection of VESA modes */
-#define CONFIG_VIDEO_VESA
-
-/* Retain screen contents when switching modes */
-#define CONFIG_VIDEO_RETAIN
-
-/* Force 400 scan lines for standard modes (hack to fix bad BIOS behaviour */
-#undef CONFIG_VIDEO_400_HACK
-
-/* This code uses an extended set of video mode numbers. These include:
+/*
+ * This code uses an extended set of video mode numbers. These include:
* Aliases for standard modes
* NORMAL_VGA (-1)
* EXTENDED_VGA (-2)
@@ -67,13 +56,8 @@
/* The "recalculate timings" flag */
#define VIDEO_RECALC 0x8000
-/* Define DO_STORE according to CONFIG_VIDEO_RETAIN */
-#ifdef CONFIG_VIDEO_RETAIN
void store_screen(void);
#define DO_STORE() store_screen()
-#else
-#define DO_STORE() ((void)0)
-#endif /* CONFIG_VIDEO_RETAIN */
/*
* Mode table structures
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index c580c5ec1cad..d3ec8d588d4b 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -636,7 +636,7 @@ static int __init aesni_init(void)
int err;
if (!cpu_has_aes) {
- printk(KERN_ERR "Intel AES-NI instructions are not detected.\n");
+ printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
return -ENODEV;
}
if ((err = crypto_register_alg(&aesni_alg)))
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 7ddb36ab933b..74ca38f6a4de 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -14,6 +14,7 @@
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
+#define APIC_LVR_DIRECTED_EOI (1 << 24)
#define GET_APIC_VERSION(x) ((x) & 0xFFu)
#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
#ifdef CONFIG_X86_32
@@ -40,6 +41,7 @@
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
+#define APIC_SPIV_DIRECTED_EOI (1 << 12)
#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_ISR 0x100
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
index 2503d4e64c2a..dc5a667ff791 100644
--- a/arch/x86/include/asm/atomic_32.h
+++ b/arch/x86/include/asm/atomic_32.h
@@ -19,7 +19,10 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+static inline int atomic_read(const atomic_t *v)
+{
+ return v->counter;
+}
/**
* atomic_set - set atomic variable
@@ -28,7 +31,10 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v, i) (((v)->counter) = (i))
+static inline void atomic_set(atomic_t *v, int i)
+{
+ v->counter = i;
+}
/**
* atomic_add - add integer to atomic variable
@@ -200,8 +206,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return atomic_add_return(-i, v);
}
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}
/**
* atomic_add_unless - add unless the number is already a given value
@@ -250,45 +263,12 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
/* An 64bit atomic type */
typedef struct {
- unsigned long long counter;
+ u64 __aligned(8) counter;
} atomic64_t;
#define ATOMIC64_INIT(val) { (val) }
-/**
- * atomic64_read - read atomic64 variable
- * @ptr: pointer of type atomic64_t
- *
- * Atomically reads the value of @v.
- * Doesn't imply a read memory barrier.
- */
-#define __atomic64_read(ptr) ((ptr)->counter)
-
-static inline unsigned long long
-cmpxchg8b(unsigned long long *ptr, unsigned long long old, unsigned long long new)
-{
- asm volatile(
-
- LOCK_PREFIX "cmpxchg8b (%[ptr])\n"
-
- : "=A" (old)
-
- : [ptr] "D" (ptr),
- "A" (old),
- "b" (ll_low(new)),
- "c" (ll_high(new))
-
- : "memory");
-
- return old;
-}
-
-static inline unsigned long long
-atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
- unsigned long long new_val)
-{
- return cmpxchg8b(&ptr->counter, old_val, new_val);
-}
+extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
/**
* atomic64_xchg - xchg atomic64 variable
@@ -298,18 +278,7 @@ atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
* Atomically xchgs the value of @ptr to @new_val and returns
* the old value.
*/
-
-static inline unsigned long long
-atomic64_xchg(atomic64_t *ptr, unsigned long long new_val)
-{
- unsigned long long old_val;
-
- do {
- old_val = atomic_read(ptr);
- } while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
-
- return old_val;
-}
+extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
/**
* atomic64_set - set atomic64 variable
@@ -318,10 +287,7 @@ atomic64_xchg(atomic64_t *ptr, unsigned long long new_val)
*
* Atomically sets the value of @ptr to @new_val.
*/
-static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val)
-{
- atomic64_xchg(ptr, new_val);
-}
+extern void atomic64_set(atomic64_t *ptr, u64 new_val);
/**
* atomic64_read - read atomic64 variable
@@ -329,17 +295,30 @@ static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val)
*
* Atomically reads the value of @ptr and returns it.
*/
-static inline unsigned long long atomic64_read(atomic64_t *ptr)
+static inline u64 atomic64_read(atomic64_t *ptr)
{
- unsigned long long curr_val;
-
- do {
- curr_val = __atomic64_read(ptr);
- } while (atomic64_cmpxchg(ptr, curr_val, curr_val) != curr_val);
-
- return curr_val;
+ u64 res;
+
+ /*
+ * Note, we inline this atomic64_t primitive because
+ * it only clobbers EAX/EDX and leaves the others
+ * untouched. We also (somewhat subtly) rely on the
+ * fact that cmpxchg8b returns the current 64-bit value
+ * of the memory location we are touching:
+ */
+ asm volatile(
+ "mov %%ebx, %%eax\n\t"
+ "mov %%ecx, %%edx\n\t"
+ LOCK_PREFIX "cmpxchg8b %1\n"
+ : "=&A" (res)
+ : "m" (*ptr)
+ );
+
+ return res;
}
+extern u64 atomic64_read(atomic64_t *ptr);
+
/**
* atomic64_add_return - add and return
* @delta: integer value to add
@@ -347,34 +326,14 @@ static inline unsigned long long atomic64_read(atomic64_t *ptr)
*
* Atomically adds @delta to @ptr and returns @delta + *@ptr
*/
-static inline unsigned long long
-atomic64_add_return(unsigned long long delta, atomic64_t *ptr)
-{
- unsigned long long old_val, new_val;
-
- do {
- old_val = atomic_read(ptr);
- new_val = old_val + delta;
-
- } while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
-
- return new_val;
-}
-
-static inline long atomic64_sub_return(unsigned long long delta, atomic64_t *ptr)
-{
- return atomic64_add_return(-delta, ptr);
-}
+extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
-static inline long atomic64_inc_return(atomic64_t *ptr)
-{
- return atomic64_add_return(1, ptr);
-}
-
-static inline long atomic64_dec_return(atomic64_t *ptr)
-{
- return atomic64_sub_return(1, ptr);
-}
+/*
+ * Other variants with different arithmetic operators:
+ */
+extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
+extern u64 atomic64_inc_return(atomic64_t *ptr);
+extern u64 atomic64_dec_return(atomic64_t *ptr);
/**
* atomic64_add - add integer to atomic64 variable
@@ -383,10 +342,7 @@ static inline long atomic64_dec_return(atomic64_t *ptr)
*
* Atomically adds @delta to @ptr.
*/
-static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr)
-{
- atomic64_add_return(delta, ptr);
-}
+extern void atomic64_add(u64 delta, atomic64_t *ptr);
/**
* atomic64_sub - subtract the atomic64 variable
@@ -395,10 +351,7 @@ static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr)
*
* Atomically subtracts @delta from @ptr.
*/
-static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr)
-{
- atomic64_add(-delta, ptr);
-}
+extern void atomic64_sub(u64 delta, atomic64_t *ptr);
/**
* atomic64_sub_and_test - subtract value from variable and test result
@@ -409,13 +362,7 @@ static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr)
* true if the result is zero, or false for all
* other cases.
*/
-static inline int
-atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr)
-{
- unsigned long long old_val = atomic64_sub_return(delta, ptr);
-
- return old_val == 0;
-}
+extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
/**
* atomic64_inc - increment atomic64 variable
@@ -423,10 +370,7 @@ atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr)
*
* Atomically increments @ptr by 1.
*/
-static inline void atomic64_inc(atomic64_t *ptr)
-{
- atomic64_add(1, ptr);
-}
+extern void atomic64_inc(atomic64_t *ptr);
/**
* atomic64_dec - decrement atomic64 variable
@@ -434,10 +378,7 @@ static inline void atomic64_inc(atomic64_t *ptr)
*
* Atomically decrements @ptr by 1.
*/
-static inline void atomic64_dec(atomic64_t *ptr)
-{
- atomic64_sub(1, ptr);
-}
+extern void atomic64_dec(atomic64_t *ptr);
/**
* atomic64_dec_and_test - decrement and test
@@ -447,10 +388,7 @@ static inline void atomic64_dec(atomic64_t *ptr)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline int atomic64_dec_and_test(atomic64_t *ptr)
-{
- return atomic64_sub_and_test(1, ptr);
-}
+extern int atomic64_dec_and_test(atomic64_t *ptr);
/**
* atomic64_inc_and_test - increment and test
@@ -460,10 +398,7 @@ static inline int atomic64_dec_and_test(atomic64_t *ptr)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline int atomic64_inc_and_test(atomic64_t *ptr)
-{
- return atomic64_sub_and_test(-1, ptr);
-}
+extern int atomic64_inc_and_test(atomic64_t *ptr);
/**
* atomic64_add_negative - add and test if negative
@@ -474,13 +409,7 @@ static inline int atomic64_inc_and_test(atomic64_t *ptr)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline int
-atomic64_add_negative(unsigned long long delta, atomic64_t *ptr)
-{
- long long old_val = atomic64_add_return(delta, ptr);
-
- return old_val < 0;
-}
+extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
#include <asm-generic/atomic-long.h>
#endif /* _ASM_X86_ATOMIC_32_H */
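
The inline atomic64_read() above exploits cmpxchg8b: exchanging a location with its own current value modifies nothing but returns all 64 bits atomically, which a plain load cannot guarantee on 32-bit x86. Below is a user-space analogue built on the GCC __sync builtins rather than the kernel's inline assembly; the demo type and helpers are assumptions, not the kernel interface, and the set loop only approximates what the now out-of-line helpers do.

/*
 * User-space analogue of the lock-free 64-bit read: a compare-and-swap of
 * a location with its own value changes nothing but returns the whole
 * 64-bit contents atomically.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t counter; } atomic64_demo_t;

static uint64_t demo_read(atomic64_demo_t *v)
{
	/* CAS(ptr, 0, 0): stores 0 only if the value already is 0, so memory
	 * is never changed, and the old 64-bit value comes back whole. */
	return __sync_val_compare_and_swap(&v->counter, 0, 0);
}

static void demo_set(atomic64_demo_t *v, uint64_t new_val)
{
	uint64_t old;

	do {		/* cmpxchg loop, as the old inline atomic64_xchg() used */
		old = demo_read(v);
	} while (!__sync_bool_compare_and_swap(&v->counter, old, new_val));
}

int main(void)
{
	atomic64_demo_t v = { 0 };

	demo_set(&v, 0x0123456789abcdefULL);
	printf("read back %#llx\n", (unsigned long long)demo_read(&v));
	return 0;
}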
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
index 0d6360220007..d605dc268e79 100644
--- a/arch/x86/include/asm/atomic_64.h
+++ b/arch/x86/include/asm/atomic_64.h
@@ -18,7 +18,10 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+static inline int atomic_read(const atomic_t *v)
+{
+ return v->counter;
+}
/**
* atomic_set - set atomic variable
@@ -27,7 +30,10 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v, i) (((v)->counter) = (i))
+static inline void atomic_set(atomic_t *v, int i)
+{
+ v->counter = i;
+}
/**
* atomic_add - add integer to atomic variable
@@ -192,7 +198,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
*/
-#define atomic64_read(v) ((v)->counter)
+static inline long atomic64_read(const atomic64_t *v)
+{
+ return v->counter;
+}
/**
* atomic64_set - set atomic64 variable
@@ -201,7 +210,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
*
* Atomically sets the value of @v to @i.
*/
-#define atomic64_set(v, i) (((v)->counter) = (i))
+static inline void atomic64_set(atomic64_t *v, long i)
+{
+ v->counter = i;
+}
/**
* atomic64_add - add integer to atomic64 variable
@@ -355,11 +367,25 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+{
+ return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long new)
+{
+ return xchg(&v->counter, new);
+}
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic_xchg(atomic_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}
/**
* atomic_add_unless - add unless the number is a given value
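
Both atomic headers in this series convert object-like macros such as atomic_read()/atomic_set() into static inline functions. The gain is type checking: the macro expands to a bare lvalue, so accidental writes through it compile silently, while the inline form rejects them. The small demo below keeps a local macro variant purely to show the hazard; names here are demo inventions.

/*
 * Why macros became inline functions: the macro expands to a bare lvalue
 * with no type checking, so misuse compiles silently.
 */
#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

#define macro_read(v)		((v)->counter)
#define macro_set(v, i)		(((v)->counter) = (i))

static inline int demo_read(const demo_atomic_t *v)	{ return v->counter; }
static inline void demo_set(demo_atomic_t *v, int i)	{ v->counter = i; }

int main(void)
{
	demo_atomic_t v;

	macro_set(&v, 1);
	macro_read(&v) = 42;	/* compiles: the macro is just an lvalue */
	/* demo_read(&v) = 42;	   would not compile: a call is not an lvalue */

	demo_set(&v, 7);
	printf("%d\n", demo_read(&v));
	return 0;
}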
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 418e632d4a80..7a1065958ba9 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -8,7 +8,7 @@
#ifdef __KERNEL__
-#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
@@ -16,10 +16,10 @@
& ~(CONFIG_PHYSICAL_ALIGN - 1))
/* Minimum kernel alignment, as a power of two */
-#ifdef CONFIG_x86_64
+#ifdef CONFIG_X86_64
#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
#else
-#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT+1)
+#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER)
#endif
#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 4994a20acbcb..cee34e9ca45b 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -13,4 +13,7 @@ struct dma_map_ops *dma_ops;
#endif
};
+struct pdev_archdata {
+};
+
#endif /* _ASM_X86_DEVICE_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 2d81af3974a0..7b2d71df39a6 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -111,12 +111,9 @@ enum fixed_addresses {
#ifdef CONFIG_PARAVIRT
FIX_PARAVIRT_BOOTMAP,
#endif
- FIX_TEXT_POKE0, /* reserve 2 pages for text_poke() */
- FIX_TEXT_POKE1,
+ FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
+ FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
__end_of_permanent_fixed_addresses,
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
- FIX_OHCI1394_BASE,
-#endif
/*
* 256 temporary boot-time mappings, used by early_ioremap(),
* before ioremap() is functional.
@@ -129,6 +126,9 @@ enum fixed_addresses {
FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
(__end_of_permanent_fixed_addresses & 255),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+ FIX_OHCI1394_BASE,
+#endif
#ifdef CONFIG_X86_32
FIX_WP_TEST,
#endif
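
The fixmap hands out addresses downward, fix_to_virt(idx) = FIXADDR_TOP - (idx << PAGE_SHIFT), so a larger enum index lands at a lower virtual address. Listing FIX_TEXT_POKE0 after FIX_TEXT_POKE1, as the hunk above does, therefore places the POKE0 page immediately below the POKE1 page and gives text_poke() two ascending, contiguous pages. The constants in this check are illustrative values, not taken from the kernel headers.

/*
 * The fixmap grows downward: with POKE0 listed after POKE1, the POKE0 page
 * sits directly below the POKE1 page and the pair is contiguous ascending.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xfffff000UL

static unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
	/* the order from the hunk above: POKE1 first, POKE0 second */
	enum { FIX_TEXT_POKE1 = 5, FIX_TEXT_POKE0 };

	unsigned long p0 = fix_to_virt(FIX_TEXT_POKE0);
	unsigned long p1 = fix_to_virt(FIX_TEXT_POKE1);

	printf("POKE0 %#lx, POKE1 %#lx, contiguous ascending: %s\n",
	       p0, p1, p1 == p0 + (1UL << PAGE_SHIFT) ? "yes" : "no");
	return 0;
}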
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 175adf58dd4f..2e7529295f55 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -26,6 +26,7 @@ extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
+extern void __math_state_restore(void);
extern void init_thread_xstate(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/asm/ioctls.h
index 0d5b23b7b06e..ec34c760665e 100644
--- a/arch/x86/include/asm/ioctls.h
+++ b/arch/x86/include/asm/ioctls.h
@@ -1,94 +1 @@
-#ifndef _ASM_X86_IOCTLS_H
-#define _ASM_X86_IOCTLS_H
-
-#include <asm/ioctl.h>
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS 0x5401
-#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
-#define TCSETSW 0x5403
-#define TCSETSF 0x5404
-#define TCGETA 0x5405
-#define TCSETA 0x5406
-#define TCSETAW 0x5407
-#define TCSETAF 0x5408
-#define TCSBRK 0x5409
-#define TCXONC 0x540A
-#define TCFLSH 0x540B
-#define TIOCEXCL 0x540C
-#define TIOCNXCL 0x540D
-#define TIOCSCTTY 0x540E
-#define TIOCGPGRP 0x540F
-#define TIOCSPGRP 0x5410
-#define TIOCOUTQ 0x5411
-#define TIOCSTI 0x5412
-#define TIOCGWINSZ 0x5413
-#define TIOCSWINSZ 0x5414
-#define TIOCMGET 0x5415
-#define TIOCMBIS 0x5416
-#define TIOCMBIC 0x5417
-#define TIOCMSET 0x5418
-#define TIOCGSOFTCAR 0x5419
-#define TIOCSSOFTCAR 0x541A
-#define FIONREAD 0x541B
-#define TIOCINQ FIONREAD
-#define TIOCLINUX 0x541C
-#define TIOCCONS 0x541D
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TIOCPKT 0x5420
-#define FIONBIO 0x5421
-#define TIOCNOTTY 0x5422
-#define TIOCSETD 0x5423
-#define TIOCGETD 0x5424
-#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
-#define TIOCSBRK 0x5427 /* BSD compatibility */
-#define TIOCCBRK 0x5428 /* BSD compatibility */
-#define TIOCGSID 0x5429 /* Return the session ID of FD */
-#define TCGETS2 _IOR('T', 0x2A, struct termios2)
-#define TCSETS2 _IOW('T', 0x2B, struct termios2)
-#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
-#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
-#define TIOCGRS485 0x542E
-#define TIOCSRS485 0x542F
-#define TIOCGPTN _IOR('T', 0x30, unsigned int)
- /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
-#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
-#define TCSETX 0x5433
-#define TCSETXF 0x5434
-#define TCSETXW 0x5435
-
-#define FIONCLEX 0x5450
-#define FIOCLEX 0x5451
-#define FIOASYNC 0x5452
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
-#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
-#define FIOQSIZE 0x5460
-
-/* Used for packet mode */
-#define TIOCPKT_DATA 0
-#define TIOCPKT_FLUSHREAD 1
-#define TIOCPKT_FLUSHWRITE 2
-#define TIOCPKT_STOP 4
-#define TIOCPKT_START 8
-#define TIOCPKT_NOSTOP 16
-#define TIOCPKT_DOSTOP 32
-
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
-#endif /* _ASM_X86_IOCTLS_H */
+#include <asm-generic/ioctls.h>
diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/asm/ipcbuf.h
index ee678fd51594..84c7e51cb6d0 100644
--- a/arch/x86/include/asm/ipcbuf.h
+++ b/arch/x86/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ASM_X86_IPCBUF_H
-#define _ASM_X86_IPCBUF_H
-
-/*
- * The ipc64_perm structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm {
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* _ASM_X86_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 125be8b19568..708b9c32a5da 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -17,6 +17,7 @@
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
+#define __KVM_HAVE_MCE
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index eabdc1cfab5c..30b625d8e5f0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
+#include <linux/tracepoint.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
@@ -37,12 +38,14 @@
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
0xFFFFFF0000000000ULL)
-#define KVM_GUEST_CR0_MASK \
- (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
- | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
+ (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK \
+ (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
+ (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
#define KVM_VM_CR0_ALWAYS_ON \
- (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
- | X86_CR0_MP)
+ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_GUEST_CR4_MASK \
(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -51,12 +54,12 @@
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
-/* shadow tables are PAE even on non-PAE hosts */
-#define KVM_HPAGE_SHIFT 21
-#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
-#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
-
-#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+/* KVM Hugepage definitions for x86 */
+#define KVM_NR_PAGE_SIZES 2
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + (((x) - 1) * 9))
+#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
#define DE_VECTOR 0
#define DB_VECTOR 1
@@ -120,6 +123,10 @@ enum kvm_reg {
NR_VCPU_REGS
};
+enum kvm_reg_ex {
+ VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+};
+
enum {
VCPU_SREG_ES,
VCPU_SREG_CS,
@@ -334,16 +341,6 @@ struct kvm_vcpu_arch {
u8 nr;
} interrupt;
- struct {
- int vm86_active;
- u8 save_iopl;
- struct kvm_save_segment {
- u16 selector;
- unsigned long base;
- u32 limit;
- u32 ar;
- } tr, es, ds, fs, gs;
- } rmode;
int halt_request; /* real mode on Intel only */
int cpuid_nent;
@@ -373,6 +370,11 @@ struct kvm_vcpu_arch {
unsigned long dr6;
unsigned long dr7;
unsigned long eff_db[KVM_NR_DB_REGS];
+
+ u64 mcg_cap;
+ u64 mcg_status;
+ u64 mcg_ctl;
+ u64 *mce_banks;
};
struct kvm_mem_alias {
@@ -526,6 +528,7 @@ struct kvm_x86_ops {
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(void);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+ const struct trace_print_flags *exit_reasons_str;
};
extern struct kvm_x86_ops *kvm_x86_ops;
@@ -752,8 +755,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}
-#define MSR_IA32_TIME_STAMP_COUNTER 0x010
-
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
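
With KVM_NR_PAGE_SIZES the fixed 2 MiB definitions become per-level formulas: level 1 is the 4 KiB base page and level 2 the 2 MiB large page. A quick user-space check of what the new macros expand to (PAGE_SHIFT hard-coded to x86's 12 here, since the real value comes from the kernel headers):

    #include <stdio.h>

    #define PAGE_SHIFT 12                    /* x86: 4 KiB base pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    #define KVM_NR_PAGE_SIZES      2
    #define KVM_HPAGE_SHIFT(x)     (PAGE_SHIFT + (((x) - 1) * 9))
    #define KVM_HPAGE_SIZE(x)      (1UL << KVM_HPAGE_SHIFT(x))
    #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

    int main(void)
    {
            /* level 1: 4096 bytes (1 base page); level 2: 2097152 bytes
             * (512 base pages), matching the old single 2 MiB definition. */
            for (int level = 1; level <= KVM_NR_PAGE_SIZES; level++)
                    printf("level %d: %10lu bytes = %3lu base pages\n",
                           level, KVM_HPAGE_SIZE(level), KVM_PAGES_PER_HPAGE(level));
            return 0;
    }
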
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index d31c4a684078..33600a66755f 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -30,7 +30,7 @@
#include <asm/hw_irq.h>
#include <asm/kvm_para.h>
-/*G:031 But first, how does our Guest contact the Host to ask for privileged
+/*G:030 But first, how does our Guest contact the Host to ask for privileged
* operations? There are two ways: the direct way is to make a "hypercall",
* to make requests of the Host Itself.
*
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 5cdd8d100ec9..b50b9e9042c4 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -9,7 +9,7 @@
*/
#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
-#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
+#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
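
The comment fix matters because bit 8 of IA32_MCG_CAP advertises the presence of the MCG_CTL register, not of MCG_CAP itself. A tiny sketch of the check a machine-check init path performs; the sample mcg_cap value is invented, real code reads the MSR:

    #include <stdio.h>

    #define MCG_BANKCNT_MASK 0xff            /* number of MCA banks */
    #define MCG_CTL_P        (1ULL << 8)     /* MCG_CTL register available */

    int main(void)
    {
            unsigned long long mcg_cap = 0x0106ULL;  /* invented: 6 banks, CTL present */

            printf("%llu banks\n", mcg_cap & MCG_BANKCNT_MASK);
            if (mcg_cap & MCG_CTL_P)
                    printf("MCG_CTL exists and may be programmed\n");
            else
                    printf("no MCG_CTL on this CPU\n");
            return 0;
    }
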
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
index 751af2550ed9..593e51d4643f 100644
--- a/arch/x86/include/asm/mman.h
+++ b/arch/x86/include/asm/mman.h
@@ -1,20 +1,8 @@
#ifndef _ASM_X86_MMAN_H
#define _ASM_X86_MMAN_H
-#include <asm-generic/mman-common.h>
-
#define MAP_32BIT 0x40 /* only give out 32bit addresses */
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
+#include <asm-generic/mman.h>
#endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f923203dc39a..4a2d4e0c18d9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
- cpu_clear(cpu, prev->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
#ifdef CONFIG_SMP
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
percpu_write(cpu_tlbstate.active_mm, next);
#endif
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
/* Re-load page tables */
load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
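
The switch_mm() hunk is part of the wider move from the by-value cpumask helpers (cpu_set/cpu_clear/cpu_test_and_set) to pointer-based ones operating on mm_cpumask(). The toy stand-ins below only model that calling pattern; they are not the kernel's implementations:

    #include <stdio.h>

    /* Toy stand-ins for the kernel's cpumask helpers, showing the
     * pointer-based convention switch_mm() is converted to. */
    struct cpumask { unsigned long bits; };

    static void cpumask_set_cpu(int cpu, struct cpumask *m)   { m->bits |=  (1UL << cpu); }
    static void cpumask_clear_cpu(int cpu, struct cpumask *m) { m->bits &= ~(1UL << cpu); }
    static int cpumask_test_and_set_cpu(int cpu, struct cpumask *m)
    {
            int was_set = !!(m->bits & (1UL << cpu));

            m->bits |= 1UL << cpu;
            return was_set;
    }

    int main(void)
    {
            struct cpumask prev_mask = { 1UL << 3 }, next_mask = { 0 };
            int cpu = 3;

            /* switch_mm(): leave the old mm's set, join the new one. */
            cpumask_clear_cpu(cpu, &prev_mask);
            cpumask_set_cpu(cpu, &next_mask);

            /* lazy-TLB path: only reload CR3 if we were not already in the set. */
            if (!cpumask_test_and_set_cpu(cpu, &next_mask))
                    printf("would reload CR3\n");
            else
                    printf("already tracked, no reload\n");
            return 0;
    }
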
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 47d62743c4d5..555bc12bdcd6 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -1,18 +1,7 @@
#ifndef _ASM_X86_MODULE_H
#define _ASM_X86_MODULE_H
-/* x86_32/64 are simple */
-struct mod_arch_specific {};
-
-#ifdef CONFIG_X86_32
-# define Elf_Shdr Elf32_Shdr
-# define Elf_Sym Elf32_Sym
-# define Elf_Ehdr Elf32_Ehdr
-#else
-# define Elf_Shdr Elf64_Shdr
-# define Elf_Sym Elf64_Sym
-# define Elf_Ehdr Elf64_Ehdr
-#endif
+#include <asm-generic/module.h>
#ifdef CONFIG_X86_64
/* X86_64 does not define MODULE_PROC_FAMILY */
diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/asm/msgbuf.h
index 7e4e9481f51c..809134c644a6 100644
--- a/arch/x86/include/asm/msgbuf.h
+++ b/arch/x86/include/asm/msgbuf.h
@@ -1,39 +1 @@
-#ifndef _ASM_X86_MSGBUF_H
-#define _ASM_X86_MSGBUF_H
-
-/*
- * The msqid64_ds structure for i386 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space on i386 is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- *
- * Pad space on x8664 is left for:
- * - 2 miscellaneous 64-bit values
- */
-struct msqid64_ds {
- struct ipc64_perm msg_perm;
- __kernel_time_t msg_stime; /* last msgsnd time */
-#ifdef __i386__
- unsigned long __unused1;
-#endif
- __kernel_time_t msg_rtime; /* last msgrcv time */
-#ifdef __i386__
- unsigned long __unused2;
-#endif
- __kernel_time_t msg_ctime; /* last change time */
-#ifdef __i386__
- unsigned long __unused3;
-#endif
- unsigned long msg_cbytes; /* current number of bytes on queue */
- unsigned long msg_qnum; /* number of messages in queue */
- unsigned long msg_qbytes; /* max number of bytes on queue */
- __kernel_pid_t msg_lspid; /* pid of last msgsnd */
- __kernel_pid_t msg_lrpid; /* last receive pid */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-#endif /* _ASM_X86_MSGBUF_H */
+#include <asm-generic/msgbuf.h>
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1692fb5050e3..bd5549034a95 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -246,10 +246,6 @@
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38)
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << 39)
-/* Intel Model 6 */
-#define MSR_P6_EVNTSEL0 0x00000186
-#define MSR_P6_EVNTSEL1 0x00000187
-
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
#define MSR_IA32_MCG_EBX 0x00000181
@@ -378,6 +374,7 @@
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
+#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117
#endif /* _ASM_X86_MSR_INDEX_H */
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c97264409934..c86e5ed4af51 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -72,7 +72,6 @@ void lapic_watchdog_stop(void);
int lapic_watchdog_init(unsigned nmi_hz);
int lapic_wd_event(unsigned nmi_hz);
unsigned lapic_adjust_nmi_hz(unsigned hz);
-int lapic_watchdog_ok(void);
void disable_lapic_nmi_watchdog(void);
void enable_lapic_nmi_watchdog(void);
void stop_nmi(void);
diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/asm/param.h
index 6f0d0422f4ca..965d45427975 100644
--- a/arch/x86/include/asm/param.h
+++ b/arch/x86/include/asm/param.h
@@ -1,22 +1 @@
-#ifndef _ASM_X86_PARAM_H
-#define _ASM_X86_PARAM_H
-
-#ifdef __KERNEL__
-# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* some user interfaces are */
-# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#endif /* _ASM_X86_PARAM_H */
+#include <asm-generic/param.h>
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4fb37c8a0832..6a07af432c81 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -7,689 +7,11 @@
#include <asm/pgtable_types.h>
#include <asm/asm.h>
-/* Bitmask of what can be clobbered: usually at least eax. */
-#define CLBR_NONE 0
-#define CLBR_EAX (1 << 0)
-#define CLBR_ECX (1 << 1)
-#define CLBR_EDX (1 << 2)
-#define CLBR_EDI (1 << 3)
-
-#ifdef CONFIG_X86_32
-/* CLBR_ANY should match all regs platform has. For i386, that's just it */
-#define CLBR_ANY ((1 << 4) - 1)
-
-#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
-#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
-#define CLBR_SCRATCH (0)
-#else
-#define CLBR_RAX CLBR_EAX
-#define CLBR_RCX CLBR_ECX
-#define CLBR_RDX CLBR_EDX
-#define CLBR_RDI CLBR_EDI
-#define CLBR_RSI (1 << 4)
-#define CLBR_R8 (1 << 5)
-#define CLBR_R9 (1 << 6)
-#define CLBR_R10 (1 << 7)
-#define CLBR_R11 (1 << 8)
-
-#define CLBR_ANY ((1 << 9) - 1)
-
-#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
- CLBR_RCX | CLBR_R8 | CLBR_R9)
-#define CLBR_RET_REG (CLBR_RAX)
-#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
-
-#include <asm/desc_defs.h>
-#endif /* X86_64 */
-
-#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+#include <asm/paravirt_types.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
-#include <asm/kmap_types.h>
-#include <asm/desc_defs.h>
-
-struct page;
-struct thread_struct;
-struct desc_ptr;
-struct tss_struct;
-struct mm_struct;
-struct desc_struct;
-struct task_struct;
-
-/*
- * Wrapper type for pointers to code which uses the non-standard
- * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
- */
-struct paravirt_callee_save {
- void *func;
-};
-
-/* general info */
-struct pv_info {
- unsigned int kernel_rpl;
- int shared_kernel_pmd;
- int paravirt_enabled;
- const char *name;
-};
-
-struct pv_init_ops {
- /*
- * Patch may replace one of the defined code sequences with
- * arbitrary code, subject to the same register constraints.
- * This generally means the code is not free to clobber any
- * registers other than EAX. The patch function should return
- * the number of bytes of code generated, as we nop pad the
- * rest in generic code.
- */
- unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
- unsigned long addr, unsigned len);
-
- /* Basic arch-specific setup */
- void (*arch_setup)(void);
- char *(*memory_setup)(void);
- void (*post_allocator_init)(void);
-
- /* Print a banner to identify the environment */
- void (*banner)(void);
-};
-
-
-struct pv_lazy_ops {
- /* Set deferred update mode, used for batching operations. */
- void (*enter)(void);
- void (*leave)(void);
-};
-
-struct pv_time_ops {
- void (*time_init)(void);
-
- /* Get and set time of day */
- unsigned long (*get_wallclock)(void);
- int (*set_wallclock)(unsigned long);
-
- unsigned long long (*sched_clock)(void);
- unsigned long (*get_tsc_khz)(void);
-};
-
-struct pv_cpu_ops {
- /* hooks for various privileged instructions */
- unsigned long (*get_debugreg)(int regno);
- void (*set_debugreg)(int regno, unsigned long value);
-
- void (*clts)(void);
-
- unsigned long (*read_cr0)(void);
- void (*write_cr0)(unsigned long);
-
- unsigned long (*read_cr4_safe)(void);
- unsigned long (*read_cr4)(void);
- void (*write_cr4)(unsigned long);
-
-#ifdef CONFIG_X86_64
- unsigned long (*read_cr8)(void);
- void (*write_cr8)(unsigned long);
-#endif
-
- /* Segment descriptor handling */
- void (*load_tr_desc)(void);
- void (*load_gdt)(const struct desc_ptr *);
- void (*load_idt)(const struct desc_ptr *);
- void (*store_gdt)(struct desc_ptr *);
- void (*store_idt)(struct desc_ptr *);
- void (*set_ldt)(const void *desc, unsigned entries);
- unsigned long (*store_tr)(void);
- void (*load_tls)(struct thread_struct *t, unsigned int cpu);
-#ifdef CONFIG_X86_64
- void (*load_gs_index)(unsigned int idx);
-#endif
- void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
- const void *desc);
- void (*write_gdt_entry)(struct desc_struct *,
- int entrynum, const void *desc, int size);
- void (*write_idt_entry)(gate_desc *,
- int entrynum, const gate_desc *gate);
- void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
- void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
-
- void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
-
- void (*set_iopl_mask)(unsigned mask);
-
- void (*wbinvd)(void);
- void (*io_delay)(void);
-
- /* cpuid emulation, mostly so that caps bits can be disabled */
- void (*cpuid)(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
-
- /* MSR, PMC and TSR operations.
- err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
- u64 (*read_msr_amd)(unsigned int msr, int *err);
- u64 (*read_msr)(unsigned int msr, int *err);
- int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
-
- u64 (*read_tsc)(void);
- u64 (*read_pmc)(int counter);
- unsigned long long (*read_tscp)(unsigned int *aux);
-
- /*
- * Atomically enable interrupts and return to userspace. This
- * is only ever used to return to 32-bit processes; in a
- * 64-bit kernel, it's used for 32-on-64 compat processes, but
- * never native 64-bit processes. (Jump, not call.)
- */
- void (*irq_enable_sysexit)(void);
-
- /*
- * Switch to usermode gs and return to 64-bit usermode using
- * sysret. Only used in 64-bit kernels to return to 64-bit
- * processes. Usermode register state, including %rsp, must
- * already be restored.
- */
- void (*usergs_sysret64)(void);
-
- /*
- * Switch to usermode gs and return to 32-bit usermode using
- * sysret. Used to return to 32-on-64 compat processes.
- * Other usermode register state, including %esp, must already
- * be restored.
- */
- void (*usergs_sysret32)(void);
-
- /* Normal iret. Jump to this with the standard iret stack
- frame set up. */
- void (*iret)(void);
-
- void (*swapgs)(void);
-
- void (*start_context_switch)(struct task_struct *prev);
- void (*end_context_switch)(struct task_struct *next);
-};
-
-struct pv_irq_ops {
- void (*init_IRQ)(void);
-
- /*
- * Get/set interrupt state. save_fl and restore_fl are only
- * expected to use X86_EFLAGS_IF; all other bits
- * returned from save_fl are undefined, and may be ignored by
- * restore_fl.
- *
- * NOTE: Callers of these functions expect the callee to preserve
- * more registers than the standard C calling convention.
- */
- struct paravirt_callee_save save_fl;
- struct paravirt_callee_save restore_fl;
- struct paravirt_callee_save irq_disable;
- struct paravirt_callee_save irq_enable;
-
- void (*safe_halt)(void);
- void (*halt)(void);
-
-#ifdef CONFIG_X86_64
- void (*adjust_exception_frame)(void);
-#endif
-};
-
-struct pv_apic_ops {
-#ifdef CONFIG_X86_LOCAL_APIC
- void (*setup_boot_clock)(void);
- void (*setup_secondary_clock)(void);
-
- void (*startup_ipi_hook)(int phys_apicid,
- unsigned long start_eip,
- unsigned long start_esp);
-#endif
-};
-
-struct pv_mmu_ops {
- /*
- * Called before/after init_mm pagetable setup. setup_start
- * may reset %cr3, and may pre-install parts of the pagetable;
- * pagetable setup is expected to preserve any existing
- * mapping.
- */
- void (*pagetable_setup_start)(pgd_t *pgd_base);
- void (*pagetable_setup_done)(pgd_t *pgd_base);
-
- unsigned long (*read_cr2)(void);
- void (*write_cr2)(unsigned long);
-
- unsigned long (*read_cr3)(void);
- void (*write_cr3)(unsigned long);
-
- /*
- * Hooks for intercepting the creation/use/destruction of an
- * mm_struct.
- */
- void (*activate_mm)(struct mm_struct *prev,
- struct mm_struct *next);
- void (*dup_mmap)(struct mm_struct *oldmm,
- struct mm_struct *mm);
- void (*exit_mmap)(struct mm_struct *mm);
-
-
- /* TLB operations */
- void (*flush_tlb_user)(void);
- void (*flush_tlb_kernel)(void);
- void (*flush_tlb_single)(unsigned long addr);
- void (*flush_tlb_others)(const struct cpumask *cpus,
- struct mm_struct *mm,
- unsigned long va);
-
- /* Hooks for allocating and freeing a pagetable top-level */
- int (*pgd_alloc)(struct mm_struct *mm);
- void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
-
- /*
- * Hooks for allocating/releasing pagetable pages when they're
- * attached to a pagetable
- */
- void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
- void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
- void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
- void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
- void (*release_pte)(unsigned long pfn);
- void (*release_pmd)(unsigned long pfn);
- void (*release_pud)(unsigned long pfn);
-
- /* Pagetable manipulation functions */
- void (*set_pte)(pte_t *ptep, pte_t pteval);
- void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval);
- void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
- void (*pte_update)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
- void (*pte_update_defer)(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
-
- pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
- void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-
- struct paravirt_callee_save pte_val;
- struct paravirt_callee_save make_pte;
-
- struct paravirt_callee_save pgd_val;
- struct paravirt_callee_save make_pgd;
-
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
- void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
- void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
- void (*pmd_clear)(pmd_t *pmdp);
-
-#endif /* CONFIG_X86_PAE */
-
- void (*set_pud)(pud_t *pudp, pud_t pudval);
-
- struct paravirt_callee_save pmd_val;
- struct paravirt_callee_save make_pmd;
-
-#if PAGETABLE_LEVELS == 4
- struct paravirt_callee_save pud_val;
- struct paravirt_callee_save make_pud;
-
- void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
-#endif /* PAGETABLE_LEVELS == 4 */
-#endif /* PAGETABLE_LEVELS >= 3 */
-
-#ifdef CONFIG_HIGHPTE
- void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
- struct pv_lazy_ops lazy_mode;
-
- /* dom0 ops */
-
- /* Sometimes the physical address is a pfn, and sometimes it's
- an mfn. We can tell which is which from the index. */
- void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
- phys_addr_t phys, pgprot_t flags);
-};
-
-struct raw_spinlock;
-struct pv_lock_ops {
- int (*spin_is_locked)(struct raw_spinlock *lock);
- int (*spin_is_contended)(struct raw_spinlock *lock);
- void (*spin_lock)(struct raw_spinlock *lock);
- void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
- int (*spin_trylock)(struct raw_spinlock *lock);
- void (*spin_unlock)(struct raw_spinlock *lock);
-};
-
-/* This contains all the paravirt structures: we get a convenient
- * number for each function using the offset which we use to indicate
- * what to patch. */
-struct paravirt_patch_template {
- struct pv_init_ops pv_init_ops;
- struct pv_time_ops pv_time_ops;
- struct pv_cpu_ops pv_cpu_ops;
- struct pv_irq_ops pv_irq_ops;
- struct pv_apic_ops pv_apic_ops;
- struct pv_mmu_ops pv_mmu_ops;
- struct pv_lock_ops pv_lock_ops;
-};
-
-extern struct pv_info pv_info;
-extern struct pv_init_ops pv_init_ops;
-extern struct pv_time_ops pv_time_ops;
-extern struct pv_cpu_ops pv_cpu_ops;
-extern struct pv_irq_ops pv_irq_ops;
-extern struct pv_apic_ops pv_apic_ops;
-extern struct pv_mmu_ops pv_mmu_ops;
-extern struct pv_lock_ops pv_lock_ops;
-
-#define PARAVIRT_PATCH(x) \
- (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
-
-#define paravirt_type(op) \
- [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
- [paravirt_opptr] "i" (&(op))
-#define paravirt_clobber(clobber) \
- [paravirt_clobber] "i" (clobber)
-
-/*
- * Generate some code, and mark it as patchable by the
- * apply_paravirt() alternate instruction patcher.
- */
-#define _paravirt_alt(insn_string, type, clobber) \
- "771:\n\t" insn_string "\n" "772:\n" \
- ".pushsection .parainstructions,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR " 771b\n" \
- " .byte " type "\n" \
- " .byte 772b-771b\n" \
- " .short " clobber "\n" \
- ".popsection\n"
-
-/* Generate patchable code, with the default asm parameters. */
-#define paravirt_alt(insn_string) \
- _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
-
-/* Simple instruction patching code. */
-#define DEF_NATIVE(ops, name, code) \
- extern const char start_##ops##_##name[], end_##ops##_##name[]; \
- asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
-
-unsigned paravirt_patch_nop(void);
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
-unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_ignore(unsigned len);
-unsigned paravirt_patch_call(void *insnbuf,
- const void *target, u16 tgt_clobbers,
- unsigned long addr, u16 site_clobbers,
- unsigned len);
-unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
- unsigned long addr, unsigned len);
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
- unsigned long addr, unsigned len);
-
-unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
- const char *start, const char *end);
-
-unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
- unsigned long addr, unsigned len);
-
-int paravirt_disable_iospace(void);
-
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
-#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
-
-/*
- * These macros are intended to wrap calls through one of the paravirt
- * ops structs, so that they can be later identified and patched at
- * runtime.
- *
- * Normally, a call to a pv_op function is a simple indirect call:
- * (pv_op_struct.operations)(args...).
- *
- * Unfortunately, this is a relatively slow operation for modern CPUs,
- * because it cannot necessarily determine what the destination
- * address is. In this case, the address is a runtime constant, so at
- * the very least we can patch the call to be a simple direct call, or
- * ideally, patch an inline implementation into the callsite. (Direct
- * calls are essentially free, because the call and return addresses
- * are completely predictable.)
- *
- * For i386, these macros rely on the standard gcc "regparm(3)" calling
- * convention, in which the first three arguments are placed in %eax,
- * %edx, %ecx (in that order), and the remaining arguments are placed
- * on the stack. All caller-save registers (eax,edx,ecx) are expected
- * to be modified (either clobbered or used for return values).
- * X86_64, on the other hand, already specifies a register-based calling
- * convention, returning in %rax, with parameters going in %rdi, %rsi,
- * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
- * special handling for dealing with 4 arguments, unlike i386.
- * However, x86_64 also has to clobber all caller-saved registers, which
- * unfortunately are quite a few (r8 - r11)
- *
- * The call instruction itself is marked by placing its start address
- * and size into the .parainstructions section, so that
- * apply_paravirt() in arch/i386/kernel/alternative.c can do the
- * appropriate patching under the control of the backend pv_init_ops
- * implementation.
- *
- * Unfortunately there's no way to get gcc to generate the args setup
- * for the call, and then allow the call itself to be generated by an
- * inline asm. Because of this, we must do the complete arg setup and
- * return value handling from within these macros. This is fairly
- * cumbersome.
- *
- * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
- * It could be extended to more arguments, but there would be little
- * to be gained from that. For each number of arguments, there are
- * the two VCALL and CALL variants for void and non-void functions.
- *
- * When there is a return value, the invoker of the macro must specify
- * the return type. The macro then uses sizeof() on that type to
- * determine whether it's a 32- or 64-bit value, and places the return
- * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
- * 64-bit). For x86_64 machines, it just returns in %rax regardless of
- * the return value size.
- *
- * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
- * i386 passes such pairs in low,high order.
- *
- * Small structures are passed and returned in registers. The macro
- * calling convention can't directly deal with this, so the wrapper
- * functions must do this.
- *
- * These PVOP_* macros are only defined within this header. This
- * means that all uses must be wrapped in inline functions. This also
- * makes sure the incoming and outgoing types are always correct.
- */
-#ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS \
- unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
-
-#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
-#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
-#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
-
-#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
- "=c" (__ecx)
-#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
-
-#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
-#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
-
-#define EXTRA_CLOBBERS
-#define VEXTRA_CLOBBERS
-#else /* CONFIG_X86_64 */
-#define PVOP_VCALL_ARGS \
- unsigned long __edi = __edi, __esi = __esi, \
- __edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
-
-#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
-#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
-#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
-#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
-
-#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
- "=S" (__esi), "=d" (__edx), \
- "=c" (__ecx)
-#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
-
-#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
-#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
-
-#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
-#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
-#endif /* CONFIG_X86_32 */
-
-#ifdef CONFIG_PARAVIRT_DEBUG
-#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
-#else
-#define PVOP_TEST_NULL(op) ((void)op)
-#endif
-
-#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
- pre, post, ...) \
- ({ \
- rettype __ret; \
- PVOP_CALL_ARGS; \
- PVOP_TEST_NULL(op); \
- /* This is 32-bit specific, but is okay in 64-bit */ \
- /* since this condition will never hold */ \
- if (sizeof(rettype) > sizeof(unsigned long)) { \
- asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
- post \
- : call_clbr \
- : paravirt_type(op), \
- paravirt_clobber(clbr), \
- ##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
- __ret = (rettype)((((u64)__edx) << 32) | __eax); \
- } else { \
- asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
- post \
- : call_clbr \
- : paravirt_type(op), \
- paravirt_clobber(clbr), \
- ##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
- __ret = (rettype)__eax; \
- } \
- __ret; \
- })
-
-#define __PVOP_CALL(rettype, op, pre, post, ...) \
- ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
- EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
-
-#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
- ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
- PVOP_CALLEE_CLOBBERS, , \
- pre, post, ##__VA_ARGS__)
-
-
-#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
- ({ \
- PVOP_VCALL_ARGS; \
- PVOP_TEST_NULL(op); \
- asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
- post \
- : call_clbr \
- : paravirt_type(op), \
- paravirt_clobber(clbr), \
- ##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
- })
-
-#define __PVOP_VCALL(op, pre, post, ...) \
- ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
- VEXTRA_CLOBBERS, \
- pre, post, ##__VA_ARGS__)
-
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
- ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
- PVOP_VCALLEE_CLOBBERS, , \
- pre, post, ##__VA_ARGS__)
-
-
-
-#define PVOP_CALL0(rettype, op) \
- __PVOP_CALL(rettype, op, "", "")
-#define PVOP_VCALL0(op) \
- __PVOP_VCALL(op, "", "")
-
-#define PVOP_CALLEE0(rettype, op) \
- __PVOP_CALLEESAVE(rettype, op, "", "")
-#define PVOP_VCALLEE0(op) \
- __PVOP_VCALLEESAVE(op, "", "")
-
-
-#define PVOP_CALL1(rettype, op, arg1) \
- __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
-#define PVOP_VCALL1(op, arg1) \
- __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
-
-#define PVOP_CALLEE1(rettype, op, arg1) \
- __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
-#define PVOP_VCALLEE1(op, arg1) \
- __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
-
-
-#define PVOP_CALL2(rettype, op, arg1, arg2) \
- __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALL2(op, arg1, arg2) \
- __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-
-#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
- __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALLEE2(op, arg1, arg2) \
- __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-
-
-#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
- __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
-#define PVOP_VCALL3(op, arg1, arg2, arg3) \
- __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
-
-/* This is the only difference in x86_64. We can make it much simpler */
-#ifdef CONFIG_X86_32
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
- __PVOP_CALL(rettype, op, \
- "push %[_arg4];", "lea 4(%%esp),%%esp;", \
- PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
- PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
- __PVOP_VCALL(op, \
- "push %[_arg4];", "lea 4(%%esp),%%esp;", \
- "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
- "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
-#else
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
- __PVOP_CALL(rettype, op, "", "", \
- PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
- PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
- __PVOP_VCALL(op, "", "", \
- PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
- PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#endif
static inline int paravirt_enabled(void)
{
@@ -1393,20 +715,6 @@ static inline void pmd_clear(pmd_t *pmdp)
}
#endif /* CONFIG_X86_PAE */
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
-void paravirt_start_context_switch(struct task_struct *prev);
-void paravirt_end_context_switch(struct task_struct *next);
-
-void paravirt_enter_lazy_mmu(void);
-void paravirt_leave_lazy_mmu(void);
-
#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
@@ -1437,12 +745,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
pv_mmu_ops.set_fixmap(idx, phys, flags);
}
-void _paravirt_nop(void);
-u32 _paravirt_ident_32(u32);
-u64 _paravirt_ident_64(u64);
-
-#define paravirt_nop ((void *)_paravirt_nop)
-
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
@@ -1479,17 +781,6 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
#endif
-/* These all sit in the .parainstructions section to tell us what to patch. */
-struct paravirt_patch_site {
- u8 *instr; /* original instructions */
- u8 instrtype; /* type of this instruction */
- u8 len; /* length of original instruction */
- u16 clobbers; /* what registers you may clobber */
-};
-
-extern struct paravirt_patch_site __parainstructions[],
- __parainstructions_end[];
-
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
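
Everything removed above (and re-added verbatim in the new paravirt_types.h below) revolves around one shape: each privileged operation is an indirect call through a pv_*_ops table, wrapped in a thin inline so the call site can later be patched into a direct call or an inlined sequence. A user-space model of that shape, with invented struct and field names and a fake CR0 value, and without the .parainstructions bookkeeping the real PVOP_CALLn/PVOP_VCALLn macros add:

    #include <stdio.h>

    /* Invented, simplified ops table; the real pv_cpu_ops has many more hooks. */
    struct cpu_ops_model {
            unsigned long (*read_cr0)(void);
            void (*write_cr0)(unsigned long val);
    };

    static unsigned long native_read_cr0(void)      { return 0x80050033UL; /* fake */ }
    static void native_write_cr0(unsigned long val) { printf("cr0 <- %#lx\n", val); }

    /* A hypervisor backend would install different pointers here at boot. */
    static struct cpu_ops_model cpu_ops = {
            .read_cr0  = native_read_cr0,
            .write_cr0 = native_write_cr0,
    };

    /* The thin wrappers callers actually use; in the kernel these expand to
     * patchable asm so the indirect call can later be rewritten in place. */
    static inline unsigned long read_cr0(void)      { return cpu_ops.read_cr0(); }
    static inline void write_cr0(unsigned long val) { cpu_ops.write_cr0(val); }

    int main(void)
    {
            write_cr0(read_cr0() | 0x8);     /* e.g. set CR0.TS through the table */
            return 0;
    }
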
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
new file mode 100644
index 000000000000..2b3371bae295
--- /dev/null
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -0,0 +1,720 @@
+#ifndef _ASM_X86_PARAVIRT_TYPES_H
+#define _ASM_X86_PARAVIRT_TYPES_H
+
+/* Bitmask of what can be clobbered: usually at least eax. */
+#define CLBR_NONE 0
+#define CLBR_EAX (1 << 0)
+#define CLBR_ECX (1 << 1)
+#define CLBR_EDX (1 << 2)
+#define CLBR_EDI (1 << 3)
+
+#ifdef CONFIG_X86_32
+/* CLBR_ANY should match all regs platform has. For i386, that's just it */
+#define CLBR_ANY ((1 << 4) - 1)
+
+#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
+#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
+#define CLBR_SCRATCH (0)
+#else
+#define CLBR_RAX CLBR_EAX
+#define CLBR_RCX CLBR_ECX
+#define CLBR_RDX CLBR_EDX
+#define CLBR_RDI CLBR_EDI
+#define CLBR_RSI (1 << 4)
+#define CLBR_R8 (1 << 5)
+#define CLBR_R9 (1 << 6)
+#define CLBR_R10 (1 << 7)
+#define CLBR_R11 (1 << 8)
+
+#define CLBR_ANY ((1 << 9) - 1)
+
+#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
+ CLBR_RCX | CLBR_R8 | CLBR_R9)
+#define CLBR_RET_REG (CLBR_RAX)
+#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
+
+#endif /* X86_64 */
+
+#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/desc_defs.h>
+#include <asm/kmap_types.h>
+
+struct page;
+struct thread_struct;
+struct desc_ptr;
+struct tss_struct;
+struct mm_struct;
+struct desc_struct;
+struct task_struct;
+struct cpumask;
+
+/*
+ * Wrapper type for pointers to code which uses the non-standard
+ * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
+ */
+struct paravirt_callee_save {
+ void *func;
+};
+
+/* general info */
+struct pv_info {
+ unsigned int kernel_rpl;
+ int shared_kernel_pmd;
+ int paravirt_enabled;
+ const char *name;
+};
+
+struct pv_init_ops {
+ /*
+ * Patch may replace one of the defined code sequences with
+ * arbitrary code, subject to the same register constraints.
+ * This generally means the code is not free to clobber any
+ * registers other than EAX. The patch function should return
+ * the number of bytes of code generated, as we nop pad the
+ * rest in generic code.
+ */
+ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned long addr, unsigned len);
+
+ /* Basic arch-specific setup */
+ void (*arch_setup)(void);
+ char *(*memory_setup)(void);
+ void (*post_allocator_init)(void);
+
+ /* Print a banner to identify the environment */
+ void (*banner)(void);
+};
+
+
+struct pv_lazy_ops {
+ /* Set deferred update mode, used for batching operations. */
+ void (*enter)(void);
+ void (*leave)(void);
+};
+
+struct pv_time_ops {
+ void (*time_init)(void);
+
+ /* Get and set time of day */
+ unsigned long (*get_wallclock)(void);
+ int (*set_wallclock)(unsigned long);
+
+ unsigned long long (*sched_clock)(void);
+ unsigned long (*get_tsc_khz)(void);
+};
+
+struct pv_cpu_ops {
+ /* hooks for various privileged instructions */
+ unsigned long (*get_debugreg)(int regno);
+ void (*set_debugreg)(int regno, unsigned long value);
+
+ void (*clts)(void);
+
+ unsigned long (*read_cr0)(void);
+ void (*write_cr0)(unsigned long);
+
+ unsigned long (*read_cr4_safe)(void);
+ unsigned long (*read_cr4)(void);
+ void (*write_cr4)(unsigned long);
+
+#ifdef CONFIG_X86_64
+ unsigned long (*read_cr8)(void);
+ void (*write_cr8)(unsigned long);
+#endif
+
+ /* Segment descriptor handling */
+ void (*load_tr_desc)(void);
+ void (*load_gdt)(const struct desc_ptr *);
+ void (*load_idt)(const struct desc_ptr *);
+ void (*store_gdt)(struct desc_ptr *);
+ void (*store_idt)(struct desc_ptr *);
+ void (*set_ldt)(const void *desc, unsigned entries);
+ unsigned long (*store_tr)(void);
+ void (*load_tls)(struct thread_struct *t, unsigned int cpu);
+#ifdef CONFIG_X86_64
+ void (*load_gs_index)(unsigned int idx);
+#endif
+ void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
+ const void *desc);
+ void (*write_gdt_entry)(struct desc_struct *,
+ int entrynum, const void *desc, int size);
+ void (*write_idt_entry)(gate_desc *,
+ int entrynum, const gate_desc *gate);
+ void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
+ void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
+
+ void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
+
+ void (*set_iopl_mask)(unsigned mask);
+
+ void (*wbinvd)(void);
+ void (*io_delay)(void);
+
+ /* cpuid emulation, mostly so that caps bits can be disabled */
+ void (*cpuid)(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx);
+
+ /* MSR, PMC and TSR operations.
+ err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
+ u64 (*read_msr_amd)(unsigned int msr, int *err);
+ u64 (*read_msr)(unsigned int msr, int *err);
+ int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+
+ u64 (*read_tsc)(void);
+ u64 (*read_pmc)(int counter);
+ unsigned long long (*read_tscp)(unsigned int *aux);
+
+ /*
+ * Atomically enable interrupts and return to userspace. This
+ * is only ever used to return to 32-bit processes; in a
+ * 64-bit kernel, it's used for 32-on-64 compat processes, but
+ * never native 64-bit processes. (Jump, not call.)
+ */
+ void (*irq_enable_sysexit)(void);
+
+ /*
+ * Switch to usermode gs and return to 64-bit usermode using
+ * sysret. Only used in 64-bit kernels to return to 64-bit
+ * processes. Usermode register state, including %rsp, must
+ * already be restored.
+ */
+ void (*usergs_sysret64)(void);
+
+ /*
+ * Switch to usermode gs and return to 32-bit usermode using
+ * sysret. Used to return to 32-on-64 compat processes.
+ * Other usermode register state, including %esp, must already
+ * be restored.
+ */
+ void (*usergs_sysret32)(void);
+
+ /* Normal iret. Jump to this with the standard iret stack
+ frame set up. */
+ void (*iret)(void);
+
+ void (*swapgs)(void);
+
+ void (*start_context_switch)(struct task_struct *prev);
+ void (*end_context_switch)(struct task_struct *next);
+};
+
+struct pv_irq_ops {
+ void (*init_IRQ)(void);
+
+ /*
+ * Get/set interrupt state. save_fl and restore_fl are only
+ * expected to use X86_EFLAGS_IF; all other bits
+ * returned from save_fl are undefined, and may be ignored by
+ * restore_fl.
+ *
+ * NOTE: Callers of these functions expect the callee to preserve
+ * more registers than the standard C calling convention.
+ */
+ struct paravirt_callee_save save_fl;
+ struct paravirt_callee_save restore_fl;
+ struct paravirt_callee_save irq_disable;
+ struct paravirt_callee_save irq_enable;
+
+ void (*safe_halt)(void);
+ void (*halt)(void);
+
+#ifdef CONFIG_X86_64
+ void (*adjust_exception_frame)(void);
+#endif
+};
+
+struct pv_apic_ops {
+#ifdef CONFIG_X86_LOCAL_APIC
+ void (*setup_boot_clock)(void);
+ void (*setup_secondary_clock)(void);
+
+ void (*startup_ipi_hook)(int phys_apicid,
+ unsigned long start_eip,
+ unsigned long start_esp);
+#endif
+};
+
+struct pv_mmu_ops {
+ /*
+ * Called before/after init_mm pagetable setup. setup_start
+ * may reset %cr3, and may pre-install parts of the pagetable;
+ * pagetable setup is expected to preserve any existing
+ * mapping.
+ */
+ void (*pagetable_setup_start)(pgd_t *pgd_base);
+ void (*pagetable_setup_done)(pgd_t *pgd_base);
+
+ unsigned long (*read_cr2)(void);
+ void (*write_cr2)(unsigned long);
+
+ unsigned long (*read_cr3)(void);
+ void (*write_cr3)(unsigned long);
+
+ /*
+ * Hooks for intercepting the creation/use/destruction of an
+ * mm_struct.
+ */
+ void (*activate_mm)(struct mm_struct *prev,
+ struct mm_struct *next);
+ void (*dup_mmap)(struct mm_struct *oldmm,
+ struct mm_struct *mm);
+ void (*exit_mmap)(struct mm_struct *mm);
+
+
+ /* TLB operations */
+ void (*flush_tlb_user)(void);
+ void (*flush_tlb_kernel)(void);
+ void (*flush_tlb_single)(unsigned long addr);
+ void (*flush_tlb_others)(const struct cpumask *cpus,
+ struct mm_struct *mm,
+ unsigned long va);
+
+ /* Hooks for allocating and freeing a pagetable top-level */
+ int (*pgd_alloc)(struct mm_struct *mm);
+ void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
+
+ /*
+ * Hooks for allocating/releasing pagetable pages when they're
+ * attached to a pagetable
+ */
+ void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+ void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+ void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
+ void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+ void (*release_pte)(unsigned long pfn);
+ void (*release_pmd)(unsigned long pfn);
+ void (*release_pud)(unsigned long pfn);
+
+ /* Pagetable manipulation functions */
+ void (*set_pte)(pte_t *ptep, pte_t pteval);
+ void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval);
+ void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+ void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*pte_update_defer)(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
+ pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+ struct paravirt_callee_save pte_val;
+ struct paravirt_callee_save make_pte;
+
+ struct paravirt_callee_save pgd_val;
+ struct paravirt_callee_save make_pgd;
+
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+ void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
+ void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*pmd_clear)(pmd_t *pmdp);
+
+#endif /* CONFIG_X86_PAE */
+
+ void (*set_pud)(pud_t *pudp, pud_t pudval);
+
+ struct paravirt_callee_save pmd_val;
+ struct paravirt_callee_save make_pmd;
+
+#if PAGETABLE_LEVELS == 4
+ struct paravirt_callee_save pud_val;
+ struct paravirt_callee_save make_pud;
+
+ void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
+#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* PAGETABLE_LEVELS >= 3 */
+
+#ifdef CONFIG_HIGHPTE
+ void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
+#endif
+
+ struct pv_lazy_ops lazy_mode;
+
+ /* dom0 ops */
+
+ /* Sometimes the physical address is a pfn, and sometimes it's
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
+};
+
+struct raw_spinlock;
+struct pv_lock_ops {
+ int (*spin_is_locked)(struct raw_spinlock *lock);
+ int (*spin_is_contended)(struct raw_spinlock *lock);
+ void (*spin_lock)(struct raw_spinlock *lock);
+ void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
+ int (*spin_trylock)(struct raw_spinlock *lock);
+ void (*spin_unlock)(struct raw_spinlock *lock);
+};
+
+/* This contains all the paravirt structures: we get a convenient
+ * number for each function using the offset which we use to indicate
+ * what to patch. */
+struct paravirt_patch_template {
+ struct pv_init_ops pv_init_ops;
+ struct pv_time_ops pv_time_ops;
+ struct pv_cpu_ops pv_cpu_ops;
+ struct pv_irq_ops pv_irq_ops;
+ struct pv_apic_ops pv_apic_ops;
+ struct pv_mmu_ops pv_mmu_ops;
+ struct pv_lock_ops pv_lock_ops;
+};
+
+extern struct pv_info pv_info;
+extern struct pv_init_ops pv_init_ops;
+extern struct pv_time_ops pv_time_ops;
+extern struct pv_cpu_ops pv_cpu_ops;
+extern struct pv_irq_ops pv_irq_ops;
+extern struct pv_apic_ops pv_apic_ops;
+extern struct pv_mmu_ops pv_mmu_ops;
+extern struct pv_lock_ops pv_lock_ops;
+
+#define PARAVIRT_PATCH(x) \
+ (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
+
+#define paravirt_type(op) \
+ [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
+ [paravirt_opptr] "i" (&(op))
+#define paravirt_clobber(clobber) \
+ [paravirt_clobber] "i" (clobber)
+
+/*
+ * Generate some code, and mark it as patchable by the
+ * apply_paravirt() alternate instruction patcher.
+ */
+#define _paravirt_alt(insn_string, type, clobber) \
+ "771:\n\t" insn_string "\n" "772:\n" \
+ ".pushsection .parainstructions,\"a\"\n" \
+ _ASM_ALIGN "\n" \
+ _ASM_PTR " 771b\n" \
+ " .byte " type "\n" \
+ " .byte 772b-771b\n" \
+ " .short " clobber "\n" \
+ ".popsection\n"
+
+/* Generate patchable code, with the default asm parameters. */
+#define paravirt_alt(insn_string) \
+ _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
+
+/* Simple instruction patching code. */
+#define DEF_NATIVE(ops, name, code) \
+ extern const char start_##ops##_##name[], end_##ops##_##name[]; \
+ asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+
+unsigned paravirt_patch_nop(void);
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ignore(unsigned len);
+unsigned paravirt_patch_call(void *insnbuf,
+ const void *target, u16 tgt_clobbers,
+ unsigned long addr, u16 site_clobbers,
+ unsigned len);
+unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+ unsigned long addr, unsigned len);
+unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+ unsigned long addr, unsigned len);
+
+unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+ const char *start, const char *end);
+
+unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+ unsigned long addr, unsigned len);
+
+int paravirt_disable_iospace(void);
+
+/*
+ * This generates an indirect call based on the operation type number.
+ * The type number, computed in PARAVIRT_PATCH, is derived from the
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
+
+/*
+ * These macros are intended to wrap calls through one of the paravirt
+ * ops structs, so that they can be later identified and patched at
+ * runtime.
+ *
+ * Normally, a call to a pv_op function is a simple indirect call:
+ * (pv_op_struct.operations)(args...).
+ *
+ * Unfortunately, this is a relatively slow operation for modern CPUs,
+ * because it cannot necessarily determine what the destination
+ * address is. In this case, the address is a runtime constant, so at
+ * the very least we can patch the call to be a simple direct call, or
+ * ideally, patch an inline implementation into the callsite. (Direct
+ * calls are essentially free, because the call and return addresses
+ * are completely predictable.)
+ *
+ * For i386, these macros rely on the standard gcc "regparm(3)" calling
+ * convention, in which the first three arguments are placed in %eax,
+ * %edx, %ecx (in that order), and the remaining arguments are placed
+ * on the stack. All caller-save registers (eax,edx,ecx) are expected
+ * to be modified (either clobbered or used for return values).
+ * X86_64, on the other hand, already specifies a register-based calling
+ * convention, returning in %rax, with parameters going in %rdi, %rsi,
+ * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
+ * special handling for dealing with 4 arguments, unlike i386.
+ * However, x86_64 also has to clobber all caller-saved registers, which
+ * unfortunately are quite a few (r8 - r11)
+ *
+ * The call instruction itself is marked by placing its start address
+ * and size into the .parainstructions section, so that
+ * apply_paravirt() in arch/i386/kernel/alternative.c can do the
+ * appropriate patching under the control of the backend pv_init_ops
+ * implementation.
+ *
+ * Unfortunately there's no way to get gcc to generate the args setup
+ * for the call, and then allow the call itself to be generated by an
+ * inline asm. Because of this, we must do the complete arg setup and
+ * return value handling from within these macros. This is fairly
+ * cumbersome.
+ *
+ * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
+ * It could be extended to more arguments, but there would be little
+ * to be gained from that. For each number of arguments, there are
+ * the two VCALL and CALL variants for void and non-void functions.
+ *
+ * When there is a return value, the invoker of the macro must specify
+ * the return type. The macro then uses sizeof() on that type to
+ * determine whether it's a 32- or 64-bit value, and places the return
+ * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
+ * 64-bit). For x86_64 machines, it just returns in %rax regardless of
+ * the return value size.
+ *
+ * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
+ * i386 passes such pairs in low,high order.
+ *
+ * Small structures are passed and returned in registers. The macro
+ * calling convention can't directly deal with this, so the wrapper
+ * functions must do this.
+ *
+ * These PVOP_* macros are only defined within this header. This
+ * means that all uses must be wrapped in inline functions. This also
+ * makes sure the incoming and outgoing types are always correct.
+ */
+#ifdef CONFIG_X86_32
+#define PVOP_VCALL_ARGS \
+ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
+#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
+
+#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
+
+#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
+ "=c" (__ecx)
+#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
+
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
+#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
+
+#define EXTRA_CLOBBERS
+#define VEXTRA_CLOBBERS
+#else /* CONFIG_X86_64 */
+#define PVOP_VCALL_ARGS \
+ unsigned long __edi = __edi, __esi = __esi, \
+ __edx = __edx, __ecx = __ecx
+#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
+
+#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
+
+#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
+ "=S" (__esi), "=d" (__edx), \
+ "=c" (__ecx)
+#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
+
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
+#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
+
+#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
+#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_PARAVIRT_DEBUG
+#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
+#else
+#define PVOP_TEST_NULL(op) ((void)op)
+#endif
+
+#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
+ pre, post, ...) \
+ ({ \
+ rettype __ret; \
+ PVOP_CALL_ARGS; \
+ PVOP_TEST_NULL(op); \
+ /* This is 32-bit specific, but is okay in 64-bit */ \
+ /* since this condition will never hold */ \
+ if (sizeof(rettype) > sizeof(unsigned long)) { \
+ asm volatile(pre \
+ paravirt_alt(PARAVIRT_CALL) \
+ post \
+ : call_clbr \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ __ret = (rettype)((((u64)__edx) << 32) | __eax); \
+ } else { \
+ asm volatile(pre \
+ paravirt_alt(PARAVIRT_CALL) \
+ post \
+ : call_clbr \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ __ret = (rettype)__eax; \
+ } \
+ __ret; \
+ })
+
+#define __PVOP_CALL(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
+ EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
+ PVOP_CALLEE_CLOBBERS, , \
+ pre, post, ##__VA_ARGS__)
+
+
+#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
+ ({ \
+ PVOP_VCALL_ARGS; \
+ PVOP_TEST_NULL(op); \
+ asm volatile(pre \
+ paravirt_alt(PARAVIRT_CALL) \
+ post \
+ : call_clbr \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ })
+
+#define __PVOP_VCALL(op, pre, post, ...) \
+ ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
+ VEXTRA_CLOBBERS, \
+ pre, post, ##__VA_ARGS__)
+
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
+	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
+		       PVOP_VCALLEE_CLOBBERS, ,				\
+		       pre, post, ##__VA_ARGS__)
+
+
+
+#define PVOP_CALL0(rettype, op) \
+ __PVOP_CALL(rettype, op, "", "")
+#define PVOP_VCALL0(op) \
+ __PVOP_VCALL(op, "", "")
+
+#define PVOP_CALLEE0(rettype, op) \
+ __PVOP_CALLEESAVE(rettype, op, "", "")
+#define PVOP_VCALLEE0(op) \
+ __PVOP_VCALLEESAVE(op, "", "")
+
+
+#define PVOP_CALL1(rettype, op, arg1) \
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALL1(op, arg1) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+
+#define PVOP_CALLEE1(rettype, op, arg1) \
+ __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALLEE1(op, arg1) \
+ __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+
+
+#define PVOP_CALL2(rettype, op, arg1, arg2) \
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALL2(op, arg1, arg2) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
+#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
+ __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALLEE2(op, arg1, arg2) \
+ __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
+
+#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
+#define PVOP_VCALL3(op, arg1, arg2, arg3) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
+
+/* This is the only difference in x86_64. We can make it much simpler */
+#ifdef CONFIG_X86_32
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
+ __PVOP_CALL(rettype, op, \
+ "push %[_arg4];", "lea 4(%%esp),%%esp;", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
+ __PVOP_VCALL(op, \
+ "push %[_arg4];", "lea 4(%%esp),%%esp;", \
+ "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
+ "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+#else
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
+ __PVOP_CALL(rettype, op, "", "", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
+ __PVOP_VCALL(op, "", "", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
+#endif
+
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+ PARAVIRT_LAZY_NONE,
+ PARAVIRT_LAZY_MMU,
+ PARAVIRT_LAZY_CPU,
+};
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
+void paravirt_start_context_switch(struct task_struct *prev);
+void paravirt_end_context_switch(struct task_struct *next);
+
+void paravirt_enter_lazy_mmu(void);
+void paravirt_leave_lazy_mmu(void);
+
+void _paravirt_nop(void);
+u32 _paravirt_ident_32(u32);
+u64 _paravirt_ident_64(u64);
+
+#define paravirt_nop ((void *)_paravirt_nop)
+
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch_site {
+ u8 *instr; /* original instructions */
+ u8 instrtype; /* type of this instruction */
+ u8 len; /* length of original instruction */
+ u16 clobbers; /* what registers you may clobber */
+};
+
+extern struct paravirt_patch_site __parainstructions[],
+ __parainstructions_end[];
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_PARAVIRT_TYPES_H */
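The header comment above insists that the PVOP_* macros only ever be used from
thin inline wrappers. For illustration, this is roughly how paravirt.h wraps
them for read_cr2()/write_cr2() (a sketch, not part of this patch):

static inline unsigned long read_cr2(void)
{
	/* rettype selects the 32- vs 64-bit return handling in ____PVOP_CALL */
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	/* void op: VCALL variant; x lands in %eax on i386, %rdi on x86_64 */
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}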
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 927958d13c19..f76a162c082c 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -48,7 +48,6 @@ extern unsigned int pcibios_assign_all_busses(void);
#else
#define pcibios_assign_all_busses() 0
#endif
-#define pcibios_scan_all_fns(a, b) 0
extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO 0x1000
@@ -91,7 +90,7 @@ extern void pci_iommu_alloc(void);
#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
-#if defined(CONFIG_X86_64) || defined(CONFIG_DMA_API_DEBUG)
+#if defined(CONFIG_X86_64) || defined(CONFIG_DMAR) || defined(CONFIG_DMA_API_DEBUG)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 02ecb30982a3..a18c038a3079 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -42,6 +42,7 @@
#else /* ...!ASSEMBLY */
+#include <linux/kernel.h>
#include <linux/stringify.h>
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 5fb33e160ea0..fa64e401589d 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -87,6 +87,9 @@ union cpuid10_edx {
#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
extern void perf_counters_lapic_init(void);
+
+#define PERF_COUNTER_INDEX_OFFSET 0
+
#else
static inline void init_hw_perf_counters(void) { }
static inline void perf_counters_lapic_init(void) { }
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 3cc06e3fceb8..9de8729c1c8f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -134,6 +134,11 @@ static inline unsigned long pte_pfn(pte_t pte)
return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
static inline int pmd_large(pmd_t pte)
@@ -351,7 +356,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
-static inline unsigned pmd_index(unsigned long address)
+static inline unsigned long pmd_index(unsigned long address)
{
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
@@ -371,7 +376,7 @@ static inline unsigned pmd_index(unsigned long address)
* this function returns the index of the entry in the pte page which would
* control the given virtual address
*/
-static inline unsigned pte_index(unsigned long address)
+static inline unsigned long pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
@@ -422,11 +427,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
-static inline unsigned long pmd_pfn(pmd_t pmd)
-{
- return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
-}
-
static inline int pud_large(pud_t pud)
{
return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -462,7 +462,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
/* to find an entry in a page-table-directory. */
-static inline unsigned pud_index(unsigned long address)
+static inline unsigned long pud_index(unsigned long address)
{
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 49fb3ecf3bb3..621f56d73121 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -22,7 +22,14 @@ extern int reboot_force;
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
-#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
-#define round_down(x, y) ((x) & ~((y) - 1))
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x,y) ((__typeof__(x))((y)-1))
+#define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1)
+#define round_down(x,y) ((x) & ~__round_mask(x,y))
#endif /* _ASM_X86_PROTO_H */
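A quick worked example of why the typed __round_mask() matters, assuming a
64-bit x and an unsigned 32-bit y (a sketch, not part of this patch):

/*
 * Old definition: ~((y) - 1) is evaluated at the width of y, so for
 * y = 4096U the mask zero-extends and wipes the upper half of x:
 *
 *   old round_down(0x100000000ULL, 4096U)
 *       = 0x100000000 & 0x00000000FFFFF000 = 0             (wrong)
 *
 * New definition: __round_mask() casts the mask to __typeof__(x) first:
 *
 *   round_down(0x100000000ULL, 4096U)
 *       = 0x100000000 & ~(u64)0xFFF        = 0x100000000   (correct)
 *   round_up(0x100000001ULL, 4096U)
 *       = (0x100000000 | 0xFFF) + 1        = 0x100001000
 */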
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 263d397d2eef..75af592677ec 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,33 +1,8 @@
#ifndef _ASM_X86_SCATTERLIST_H
#define _ASM_X86_SCATTERLIST_H
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- unsigned int length;
- dma_addr_t dma_address;
- unsigned int dma_length;
-};
-
-#define ARCH_HAS_SG_CHAIN
#define ISA_DMA_THRESHOLD (0x00ffffff)
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#ifdef CONFIG_X86_32
-# define sg_dma_len(sg) ((sg)->length)
-#else
-# define sg_dma_len(sg) ((sg)->dma_length)
-#endif
+#include <asm-generic/scatterlist.h>
#endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/asm/shmbuf.h
index b51413b74971..83c05fc2de38 100644
--- a/arch/x86/include/asm/shmbuf.h
+++ b/arch/x86/include/asm/shmbuf.h
@@ -1,51 +1 @@
-#ifndef _ASM_X86_SHMBUF_H
-#define _ASM_X86_SHMBUF_H
-
-/*
- * The shmid64_ds structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space on 32 bit is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- *
- * Pad space on 64 bit is left for:
- * - 2 miscellaneous 64-bit values
- */
-
-struct shmid64_ds {
- struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
-#ifdef __i386__
- unsigned long __unused1;
-#endif
- __kernel_time_t shm_dtime; /* last detach time */
-#ifdef __i386__
- unsigned long __unused2;
-#endif
- __kernel_time_t shm_ctime; /* last change time */
-#ifdef __i386__
- unsigned long __unused3;
-#endif
- __kernel_pid_t shm_cpid; /* pid of creator */
- __kernel_pid_t shm_lpid; /* pid of last operator */
- unsigned long shm_nattch; /* no. of current attaches */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-struct shminfo64 {
- unsigned long shmmax;
- unsigned long shmmin;
- unsigned long shmmni;
- unsigned long shmseg;
- unsigned long shmall;
- unsigned long __unused1;
- unsigned long __unused2;
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _ASM_X86_SHMBUF_H */
+#include <asm-generic/shmbuf.h>
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 6a84ed166aec..1e796782cd7b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -121,7 +121,6 @@ static inline void arch_send_call_function_single_ipi(int cpu)
smp_ops.send_call_func_single_ipi(cpu);
}
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_ops.send_call_func_ipi(mask);
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h
index ca8bf2cd0ba9..6b71384b9d8b 100644
--- a/arch/x86/include/asm/socket.h
+++ b/arch/x86/include/asm/socket.h
@@ -1,60 +1 @@
-#ifndef _ASM_X86_SOCKET_H
-#define _ASM_X86_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#endif /* _ASM_X86_SOCKET_H */
+#include <asm-generic/socket.h>
diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/asm/sockios.h
index 49cc72b5d3c9..def6d4746ee7 100644
--- a/arch/x86/include/asm/sockios.h
+++ b/arch/x86/include/asm/sockios.h
@@ -1,13 +1 @@
-#ifndef _ASM_X86_SOCKIOS_H
-#define _ASM_X86_SOCKIOS_H
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* _ASM_X86_SOCKIOS_H */
+#include <asm-generic/sockios.h>
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index f517944b2b17..cf86a5e73815 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -3,6 +3,8 @@
extern int kstack_depth_to_print;
+int x86_is_stack_id(int id, char *name);
+
/* Generic stack tracer with callbacks */
struct stacktrace_ops {
diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/asm/termbits.h
index af1b70ea440f..3935b106de79 100644
--- a/arch/x86/include/asm/termbits.h
+++ b/arch/x86/include/asm/termbits.h
@@ -1,198 +1 @@
-#ifndef _ASM_X86_TERMBITS_H
-#define _ASM_X86_TERMBITS_H
-
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-struct termios2 {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
-
-/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000 /* non standard rate */
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-#endif /* _ASM_X86_TERMBITS_H */
+#include <asm-generic/termbits.h>
diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/asm/termios.h
index c4ee8056baca..280d78a9d966 100644
--- a/arch/x86/include/asm/termios.h
+++ b/arch/x86/include/asm/termios.h
@@ -1,114 +1 @@
-#ifndef _ASM_X86_TERMIOS_H
-#define _ASM_X86_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
-
-#include <asm/uaccess.h>
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
- unsigned short __tmp; \
- get_user(__tmp,&(termio)->x); \
- *(unsigned short *) &(termios)->x = __tmp; \
-}
-
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- struct termio __user *termio)
-{
- SET_LOW_TERMIOS_BITS(termios, termio, c_iflag);
- SET_LOW_TERMIOS_BITS(termios, termio, c_oflag);
- SET_LOW_TERMIOS_BITS(termios, termio, c_cflag);
- SET_LOW_TERMIOS_BITS(termios, termio, c_lflag);
- get_user(termios->c_line, &termio->c_line);
- return copy_from_user(termios->c_cc, termio->c_cc, NCC);
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- put_user((termios)->c_iflag, &(termio)->c_iflag);
- put_user((termios)->c_oflag, &(termio)->c_oflag);
- put_user((termios)->c_cflag, &(termio)->c_cflag);
- put_user((termios)->c_lflag, &(termio)->c_lflag);
- put_user((termios)->c_line, &(termio)->c_line);
- return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC);
-}
-
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios2 __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios2));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios2));
-}
-
-static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_X86_TERMIOS_H */
+#include <asm-generic/termios.h>
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h
index 09b97745772f..df1da20f4534 100644
--- a/arch/x86/include/asm/types.h
+++ b/arch/x86/include/asm/types.h
@@ -1,19 +1,11 @@
#ifndef _ASM_X86_TYPES_H
#define _ASM_X86_TYPES_H
-#include <asm-generic/int-ll64.h>
+#define dma_addr_t dma_addr_t
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
+#include <asm-generic/types.h>
-#endif /* __ASSEMBLY__ */
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
#ifdef __KERNEL__
-
#ifndef __ASSEMBLY__
typedef u64 dma64_addr_t;
diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/asm/ucontext.h
index 87324cf439d9..b7c29c8017f2 100644
--- a/arch/x86/include/asm/ucontext.h
+++ b/arch/x86/include/asm/ucontext.h
@@ -7,12 +7,6 @@
* sigcontext struct (uc_mcontext).
*/
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- struct sigcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
+#include <asm-generic/ucontext.h>
#endif /* _ASM_X86_UCONTEXT_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 11be5ad2e0e9..272514c2d456 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -55,6 +55,7 @@
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
+#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
#define PIN_BASED_EXT_INTR_MASK 0x00000001
@@ -351,9 +352,16 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_EPT_EXTENT_CONTEXT 1
#define VMX_EPT_EXTENT_GLOBAL 2
+
+#define VMX_EPT_EXECUTE_ONLY_BIT (1ull)
+#define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6)
+#define VMX_EPTP_UC_BIT (1ull << 8)
+#define VMX_EPTP_WB_BIT (1ull << 14)
+#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
+
#define VMX_EPT_DEFAULT_GAW 3
#define VMX_EPT_MAX_GAW 0x4
#define VMX_EPT_MT_EPTE_SHIFT 3
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 8c44c232efcb..827b5207523e 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -63,6 +63,12 @@ struct cstate_entry {
};
static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
+/* Used for the cross-CPU calls */
+struct acpi_processor_cx_cross_cpu {
+ struct acpi_processor_cx *cx;
+ long retval;
+};
+
static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
#define MWAIT_SUBSTATE_MASK (0xf)
@@ -77,10 +83,10 @@ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
#define NATIVE_CSTATE_BEYOND_HALT (2)
-static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
+static void acpi_processor_ffh_cstate_probe_cpu(void *_cxcc)
{
- struct acpi_processor_cx *cx = _cx;
- long retval;
+ struct acpi_processor_cx_cross_cpu *cxcc = _cxcc;
+ struct acpi_processor_cx *cx = cxcc->cx;
unsigned int eax, ebx, ecx, edx;
unsigned int edx_part;
unsigned int cstate_type; /* C-state type and not ACPI C-state type */
@@ -94,16 +100,16 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
- retval = 0;
+ cxcc->retval = 0;
if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
- retval = -1;
+ cxcc->retval = -1;
goto out;
}
/* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
- retval = -1;
+ cxcc->retval = -1;
goto out;
}
@@ -117,7 +123,7 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
cx->address);
out:
- return retval;
+ return;
}
int acpi_processor_ffh_cstate_probe(unsigned int cpu,
@@ -125,6 +131,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
{
struct cstate_entry *percpu_entry;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct acpi_processor_cx_cross_cpu cxcc = { .cx = cx, };
long retval;
if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
@@ -137,13 +144,18 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
percpu_entry->states[cx->index].eax = 0;
percpu_entry->states[cx->index].ecx = 0;
- /* Make sure we are running on right CPU */
+ /* Run acpi_processor_ffh_cstate_probe_cpu() on the target CPU */
- retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
+ retval = smp_call_function_single(cpu,
+ acpi_processor_ffh_cstate_probe_cpu, &cxcc, 1);
if (retval == 0) {
- /* Use the hint in CST */
- percpu_entry->states[cx->index].eax = cx->address;
- percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
+ retval = cxcc.retval;
+ if (retval == 0) {
+ /* Use the hint in CST */
+ percpu_entry->states[cx->index].eax = cx->address;
+ percpu_entry->states[cx->index].ecx =
+ MWAIT_ECX_INTERRUPT_BREAK;
+ }
}
return retval;
}
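The switch above from work_on_cpu() to smp_call_function_single() is what
forces acpi_processor_ffh_cstate_probe_cpu() to return void:
smp_call_function_single() only accepts a void (*func)(void *) callback, so
the result has to travel back through the argument structure. A minimal
sketch of the pattern, with illustrative names only:

struct probe_result {
	int retval;
};

static void probe_on_cpu(void *info)
{
	struct probe_result *res = info;

	res->retval = 0;	/* filled in with whatever the probe finds */
}

static int run_probe_on(int cpu)
{
	struct probe_result res = { .retval = -1 };
	int err;

	/* wait=1 guarantees res.retval is valid once this returns 0 */
	err = smp_call_function_single(cpu, probe_on_cpu, &res, 1);
	return err ? err : res.retval;
}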
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 9372f0406ad4..6c99f5037801 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1192,7 +1192,7 @@ out:
return 0;
}
-struct notifier_block device_nb = {
+static struct notifier_block device_nb = {
.notifier_call = device_change_notifier,
};
@@ -1763,7 +1763,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
flag |= __GFP_ZERO;
virt_addr = (void *)__get_free_pages(flag, get_order(size));
if (!virt_addr)
- return 0;
+ return NULL;
paddr = virt_to_phys(virt_addr);
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 10b2accd12ea..c1b17e97252e 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -472,6 +472,8 @@ static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
if (iommu->evt_buf == NULL)
return NULL;
+ iommu->evt_buf_size = EVT_BUFFER_SIZE;
+
return iommu->evt_buf;
}
@@ -691,6 +693,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
devid = e->devid;
devid_to = e->ext >> 8;
+			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
amd_iommu_alias_table[devid] = devid_to;
break;
@@ -749,11 +752,13 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias)
+ if (alias) {
amd_iommu_alias_table[dev_i] = devid_to;
- set_dev_entry_from_acpi(iommu,
- amd_iommu_alias_table[dev_i],
- flags, ext_flags);
+ set_dev_entry_from_acpi(iommu,
+ devid_to, flags, ext_flags);
+ }
+ set_dev_entry_from_acpi(iommu, dev_i,
+ flags, ext_flags);
}
break;
default:
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 8c7c042ecad1..0a1c2830ec66 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -140,7 +140,6 @@ int x2apic_mode;
#ifdef CONFIG_X86_X2APIC
/* x2apic enabled before OS handover */
static int x2apic_preenabled;
-static int disable_x2apic;
static __init int setup_nox2apic(char *str)
{
if (x2apic_enabled()) {
@@ -149,7 +148,6 @@ static __init int setup_nox2apic(char *str)
return 0;
}
- disable_x2apic = 1;
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
return 0;
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4d0216fcb36c..75a198f2636f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -215,17 +215,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
if (cfg) {
- if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+ if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
kfree(cfg);
cfg = NULL;
- } else if (!alloc_cpumask_var_node(&cfg->old_domain,
+ } else if (!zalloc_cpumask_var_node(&cfg->old_domain,
GFP_ATOMIC, node)) {
free_cpumask_var(cfg->domain);
kfree(cfg);
cfg = NULL;
- } else {
- cpumask_clear(cfg->domain);
- cpumask_clear(cfg->old_domain);
}
}
@@ -1716,25 +1713,19 @@ __apicdebuginit(void) print_IO_APIC(void)
return;
}
-__apicdebuginit(void) print_APIC_bitfield(int base)
+__apicdebuginit(void) print_APIC_field(int base)
{
- unsigned int v;
- int i, j;
+ int i;
if (apic_verbosity == APIC_QUIET)
return;
- printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
- for (i = 0; i < 8; i++) {
- v = apic_read(base + i*0x10);
- for (j = 0; j < 32; j++) {
- if (v & (1<<j))
- printk("1");
- else
- printk("0");
- }
- printk("\n");
- }
+ printk(KERN_DEBUG);
+
+ for (i = 0; i < 8; i++)
+ printk(KERN_CONT "%08x", apic_read(base + i*0x10));
+
+ printk(KERN_CONT "\n");
}
__apicdebuginit(void) print_local_APIC(void *dummy)
@@ -1745,7 +1736,8 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
if (apic_verbosity == APIC_QUIET)
return;
- printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
+ printk(KERN_DEBUG "\n");
+ printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
smp_processor_id(), hard_smp_processor_id());
v = apic_read(APIC_ID);
printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
@@ -1786,11 +1778,11 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
printk(KERN_DEBUG "... APIC ISR field:\n");
- print_APIC_bitfield(APIC_ISR);
+ print_APIC_field(APIC_ISR);
printk(KERN_DEBUG "... APIC TMR field:\n");
- print_APIC_bitfield(APIC_TMR);
+ print_APIC_field(APIC_TMR);
printk(KERN_DEBUG "... APIC IRR field:\n");
- print_APIC_bitfield(APIC_IRR);
+ print_APIC_field(APIC_IRR);
if (APIC_INTEGRATED(ver)) { /* !82489DX */
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e5b27d8f1b47..28e5f5956042 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -258,13 +258,15 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
unsigned bits;
+ int cpu = smp_processor_id();
bits = c->x86_coreid_bits;
-
/* Low order bits define the core id (index of core in socket) */
c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
/* Convert the initial APIC ID into the socket ID */
c->phys_proc_id = c->initial_apicid >> bits;
+ /* use socket ID also for last level cache */
+ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6b26d4deada0..f1961c07af9a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -848,9 +848,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
numa_add_cpu(smp_processor_id());
#endif
-
- /* Cap the iomem address space to what is addressable on all CPUs */
- iomem_resource.end &= (1ULL << c->x86_phys_bits) - 1;
}
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 6b2a52dd0403..dca325c03999 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,8 +30,8 @@
#include <asm/apic.h>
#include <asm/desc.h>
-static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
-static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
static DEFINE_PER_CPU(int, cpu_priv_count);
static DEFINE_MUTEX(cpu_debug_lock);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 284d1de968bc..0f504296a5f0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -242,7 +242,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
/*
* Make sure only one CPU runs in machine check panic
*/
- if (atomic_add_return(1, &mce_paniced) > 1)
+ if (atomic_inc_return(&mce_paniced) > 1)
wait_for_panic();
barrier();
@@ -705,7 +705,7 @@ static int mce_start(int *no_way_out)
* global_nwo should be updated before mce_callin
*/
smp_wmb();
- order = atomic_add_return(1, &mce_callin);
+ order = atomic_inc_return(&mce_callin);
/*
* Wait for everyone.
@@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status)
*/
static int check_interval = 5 * 60; /* 5 minutes */
-static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
+static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);
static void mcheck_timer(unsigned long data)
@@ -1110,14 +1110,14 @@ static void mcheck_timer(unsigned long data)
* Alert userspace if needed. If we logged an MCE, reduce the
* polling interval, otherwise increase the polling interval.
*/
- n = &__get_cpu_var(next_interval);
+ n = &__get_cpu_var(mce_next_interval);
if (mce_notify_irq())
*n = max(*n/2, HZ/100);
else
*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
t->expires = jiffies + *n;
- add_timer(t);
+ add_timer_on(t, smp_processor_id());
}
static void mce_do_trigger(struct work_struct *work)
@@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
static void mce_init_timer(void)
{
struct timer_list *t = &__get_cpu_var(mce_timer);
- int *n = &__get_cpu_var(next_interval);
+ int *n = &__get_cpu_var(mce_next_interval);
if (mce_ignore_ce)
return;
@@ -1321,7 +1321,7 @@ static void mce_init_timer(void)
return;
setup_timer(t, mcheck_timer, smp_processor_id());
t->expires = round_jiffies(jiffies + *n);
- add_timer(t);
+ add_timer_on(t, smp_processor_id());
}
/*
@@ -1914,7 +1914,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
t->expires = round_jiffies(jiffies +
- __get_cpu_var(next_interval));
+ __get_cpu_var(mce_next_interval));
add_timer_on(t, cpu);
smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index ddae21620bda..bd2a2fa84628 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -69,7 +69,7 @@ struct threshold_bank {
struct threshold_block *blocks;
cpumask_var_t cpus;
};
-static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
+static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
index ee2331b0e58f..33af14110dfd 100644
--- a/arch/x86/kernel/cpu/mtrr/amd.c
+++ b/arch/x86/kernel/cpu/mtrr/amd.c
@@ -7,15 +7,15 @@
static void
amd_get_mtrr(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type * type)
+ unsigned long *size, mtrr_type *type)
{
unsigned long low, high;
rdmsr(MSR_K6_UWCCR, low, high);
- /* Upper dword is region 1, lower is region 0 */
+ /* Upper dword is region 1, lower is region 0 */
if (reg == 1)
low = high;
- /* The base masks off on the right alignment */
+ /* The base masks off on the right alignment */
*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
*type = 0;
if (low & 1)
@@ -27,74 +27,81 @@ amd_get_mtrr(unsigned int reg, unsigned long *base,
return;
}
/*
- * This needs a little explaining. The size is stored as an
- * inverted mask of bits of 128K granularity 15 bits long offset
- * 2 bits
+	 * This needs a little explaining. The size is stored as an
+	 * inverted mask of 128K-granularity bits, 15 bits long, offset
+	 * by 2 bits.
*
- * So to get a size we do invert the mask and add 1 to the lowest
- * mask bit (4 as its 2 bits in). This gives us a size we then shift
- * to turn into 128K blocks
+	 * So to get a size we invert the mask and add 1 to the lowest
+	 * mask bit (4, as it's 2 bits in). This gives us a size we then
+	 * shift to turn into 128K blocks.
*
- * eg 111 1111 1111 1100 is 512K
+ * eg 111 1111 1111 1100 is 512K
*
- * invert 000 0000 0000 0011
- * +1 000 0000 0000 0100
- * *128K ...
+ * invert 000 0000 0000 0011
+ * +1 000 0000 0000 0100
+ * *128K ...
*/
low = (~low) & 0x1FFFC;
*size = (low + 4) << (15 - PAGE_SHIFT);
- return;
}
-static void amd_set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
-/* [SUMMARY] Set variable MTRR register on the local CPU.
- <reg> The register to set.
- <base> The base address of the region.
- <size> The size of the region. If this is 0 the region is disabled.
- <type> The type of the region.
- [RETURNS] Nothing.
-*/
+/**
+ * amd_set_mtrr - Set variable MTRR register on the local CPU.
+ *
+ * @reg:  The register to set.
+ * @base: The base address of the region.
+ * @size: The size of the region. If this is 0 the region is disabled.
+ * @type: The type of the region.
+ *
+ * Returns nothing.
+ */
+static void
+amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
u32 regs[2];
/*
- * Low is MTRR0 , High MTRR 1
+ * Low is MTRR0, High MTRR 1
*/
rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
/*
- * Blank to disable
+ * Blank to disable
*/
- if (size == 0)
+ if (size == 0) {
regs[reg] = 0;
- else
- /* Set the register to the base, the type (off by one) and an
- inverted bitmask of the size The size is the only odd
- bit. We are fed say 512K We invert this and we get 111 1111
- 1111 1011 but if you subtract one and invert you get the
- desired 111 1111 1111 1100 mask
-
- But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */
+ } else {
+ /*
+		 * Set the register to the base, the type (off by one) and an
+		 * inverted bitmask of the size. The size is the only odd
+		 * bit. We are fed, say, 512K; we invert this and we get
+		 * 111 1111 1111 1011, but if you subtract one and invert you
+		 * get the desired 111 1111 1111 1100 mask.
+ *
+ * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
+ */
regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
| (base << PAGE_SHIFT) | (type + 1);
+ }
/*
- * The writeback rule is quite specific. See the manual. Its
- * disable local interrupts, write back the cache, set the mtrr
+	 * The writeback rule is quite specific. See the manual. It's:
+	 * disable local interrupts, write back the cache, set the mtrr.
*/
wbinvd();
wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
}
-static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
+static int
+amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
- /* Apply the K6 block alignment and size rules
- In order
- o Uncached or gathering only
- o 128K or bigger block
- o Power of 2 block
- o base suitably aligned to the power
- */
+ /*
+ * Apply the K6 block alignment and size rules
+ * In order
+ * o Uncached or gathering only
+ * o 128K or bigger block
+ * o Power of 2 block
+ * o base suitably aligned to the power
+ */
if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
|| (size & ~(size - 1)) - size || (base & (size - 1)))
return -EINVAL;
@@ -115,5 +122,3 @@ int __init amd_init_mtrr(void)
set_mtrr_ops(&amd_mtrr_ops);
return 0;
}
-
-//arch_initcall(amd_mtrr_init);
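Working the set side of this encoding through for the same 512K example used
in the comments above (size is in 4K pages here, so 512K = 0x80 pages):

/*
 *   -size                = -0x80   = 0xFFFFFF80  (as u32)
 *   >> (15 - PAGE_SHIFT) = >> 3    = 0x1FFFFFF0
 *   & 0x0001FFFC                   = 0x0001FFF0
 *
 * i.e. the 15-bit inverted mask 111 1111 1111 1100 (512K) placed at
 * bit offset 2, exactly what amd_get_mtrr() decodes back to 512K.
 */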
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
index cb9aa3a7a7ab..de89f14eff3a 100644
--- a/arch/x86/kernel/cpu/mtrr/centaur.c
+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
@@ -1,7 +1,9 @@
#include <linux/init.h>
#include <linux/mm.h>
+
#include <asm/mtrr.h>
#include <asm/msr.h>
+
#include "mtrr.h"
static struct {
@@ -12,25 +14,25 @@ static struct {
static u8 centaur_mcr_reserved;
static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */
-/*
- * Report boot time MCR setups
+/**
+ * centaur_get_free_region - Get a free MTRR.
+ *
+ * @base: The starting (base) address of the region.
+ * @size: The size (in bytes) of the region.
+ *
+ * Returns: the index of the region on success, else -1 on error.
*/
-
static int
centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-/* [SUMMARY] Get a free MTRR.
- <base> The starting (base) address of the region.
- <size> The size (in bytes) of the region.
- [RETURNS] The index of the region on success, else -1 on error.
-*/
{
- int i, max;
- mtrr_type ltype;
unsigned long lbase, lsize;
+ mtrr_type ltype;
+ int i, max;
max = num_var_ranges;
if (replace_reg >= 0 && replace_reg < max)
return replace_reg;
+
for (i = 0; i < max; ++i) {
if (centaur_mcr_reserved & (1 << i))
continue;
@@ -38,11 +40,14 @@ centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
if (lsize == 0)
return i;
}
+
return -ENOSPC;
}
-void
-mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
+/*
+ * Report boot time MCR setups
+ */
+void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
centaur_mcr[mcr].low = lo;
centaur_mcr[mcr].high = hi;
@@ -54,33 +59,35 @@ centaur_get_mcr(unsigned int reg, unsigned long *base,
{
*base = centaur_mcr[reg].high >> PAGE_SHIFT;
*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
- *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */
+ *type = MTRR_TYPE_WRCOMB; /* write-combining */
+
if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
*type = MTRR_TYPE_UNCACHABLE;
if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
*type = MTRR_TYPE_WRBACK;
if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
*type = MTRR_TYPE_WRBACK;
-
}
-static void centaur_set_mcr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
+static void
+centaur_set_mcr(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type)
{
unsigned long low, high;
if (size == 0) {
- /* Disable */
+ /* Disable */
high = low = 0;
} else {
high = base << PAGE_SHIFT;
- if (centaur_mcr_type == 0)
- low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */
- else {
+ if (centaur_mcr_type == 0) {
+ /* Only support write-combining... */
+ low = -size << PAGE_SHIFT | 0x1f;
+ } else {
if (type == MTRR_TYPE_UNCACHABLE)
- low = -size << PAGE_SHIFT | 0x02; /* NC */
+ low = -size << PAGE_SHIFT | 0x02; /* NC */
else
- low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */
+ low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */
}
}
centaur_mcr[reg].high = high;
@@ -88,118 +95,16 @@ static void centaur_set_mcr(unsigned int reg, unsigned long base,
wrmsr(MSR_IDT_MCR0 + reg, low, high);
}
-#if 0
-/*
- * Initialise the later (saner) Winchip MCR variant. In this version
- * the BIOS can pass us the registers it has used (but not their values)
- * and the control register is read/write
- */
-
-static void __init
-centaur_mcr1_init(void)
-{
- unsigned i;
- u32 lo, hi;
-
- /* Unfortunately, MCR's are read-only, so there is no way to
- * find out what the bios might have done.
- */
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- if (((lo >> 17) & 7) == 1) { /* Type 1 Winchip2 MCR */
- lo &= ~0x1C0; /* clear key */
- lo |= 0x040; /* set key to 1 */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */
- }
-
- centaur_mcr_type = 1;
-
- /*
- * Clear any unconfigured MCR's.
- */
-
- for (i = 0; i < 8; ++i) {
- if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) {
- if (!(lo & (1 << (9 + i))))
- wrmsr(MSR_IDT_MCR0 + i, 0, 0);
- else
- /*
- * If the BIOS set up an MCR we cannot see it
- * but we don't wish to obliterate it
- */
- centaur_mcr_reserved |= (1 << i);
- }
- }
- /*
- * Throw the main write-combining switch...
- * However if OOSTORE is enabled then people have already done far
- * cleverer things and we should behave.
- */
-
- lo |= 15; /* Write combine enables */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-/*
- * Initialise the original winchip with read only MCR registers
- * no used bitmask for the BIOS to pass on and write only control
- */
-
-static void __init
-centaur_mcr0_init(void)
-{
- unsigned i;
-
- /* Unfortunately, MCR's are read-only, so there is no way to
- * find out what the bios might have done.
- */
-
- /* Clear any unconfigured MCR's.
- * This way we are sure that the centaur_mcr array contains the actual
- * values. The disadvantage is that any BIOS tweaks are thus undone.
- *
- */
- for (i = 0; i < 8; ++i) {
- if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
- wrmsr(MSR_IDT_MCR0 + i, 0, 0);
- }
-
- wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */
-}
-
-/*
- * Initialise Winchip series MCR registers
- */
-
-static void __init
-centaur_mcr_init(void)
-{
- struct set_mtrr_context ctxt;
-
- set_mtrr_prepare_save(&ctxt);
- set_mtrr_cache_disable(&ctxt);
-
- if (boot_cpu_data.x86_model == 4)
- centaur_mcr0_init();
- else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9)
- centaur_mcr1_init();
-
- set_mtrr_done(&ctxt);
-}
-#endif
-
-static int centaur_validate_add_page(unsigned long base,
- unsigned long size, unsigned int type)
+static int
+centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
/*
- * FIXME: Winchip2 supports uncached
+ * FIXME: Winchip2 supports uncached
*/
- if (type != MTRR_TYPE_WRCOMB &&
+ if (type != MTRR_TYPE_WRCOMB &&
(centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
- printk(KERN_WARNING
- "mtrr: only write-combining%s supported\n",
- centaur_mcr_type ? " and uncacheable are"
- : " is");
+ pr_warning("mtrr: only write-combining%s supported\n",
+ centaur_mcr_type ? " and uncacheable are" : " is");
return -EINVAL;
}
return 0;
@@ -207,7 +112,6 @@ static int centaur_validate_add_page(unsigned long base,
static struct mtrr_ops centaur_mtrr_ops = {
.vendor = X86_VENDOR_CENTAUR,
-// .init = centaur_mcr_init,
.set = centaur_set_mcr,
.get = centaur_get_mcr,
.get_free_region = centaur_get_free_region,
@@ -220,5 +124,3 @@ int __init centaur_init_mtrr(void)
set_mtrr_ops(&centaur_mtrr_ops);
return 0;
}
-
-//arch_initcall(centaur_init_mtrr);
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 1d584a18a50d..b8aba811b60e 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -1,51 +1,52 @@
-/* MTRR (Memory Type Range Register) cleanup
-
- Copyright (C) 2009 Yinghai Lu
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
+/*
+ * MTRR (Memory Type Range Register) cleanup
+ *
+ * Copyright (C) 2009 Yinghai Lu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
-#include <linux/mutex.h>
#include <linux/sort.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/kvm_para.h>
+#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
#include <asm/msr.h>
-#include <asm/kvm_para.h>
+
#include "mtrr.h"
-/* should be related to MTRR_VAR_RANGES nums */
+/* Should be related to MTRR_VAR_RANGES nums */
#define RANGE_NUM 256
struct res_range {
- unsigned long start;
- unsigned long end;
+ unsigned long start;
+ unsigned long end;
};
static int __init
-add_range(struct res_range *range, int nr_range, unsigned long start,
- unsigned long end)
+add_range(struct res_range *range, int nr_range,
+ unsigned long start, unsigned long end)
{
- /* out of slots */
+ /* Out of slots: */
if (nr_range >= RANGE_NUM)
return nr_range;
@@ -58,12 +59,12 @@ add_range(struct res_range *range, int nr_range, unsigned long start,
}
static int __init
-add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
- unsigned long end)
+add_range_with_merge(struct res_range *range, int nr_range,
+ unsigned long start, unsigned long end)
{
int i;
- /* try to merge it with old one */
+ /* Try to merge it with old one: */
for (i = 0; i < nr_range; i++) {
unsigned long final_start, final_end;
unsigned long common_start, common_end;
@@ -84,7 +85,7 @@ add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
return nr_range;
}
- /* need to add that */
+ /* Need to add it: */
return add_range(range, nr_range, start, end);
}
@@ -117,7 +118,7 @@ subtract_range(struct res_range *range, unsigned long start, unsigned long end)
}
if (start > range[j].start && end < range[j].end) {
- /* find the new spare */
+ /* Find the new spare: */
for (i = 0; i < RANGE_NUM; i++) {
if (range[i].end == 0)
break;
@@ -147,13 +148,19 @@ static int __init cmp_range(const void *x1, const void *x2)
}
struct var_mtrr_range_state {
- unsigned long base_pfn;
- unsigned long size_pfn;
- mtrr_type type;
+ unsigned long base_pfn;
+ unsigned long size_pfn;
+ mtrr_type type;
};
static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
+
static int __initdata debug_print;
+#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
+
+
+#define BIOS_BUG_MSG KERN_WARNING \
+ "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
static int __init
x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
@@ -180,7 +187,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
range[i].start, range[i].end + 1);
}
- /* take out UC ranges */
+ /* Take out UC ranges: */
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type;
if (type != MTRR_TYPE_UNCACHABLE &&
@@ -244,10 +251,9 @@ static int __initdata nr_range;
static unsigned long __init sum_ranges(struct res_range *range, int nr_range)
{
- unsigned long sum;
+ unsigned long sum = 0;
int i;
- sum = 0;
for (i = 0; i < nr_range; i++)
sum += range[i].end + 1 - range[i].start;
@@ -288,7 +294,7 @@ struct var_mtrr_state {
static void __init
set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
- unsigned char type, unsigned int address_bits)
+ unsigned char type, unsigned int address_bits)
{
u32 base_lo, base_hi, mask_lo, mask_hi;
u64 base, mask;
@@ -301,7 +307,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
mask = (1ULL << address_bits) - 1;
mask &= ~((((u64)sizek) << 10) - 1);
- base = ((u64)basek) << 10;
+ base = ((u64)basek) << 10;
base |= type;
mask |= 0x800;
@@ -317,15 +323,14 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
static void __init
save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
- unsigned char type)
+ unsigned char type)
{
range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
range_state[reg].type = type;
}
-static void __init
-set_var_mtrr_all(unsigned int address_bits)
+static void __init set_var_mtrr_all(unsigned int address_bits)
{
unsigned long basek, sizek;
unsigned char type;
@@ -342,11 +347,11 @@ set_var_mtrr_all(unsigned int address_bits)
static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
- char factor;
unsigned long base = sizek;
+ char factor;
if (base & ((1<<10) - 1)) {
- /* not MB alignment */
+ /* Not MB-aligned: */
factor = 'K';
} else if (base & ((1<<20) - 1)) {
factor = 'M';
@@ -372,11 +377,12 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
unsigned long max_align, align;
unsigned long sizek;
- /* Compute the maximum size I can make a range */
+ /* Compute the maximum size with which we can make a range: */
if (range_startk)
max_align = ffs(range_startk) - 1;
else
max_align = 32;
+
align = fls(range_sizek) - 1;
if (align > max_align)
align = max_align;
@@ -386,11 +392,10 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
char start_factor = 'K', size_factor = 'K';
unsigned long start_base, size_base;
- start_base = to_size_factor(range_startk,
- &start_factor),
- size_base = to_size_factor(sizek, &size_factor),
+ start_base = to_size_factor(range_startk, &start_factor);
+ size_base = to_size_factor(sizek, &size_factor);
- printk(KERN_DEBUG "Setting variable MTRR %d, "
+ Dprintk("Setting variable MTRR %d, "
"base: %ld%cB, range: %ld%cB, type %s\n",
reg, start_base, start_factor,
size_base, size_factor,
@@ -425,10 +430,11 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
chunk_sizek = state->chunk_sizek;
gran_sizek = state->gran_sizek;
- /* align with gran size, prevent small block used up MTRRs */
+ /* Align with gran size, prevent small block used up MTRRs: */
range_basek = ALIGN(state->range_startk, gran_sizek);
if ((range_basek > basek) && basek)
return second_sizek;
+
state->range_sizek -= (range_basek - state->range_startk);
range_sizek = ALIGN(state->range_sizek, gran_sizek);
@@ -439,22 +445,21 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
}
state->range_sizek = range_sizek;
- /* try to append some small hole */
+ /* Try to append some small hole: */
range0_basek = state->range_startk;
range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
- /* no increase */
+ /* No increase: */
if (range0_sizek == state->range_sizek) {
- if (debug_print)
- printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
- range0_basek<<10,
- (range0_basek + state->range_sizek)<<10);
+ Dprintk("rangeX: %016lx - %016lx\n",
+ range0_basek<<10,
+ (range0_basek + state->range_sizek)<<10);
state->reg = range_to_mtrr(state->reg, range0_basek,
state->range_sizek, MTRR_TYPE_WRBACK);
return 0;
}
- /* only cut back, when it is not the last */
+ /* Only cut back when it is not the last: */
if (sizek) {
while (range0_basek + range0_sizek > (basek + sizek)) {
if (range0_sizek >= chunk_sizek)
@@ -470,16 +475,16 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
second_try:
range_basek = range0_basek + range0_sizek;
- /* one hole in the middle */
+ /* One hole in the middle: */
if (range_basek > basek && range_basek <= (basek + sizek))
second_sizek = range_basek - basek;
if (range0_sizek > state->range_sizek) {
- /* one hole in middle or at end */
+ /* One hole in middle or at the end: */
hole_sizek = range0_sizek - state->range_sizek - second_sizek;
- /* hole size should be less than half of range0 size */
+ /* Hole size should be less than half of range0 size: */
if (hole_sizek >= (range0_sizek >> 1) &&
range0_sizek >= chunk_sizek) {
range0_sizek -= chunk_sizek;
@@ -491,32 +496,30 @@ second_try:
}
if (range0_sizek) {
- if (debug_print)
- printk(KERN_DEBUG "range0: %016lx - %016lx\n",
- range0_basek<<10,
- (range0_basek + range0_sizek)<<10);
+ Dprintk("range0: %016lx - %016lx\n",
+ range0_basek<<10,
+ (range0_basek + range0_sizek)<<10);
state->reg = range_to_mtrr(state->reg, range0_basek,
range0_sizek, MTRR_TYPE_WRBACK);
}
if (range0_sizek < state->range_sizek) {
- /* need to handle left over */
+ /* Need to handle left over range: */
range_sizek = state->range_sizek - range0_sizek;
- if (debug_print)
- printk(KERN_DEBUG "range: %016lx - %016lx\n",
- range_basek<<10,
- (range_basek + range_sizek)<<10);
+ Dprintk("range: %016lx - %016lx\n",
+ range_basek<<10,
+ (range_basek + range_sizek)<<10);
+
state->reg = range_to_mtrr(state->reg, range_basek,
range_sizek, MTRR_TYPE_WRBACK);
}
if (hole_sizek) {
hole_basek = range_basek - hole_sizek - second_sizek;
- if (debug_print)
- printk(KERN_DEBUG "hole: %016lx - %016lx\n",
- hole_basek<<10,
- (hole_basek + hole_sizek)<<10);
+ Dprintk("hole: %016lx - %016lx\n",
+ hole_basek<<10,
+ (hole_basek + hole_sizek)<<10);
state->reg = range_to_mtrr(state->reg, hole_basek,
hole_sizek, MTRR_TYPE_UNCACHABLE);
}
@@ -537,23 +540,23 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
basek = base_pfn << (PAGE_SHIFT - 10);
sizek = size_pfn << (PAGE_SHIFT - 10);
- /* See if I can merge with the last range */
+ /* See if I can merge with the last range: */
if ((basek <= 1024) ||
(state->range_startk + state->range_sizek == basek)) {
unsigned long endk = basek + sizek;
state->range_sizek = endk - state->range_startk;
return;
}
- /* Write the range mtrrs */
+ /* Write the range mtrrs: */
if (state->range_sizek != 0)
second_sizek = range_to_mtrr_with_hole(state, basek, sizek);
- /* Allocate an msr */
+ /* Allocate an msr: */
state->range_startk = basek + second_sizek;
state->range_sizek = sizek - second_sizek;
}
-/* mininum size of mtrr block that can take hole */
+/* Minimum size of mtrr block that can take hole: */
static u64 mtrr_chunk_size __initdata = (256ULL<<20);
static int __init parse_mtrr_chunk_size_opt(char *p)
@@ -565,7 +568,7 @@ static int __init parse_mtrr_chunk_size_opt(char *p)
}
early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);
-/* granity of mtrr of block */
+/* Granularity of mtrr block: */
static u64 mtrr_gran_size __initdata;
static int __init parse_mtrr_gran_size_opt(char *p)
@@ -577,7 +580,7 @@ static int __init parse_mtrr_gran_size_opt(char *p)
}
early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);
-static int nr_mtrr_spare_reg __initdata =
+static unsigned long nr_mtrr_spare_reg __initdata =
CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;
static int __init parse_mtrr_spare_reg(char *arg)
@@ -586,7 +589,6 @@ static int __init parse_mtrr_spare_reg(char *arg)
nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
return 0;
}
-
early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
static int __init
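
The parse_mtrr_chunk_size_opt/parse_mtrr_gran_size_opt/parse_mtrr_spare_reg hooks above are wired to kernel command-line options. Booting with, for example, "mtrr_chunk_size=128M mtrr_gran_size=64M mtrr_spare_reg_nr=1" (the values are only illustrative) makes the cleanup code below use those fixed chunk and granularity sizes directly instead of iterating over candidate combinations, and asks the optimal-index search to leave one variable MTRR unused as a spare.
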
@@ -594,8 +596,8 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
u64 chunk_size, u64 gran_size)
{
struct var_mtrr_state var_state;
- int i;
int num_reg;
+ int i;
var_state.range_startk = 0;
var_state.range_sizek = 0;
@@ -605,17 +607,18 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
memset(range_state, 0, sizeof(range_state));
- /* Write the range etc */
- for (i = 0; i < nr_range; i++)
+ /* Write the range: */
+ for (i = 0; i < nr_range; i++) {
set_var_mtrr_range(&var_state, range[i].start,
range[i].end - range[i].start + 1);
+ }
- /* Write the last range */
+ /* Write the last range: */
if (var_state.range_sizek != 0)
range_to_mtrr_with_hole(&var_state, 0, 0);
num_reg = var_state.reg;
- /* Clear out the extra MTRR's */
+ /* Clear out the extra MTRR's: */
while (var_state.reg < num_var_ranges) {
save_var_mtrr(var_state.reg, 0, 0, 0);
var_state.reg++;
@@ -625,11 +628,11 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
}
struct mtrr_cleanup_result {
- unsigned long gran_sizek;
- unsigned long chunk_sizek;
- unsigned long lose_cover_sizek;
- unsigned int num_reg;
- int bad;
+ unsigned long gran_sizek;
+ unsigned long chunk_sizek;
+ unsigned long lose_cover_sizek;
+ unsigned int num_reg;
+ int bad;
};
/*
@@ -645,10 +648,10 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM];
static void __init print_out_mtrr_range_state(void)
{
- int i;
char start_factor = 'K', size_factor = 'K';
unsigned long start_base, size_base;
mtrr_type type;
+ int i;
for (i = 0; i < num_var_ranges; i++) {
@@ -676,10 +679,10 @@ static int __init mtrr_need_cleanup(void)
int i;
mtrr_type type;
unsigned long size;
- /* extra one for all 0 */
+ /* Extra one for all 0: */
int num[MTRR_NUM_TYPES + 1];
- /* check entries number */
+ /* Check entries number: */
memset(num, 0, sizeof(num));
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type;
@@ -693,88 +696,86 @@ static int __init mtrr_need_cleanup(void)
num[type]++;
}
- /* check if we got UC entries */
+ /* Check if we got UC entries: */
if (!num[MTRR_TYPE_UNCACHABLE])
return 0;
- /* check if we only had WB and UC */
+ /* Check if we only had WB and UC: */
if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
- num_var_ranges - num[MTRR_NUM_TYPES])
+ num_var_ranges - num[MTRR_NUM_TYPES])
return 0;
return 1;
}
static unsigned long __initdata range_sums;
-static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
- unsigned long extra_remove_base,
- unsigned long extra_remove_size,
- int i)
+
+static void __init
+mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
+ unsigned long x_remove_base,
+ unsigned long x_remove_size, int i)
{
- int num_reg;
static struct res_range range_new[RANGE_NUM];
- static int nr_range_new;
unsigned long range_sums_new;
+ static int nr_range_new;
+ int num_reg;
- /* convert ranges to var ranges state */
- num_reg = x86_setup_var_mtrrs(range, nr_range,
- chunk_size, gran_size);
+ /* Convert ranges to var ranges state: */
+ num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
- /* we got new setting in range_state, check it */
+ /* We got new setting in range_state, check it: */
memset(range_new, 0, sizeof(range_new));
nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
- extra_remove_base, extra_remove_size);
+ x_remove_base, x_remove_size);
range_sums_new = sum_ranges(range_new, nr_range_new);
result[i].chunk_sizek = chunk_size >> 10;
result[i].gran_sizek = gran_size >> 10;
result[i].num_reg = num_reg;
+
if (range_sums < range_sums_new) {
- result[i].lose_cover_sizek =
- (range_sums_new - range_sums) << PSHIFT;
+ result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT;
result[i].bad = 1;
- } else
- result[i].lose_cover_sizek =
- (range_sums - range_sums_new) << PSHIFT;
+ } else {
+ result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT;
+ }
- /* double check it */
+ /* Double check it: */
if (!result[i].bad && !result[i].lose_cover_sizek) {
- if (nr_range_new != nr_range ||
- memcmp(range, range_new, sizeof(range)))
- result[i].bad = 1;
+ if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range)))
+ result[i].bad = 1;
}
- if (!result[i].bad && (range_sums - range_sums_new <
- min_loss_pfn[num_reg])) {
- min_loss_pfn[num_reg] =
- range_sums - range_sums_new;
- }
+ if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg]))
+ min_loss_pfn[num_reg] = range_sums - range_sums_new;
}
static void __init mtrr_print_out_one_result(int i)
{
- char gran_factor, chunk_factor, lose_factor;
unsigned long gran_base, chunk_base, lose_base;
+ char gran_factor, chunk_factor, lose_factor;
gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
- printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
- result[i].bad ? "*BAD*" : " ",
- gran_base, gran_factor, chunk_base, chunk_factor);
- printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
- result[i].num_reg, result[i].bad ? "-" : "",
- lose_base, lose_factor);
+
+ pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
+ result[i].bad ? "*BAD*" : " ",
+ gran_base, gran_factor, chunk_base, chunk_factor);
+ pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n",
+ result[i].num_reg, result[i].bad ? "-" : "",
+ lose_base, lose_factor);
}
static int __init mtrr_search_optimal_index(void)
{
- int i;
int num_reg_good;
int index_good;
+ int i;
if (nr_mtrr_spare_reg >= num_var_ranges)
nr_mtrr_spare_reg = num_var_ranges - 1;
+
num_reg_good = -1;
for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
if (!min_loss_pfn[i])
@@ -796,24 +797,24 @@ static int __init mtrr_search_optimal_index(void)
return index_good;
}
-
int __init mtrr_cleanup(unsigned address_bits)
{
- unsigned long extra_remove_base, extra_remove_size;
+ unsigned long x_remove_base, x_remove_size;
unsigned long base, size, def, dummy;
- mtrr_type type;
u64 chunk_size, gran_size;
+ mtrr_type type;
int index_good;
int i;
if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
return 0;
+
rdmsr(MSR_MTRRdefType, def, dummy);
def &= 0xff;
if (def != MTRR_TYPE_UNCACHABLE)
return 0;
- /* get it and store it aside */
+ /* Get it and store it aside: */
memset(range_state, 0, sizeof(range_state));
for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &base, &size, &type);
@@ -822,29 +823,28 @@ int __init mtrr_cleanup(unsigned address_bits)
range_state[i].type = type;
}
- /* check if we need handle it and can handle it */
+ /* Check if we need handle it and can handle it: */
if (!mtrr_need_cleanup())
return 0;
- /* print original var MTRRs at first, for debugging: */
+ /* Print original var MTRRs at first, for debugging: */
printk(KERN_DEBUG "original variable MTRRs\n");
print_out_mtrr_range_state();
memset(range, 0, sizeof(range));
- extra_remove_size = 0;
- extra_remove_base = 1 << (32 - PAGE_SHIFT);
+ x_remove_size = 0;
+ x_remove_base = 1 << (32 - PAGE_SHIFT);
if (mtrr_tom2)
- extra_remove_size =
- (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
- nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
- extra_remove_size);
+ x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
+
+ nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
/*
- * [0, 1M) should always be coverred by var mtrr with WB
- * and fixed mtrrs should take effective before var mtrr for it
+ * [0, 1M) should always be covered by var mtrr with WB
+ * and fixed mtrrs should take effect before var mtrr for it:
*/
nr_range = add_range_with_merge(range, nr_range, 0,
(1ULL<<(20 - PAGE_SHIFT)) - 1);
- /* sort the ranges */
+ /* Sort the ranges: */
sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
range_sums = sum_ranges(range, nr_range);
@@ -854,7 +854,7 @@ int __init mtrr_cleanup(unsigned address_bits)
if (mtrr_chunk_size && mtrr_gran_size) {
i = 0;
mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
- extra_remove_base, extra_remove_size, i);
+ x_remove_base, x_remove_size, i);
mtrr_print_out_one_result(i);
@@ -880,7 +880,7 @@ int __init mtrr_cleanup(unsigned address_bits)
continue;
mtrr_calc_range_state(chunk_size, gran_size,
- extra_remove_base, extra_remove_size, i);
+ x_remove_base, x_remove_size, i);
if (debug_print) {
mtrr_print_out_one_result(i);
printk(KERN_INFO "\n");
@@ -890,7 +890,7 @@ int __init mtrr_cleanup(unsigned address_bits)
}
}
- /* try to find the optimal index */
+ /* Try to find the optimal index: */
index_good = mtrr_search_optimal_index();
if (index_good != -1) {
@@ -898,7 +898,7 @@ int __init mtrr_cleanup(unsigned address_bits)
i = index_good;
mtrr_print_out_one_result(i);
- /* convert ranges to var ranges state */
+ /* Convert ranges to var ranges state: */
chunk_size = result[i].chunk_sizek;
chunk_size <<= 10;
gran_size = result[i].gran_sizek;
@@ -941,8 +941,8 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
* Note this won't check if the MTRRs < 4GB where the magic bit doesn't
* apply to are wrong, but so far we don't know of any such case in the wild.
*/
-#define Tom2Enabled (1U << 21)
-#define Tom2ForceMemTypeWB (1U << 22)
+#define Tom2Enabled (1U << 21)
+#define Tom2ForceMemTypeWB (1U << 22)
int __init amd_special_default_mtrr(void)
{
@@ -952,7 +952,7 @@ int __init amd_special_default_mtrr(void)
return 0;
if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
return 0;
- /* In case some hypervisor doesn't pass SYSCFG through */
+ /* In case some hypervisor doesn't pass SYSCFG through: */
if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
return 0;
/*
@@ -965,19 +965,21 @@ int __init amd_special_default_mtrr(void)
return 0;
}
-static u64 __init real_trim_memory(unsigned long start_pfn,
- unsigned long limit_pfn)
+static u64 __init
+real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
{
u64 trim_start, trim_size;
+
trim_start = start_pfn;
trim_start <<= PAGE_SHIFT;
+
trim_size = limit_pfn;
trim_size <<= PAGE_SHIFT;
trim_size -= trim_start;
- return e820_update_range(trim_start, trim_size, E820_RAM,
- E820_RESERVED);
+ return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED);
}
+
/**
* mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
* @end_pfn: ending page frame number
@@ -985,7 +987,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn,
* Some buggy BIOSes don't setup the MTRRs properly for systems with certain
* memory configurations. This routine checks that the highest MTRR matches
* the end of memory, to make sure the MTRRs having a write back type cover
- * all of the memory the kernel is intending to use. If not, it'll trim any
+ * all of the memory the kernel is intending to use. If not, it'll trim any
* memory off the end by adjusting end_pfn, removing it from the kernel's
* allocation pools, warning the user with an obnoxious message.
*/
@@ -994,21 +996,22 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
unsigned long i, base, size, highest_pfn = 0, def, dummy;
mtrr_type type;
u64 total_trim_size;
-
/* extra one for all 0 */
int num[MTRR_NUM_TYPES + 1];
+
/*
* Make sure we only trim uncachable memory on machines that
* support the Intel MTRR architecture:
*/
if (!is_cpu(INTEL) || disable_mtrr_trim)
return 0;
+
rdmsr(MSR_MTRRdefType, def, dummy);
def &= 0xff;
if (def != MTRR_TYPE_UNCACHABLE)
return 0;
- /* get it and store it aside */
+ /* Get it and store it aside: */
memset(range_state, 0, sizeof(range_state));
for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &base, &size, &type);
@@ -1017,7 +1020,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
range_state[i].type = type;
}
- /* Find highest cached pfn */
+ /* Find highest cached pfn: */
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type;
if (type != MTRR_TYPE_WRBACK)
@@ -1028,13 +1031,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
highest_pfn = base + size;
}
- /* kvm/qemu doesn't have mtrr set right, don't trim them all */
+ /* kvm/qemu doesn't have mtrr set right, don't trim them all: */
if (!highest_pfn) {
printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
return 0;
}
- /* check entries number */
+ /* Check entries number: */
memset(num, 0, sizeof(num));
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type;
@@ -1046,11 +1049,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
num[type]++;
}
- /* no entry for WB? */
+ /* No entry for WB? */
if (!num[MTRR_TYPE_WRBACK])
return 0;
- /* check if we only had WB and UC */
+ /* Check if we only had WB and UC: */
if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
num_var_ranges - num[MTRR_NUM_TYPES])
return 0;
@@ -1066,31 +1069,31 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
}
nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
+ /* Check the head: */
total_trim_size = 0;
- /* check the head */
if (range[0].start)
total_trim_size += real_trim_memory(0, range[0].start);
- /* check the holes */
+
+ /* Check the holes: */
for (i = 0; i < nr_range - 1; i++) {
if (range[i].end + 1 < range[i+1].start)
total_trim_size += real_trim_memory(range[i].end + 1,
range[i+1].start);
}
- /* check the top */
+
+ /* Check the top: */
i = nr_range - 1;
if (range[i].end + 1 < end_pfn)
total_trim_size += real_trim_memory(range[i].end + 1,
end_pfn);
if (total_trim_size) {
- printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
- " all of memory, losing %lluMB of RAM.\n",
- total_trim_size >> 20);
+ pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);
if (!changed_by_mtrr_cleanup)
WARN_ON(1);
- printk(KERN_INFO "update e820 for mtrr\n");
+ pr_info("update e820 for mtrr\n");
update_e820();
return 1;
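
The head/holes/top walk above is simple enough to model in isolation. Below is a toy user-space version that just counts the pages which would be trimmed, given a sorted list of WB-covered pfn ranges; the pfn values in main() are made up for the example:

#include <stdio.h>

struct res_range { unsigned long start, end; }; /* inclusive pfns */

static unsigned long uncovered_pages(struct res_range *range, int nr_range,
                                     unsigned long end_pfn)
{
        unsigned long total = 0;
        int i;

        if (range[0].start)                             /* head  */
                total += range[0].start;
        for (i = 0; i < nr_range - 1; i++)              /* holes */
                if (range[i].end + 1 < range[i + 1].start)
                        total += range[i + 1].start - (range[i].end + 1);
        i = nr_range - 1;
        if (range[i].end + 1 < end_pfn)                 /* top   */
                total += end_pfn - (range[i].end + 1);
        return total;
}

int main(void)
{
        struct res_range r[] = { { 16, 0x9ffff }, { 0x100000, 0x11ffff } };

        printf("%lu uncovered pages\n", uncovered_pages(r, 2, 0x140000));
        return 0;
}
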
@@ -1098,4 +1101,3 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
return 0;
}
-
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index ff14c320040c..228d982ce09c 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -1,38 +1,40 @@
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mm.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include <asm/io.h>
+
#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
+#include <asm/mtrr.h>
+#include <asm/msr.h>
+
#include "mtrr.h"
static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type * type)
{
- unsigned long flags;
unsigned char arr, ccr3, rcr, shift;
+ unsigned long flags;
arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
- /* Save flags and disable interrupts */
local_irq_save(flags);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- ((unsigned char *) base)[3] = getCx86(arr);
- ((unsigned char *) base)[2] = getCx86(arr + 1);
- ((unsigned char *) base)[1] = getCx86(arr + 2);
+ ((unsigned char *)base)[3] = getCx86(arr);
+ ((unsigned char *)base)[2] = getCx86(arr + 1);
+ ((unsigned char *)base)[1] = getCx86(arr + 2);
rcr = getCx86(CX86_RCR_BASE + reg);
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
+ setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
- /* Enable interrupts if it was enabled previously */
local_irq_restore(flags);
+
shift = ((unsigned char *) base)[1] & 0x0f;
*base >>= PAGE_SHIFT;
- /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
+ /*
+ * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
* Note: shift==0xf means 4G, this is unsupported.
*/
if (shift)
@@ -76,17 +78,20 @@ cyrix_get_arr(unsigned int reg, unsigned long *base,
}
}
+/*
+ * cyrix_get_free_region - get a free ARR.
+ *
+ * @base: the starting (base) address of the region.
+ * @size: the size (in bytes) of the region.
+ *
+ * Returns: the index of the region on success, else -1 on error.
+ */
static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-/* [SUMMARY] Get a free ARR.
- <base> The starting (base) address of the region.
- <size> The size (in bytes) of the region.
- [RETURNS] The index of the region on success, else -1 on error.
-*/
{
- int i;
- mtrr_type ltype;
unsigned long lbase, lsize;
+ mtrr_type ltype;
+ int i;
switch (replace_reg) {
case 7:
@@ -107,14 +112,17 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
cyrix_get_arr(7, &lbase, &lsize, &ltype);
if (lsize == 0)
return 7;
- /* Else try ARR0-ARR6 first */
+ /* Else try ARR0-ARR6 first */
} else {
for (i = 0; i < 7; i++) {
cyrix_get_arr(i, &lbase, &lsize, &ltype);
if (lsize == 0)
return i;
}
- /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
+ /*
+ * ARR0-ARR6 aren't free,
+ * try ARR7 but its size must be at least 256K
+ */
cyrix_get_arr(i, &lbase, &lsize, &ltype);
if ((lsize == 0) && (size >= 0x40))
return i;
@@ -122,21 +130,22 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
return -ENOSPC;
}
-static u32 cr4 = 0;
-static u32 ccr3;
+static u32 cr4, ccr3;
static void prepare_set(void)
{
u32 cr0;
/* Save value of CR4 and clear Page Global Enable (bit 7) */
- if ( cpu_has_pge ) {
+ if (cpu_has_pge) {
cr4 = read_cr4();
write_cr4(cr4 & ~X86_CR4_PGE);
}
- /* Disable and flush caches. Note that wbinvd flushes the TLBs as
- a side-effect */
+ /*
+ * Disable and flush caches.
+ * Note that wbinvd flushes the TLBs as a side-effect
+ */
cr0 = read_cr0() | X86_CR0_CD;
wbinvd();
write_cr0(cr0);
@@ -147,22 +156,21 @@ static void prepare_set(void)
/* Cyrix ARRs - everything else was excluded at the top */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
-
}
static void post_set(void)
{
- /* Flush caches and TLBs */
+ /* Flush caches and TLBs */
wbinvd();
/* Cyrix ARRs - everything else was excluded at the top */
setCx86(CX86_CCR3, ccr3);
-
- /* Enable caches */
+
+ /* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
- /* Restore value of CR4 */
- if ( cpu_has_pge )
+ /* Restore value of CR4 */
+ if (cpu_has_pge)
write_cr4(cr4);
}
@@ -178,7 +186,8 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
size >>= 6;
size &= 0x7fff; /* make sure arr_size <= 14 */
- for (arr_size = 0; size; arr_size++, size >>= 1) ;
+ for (arr_size = 0; size; arr_size++, size >>= 1)
+ ;
if (reg < 7) {
switch (type) {
@@ -215,18 +224,18 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
prepare_set();
base <<= PAGE_SHIFT;
- setCx86(arr, ((unsigned char *) &base)[3]);
- setCx86(arr + 1, ((unsigned char *) &base)[2]);
- setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
+ setCx86(arr + 0, ((unsigned char *)&base)[3]);
+ setCx86(arr + 1, ((unsigned char *)&base)[2]);
+ setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size);
setCx86(CX86_RCR_BASE + reg, arr_type);
post_set();
}
typedef struct {
- unsigned long base;
- unsigned long size;
- mtrr_type type;
+ unsigned long base;
+ unsigned long size;
+ mtrr_type type;
} arr_state_t;
static arr_state_t arr_state[8] = {
@@ -247,16 +256,17 @@ static void cyrix_set_all(void)
setCx86(CX86_CCR0 + i, ccr_state[i]);
for (; i < 7; i++)
setCx86(CX86_CCR4 + i, ccr_state[i]);
- for (i = 0; i < 8; i++)
- cyrix_set_arr(i, arr_state[i].base,
+
+ for (i = 0; i < 8; i++) {
+ cyrix_set_arr(i, arr_state[i].base,
arr_state[i].size, arr_state[i].type);
+ }
post_set();
}
static struct mtrr_ops cyrix_mtrr_ops = {
.vendor = X86_VENDOR_CYRIX,
-// .init = cyrix_arr_init,
.set_all = cyrix_set_all,
.set = cyrix_set_arr,
.get = cyrix_get_arr,
@@ -270,5 +280,3 @@ int __init cyrix_init_mtrr(void)
set_mtrr_ops(&cyrix_mtrr_ops);
return 0;
}
-
-//arch_initcall(cyrix_init_mtrr);
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0543f69f0b27..55da0c5f68dd 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -1,28 +1,34 @@
-/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
- because MTRRs can span upto 40 bits (36bits on most modern x86) */
+/*
+ * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
+ * because MTRRs can span up to 40 bits (36 bits on most modern x86)
+ */
+#define DEBUG
+
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/mm.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include <asm/system.h>
-#include <asm/cpufeature.h>
+
#include <asm/processor-flags.h>
+#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/mtrr.h>
+#include <asm/msr.h>
#include <asm/pat.h>
+
#include "mtrr.h"
struct fixed_range_block {
- int base_msr; /* start address of an MTRR block */
- int ranges; /* number of MTRRs in this block */
+ int base_msr; /* start address of an MTRR block */
+ int ranges; /* number of MTRRs in this block */
};
static struct fixed_range_block fixed_range_blocks[] = {
- { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
- { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
- { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
+ { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
+ { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
+ { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
{}
};
@@ -30,10 +36,10 @@ static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;
-struct mtrr_state_type mtrr_state = {};
+struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);
-/**
+/*
* BIOS is expected to clear MtrrFixDramModEn bit, see for example
* "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
* Opteron Processors" (26094 Rev. 3.30 February 2006), section
@@ -104,9 +110,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
* Look of multiple ranges matching this address and pick type
* as per MTRR precedence
*/
- if (!(mtrr_state.enabled & 2)) {
+ if (!(mtrr_state.enabled & 2))
return mtrr_state.def_type;
- }
prev_match = 0xFF;
for (i = 0; i < num_var_ranges; ++i) {
@@ -125,9 +130,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
if (start_state != end_state)
return 0xFE;
- if ((start & mask) != (base & mask)) {
+ if ((start & mask) != (base & mask))
continue;
- }
curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
if (prev_match == 0xFF) {
@@ -148,9 +152,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
curr_match = MTRR_TYPE_WRTHROUGH;
}
- if (prev_match != curr_match) {
+ if (prev_match != curr_match)
return MTRR_TYPE_UNCACHABLE;
- }
}
if (mtrr_tom2) {
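
The loop above resolves overlapping variable ranges by the usual MTRR precedence rules: UC beats everything, WT beats WB, and any other disagreement is treated as UC. A compact sketch of that resolution step (type values taken from the mtrr_strings table later in this patch; this is an illustration, not the kernel helper itself):

#include <stdio.h>

#define MTRR_TYPE_UNCACHABLE    0
#define MTRR_TYPE_WRTHROUGH     4
#define MTRR_TYPE_WRBACK        6

/* Combine the type seen so far (0xFF == none yet) with the type of the
 * next matching variable range. */
static unsigned char resolve_type(unsigned char prev, unsigned char curr)
{
        if (prev == 0xFF)
                return curr;                    /* first match         */
        if (prev == MTRR_TYPE_UNCACHABLE || curr == MTRR_TYPE_UNCACHABLE)
                return MTRR_TYPE_UNCACHABLE;    /* UC always wins      */
        if ((prev == MTRR_TYPE_WRBACK && curr == MTRR_TYPE_WRTHROUGH) ||
            (prev == MTRR_TYPE_WRTHROUGH && curr == MTRR_TYPE_WRBACK))
                return MTRR_TYPE_WRTHROUGH;     /* WB + WT -> WT       */
        if (prev != curr)
                return MTRR_TYPE_UNCACHABLE;    /* conflicting types   */
        return curr;
}

int main(void)
{
        /* A WB range overlapped by a UC range resolves to UC (0): */
        printf("resolved type: %d\n",
               resolve_type(MTRR_TYPE_WRBACK, MTRR_TYPE_UNCACHABLE));
        return 0;
}
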
@@ -164,7 +167,7 @@ u8 mtrr_type_lookup(u64 start, u64 end)
return mtrr_state.def_type;
}
-/* Get the MSR pair relating to a var range */
+/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
@@ -172,7 +175,7 @@ get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
-/* fill the MSR pair relating to a var range */
+/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
@@ -186,10 +189,9 @@ void fill_mtrr_var_range(unsigned int index,
vr[index].mask_hi = mask_hi;
}
-static void
-get_fixed_ranges(mtrr_type * frs)
+static void get_fixed_ranges(mtrr_type *frs)
{
- unsigned int *p = (unsigned int *) frs;
+ unsigned int *p = (unsigned int *)frs;
int i;
k8_check_syscfg_dram_mod_en();
@@ -217,22 +219,22 @@ static void __init print_fixed_last(void)
if (!last_fixed_end)
return;
- printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start,
- last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
+ pr_debug(" %05X-%05X %s\n", last_fixed_start,
+ last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
last_fixed_end = 0;
}
static void __init update_fixed_last(unsigned base, unsigned end,
- mtrr_type type)
+ mtrr_type type)
{
last_fixed_start = base;
last_fixed_end = end;
last_fixed_type = type;
}
-static void __init print_fixed(unsigned base, unsigned step,
- const mtrr_type *types)
+static void __init
+print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
unsigned i;
@@ -259,54 +261,55 @@ static void __init print_mtrr_state(void)
unsigned int i;
int high_width;
- printk(KERN_DEBUG "MTRR default type: %s\n",
- mtrr_attrib_to_str(mtrr_state.def_type));
+ pr_debug("MTRR default type: %s\n",
+ mtrr_attrib_to_str(mtrr_state.def_type));
if (mtrr_state.have_fixed) {
- printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n",
- mtrr_state.enabled & 1 ? "en" : "dis");
+ pr_debug("MTRR fixed ranges %sabled:\n",
+ mtrr_state.enabled & 1 ? "en" : "dis");
print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
for (i = 0; i < 2; ++i)
- print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
+ print_fixed(0x80000 + i * 0x20000, 0x04000,
+ mtrr_state.fixed_ranges + (i + 1) * 8);
for (i = 0; i < 8; ++i)
- print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
+ print_fixed(0xC0000 + i * 0x08000, 0x01000,
+ mtrr_state.fixed_ranges + (i + 3) * 8);
/* tail */
print_fixed_last();
}
- printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
- mtrr_state.enabled & 2 ? "en" : "dis");
+ pr_debug("MTRR variable ranges %sabled:\n",
+ mtrr_state.enabled & 2 ? "en" : "dis");
if (size_or_mask & 0xffffffffUL)
high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
else
high_width = ffs(size_or_mask>>32) + 32 - 1;
high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
+
for (i = 0; i < num_var_ranges; ++i) {
if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
- printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n",
- i,
- high_width,
- mtrr_state.var_ranges[i].base_hi,
- mtrr_state.var_ranges[i].base_lo >> 12,
- high_width,
- mtrr_state.var_ranges[i].mask_hi,
- mtrr_state.var_ranges[i].mask_lo >> 12,
- mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
+ pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
+ i,
+ high_width,
+ mtrr_state.var_ranges[i].base_hi,
+ mtrr_state.var_ranges[i].base_lo >> 12,
+ high_width,
+ mtrr_state.var_ranges[i].mask_hi,
+ mtrr_state.var_ranges[i].mask_lo >> 12,
+ mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
else
- printk(KERN_DEBUG " %u disabled\n", i);
- }
- if (mtrr_tom2) {
- printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n",
- mtrr_tom2, mtrr_tom2>>20);
+ pr_debug(" %u disabled\n", i);
}
+ if (mtrr_tom2)
+ pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}
-/* Grab all of the MTRR state for this CPU into *state */
+/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
- unsigned int i;
struct mtrr_var_range *vrs;
- unsigned lo, dummy;
unsigned long flags;
+ unsigned lo, dummy;
+ unsigned int i;
vrs = mtrr_state.var_ranges;
@@ -324,6 +327,7 @@ void __init get_mtrr_state(void)
if (amd_special_default_mtrr()) {
unsigned low, high;
+
/* TOP_MEM2 */
rdmsr(MSR_K8_TOP_MEM2, low, high);
mtrr_tom2 = high;
@@ -344,10 +348,9 @@ void __init get_mtrr_state(void)
post_set();
local_irq_restore(flags);
-
}
-/* Some BIOS's are fucked and don't set all MTRRs the same! */
+/* Some BIOSes are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
unsigned long mask = smp_changes_mask;
@@ -355,28 +358,33 @@ void __init mtrr_state_warn(void)
if (!mask)
return;
if (mask & MTRR_CHANGE_MASK_FIXED)
- printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
+ pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_VARIABLE)
- printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
+ pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_DEFTYPE)
- printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+ pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+
printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
printk(KERN_INFO "mtrr: corrected configuration.\n");
}
-/* Doesn't attempt to pass an error out to MTRR users
- because it's quite complicated in some cases and probably not
- worth it because the best error handling is to ignore it. */
+/*
+ * Doesn't attempt to pass an error out to MTRR users
+ * because it's quite complicated in some cases and probably not
+ * worth it because the best error handling is to ignore it.
+ */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
- if (wrmsr_safe(msr, a, b) < 0)
+ if (wrmsr_safe(msr, a, b) < 0) {
printk(KERN_ERR
"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
smp_processor_id(), msr, a, b);
+ }
}
/**
- * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
+ * set_fixed_range - checks & updates a fixed-range MTRR if it
+ * differs from the value it should have
* @msr: MSR address of the MTTR which should be checked and updated
* @changed: pointer which indicates whether the MTRR needed to be changed
* @msrwords: pointer to the MSR values which the MSR should have
@@ -401,20 +409,23 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
*
* Returns: The index of the region on success, else negative on error.
*/
-int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
+int
+generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
- int i, max;
- mtrr_type ltype;
unsigned long lbase, lsize;
+ mtrr_type ltype;
+ int i, max;
max = num_var_ranges;
if (replace_reg >= 0 && replace_reg < max)
return replace_reg;
+
for (i = 0; i < max; ++i) {
mtrr_if->get(i, &lbase, &lsize, &ltype);
if (lsize == 0)
return i;
}
+
return -ENOSPC;
}
@@ -434,7 +445,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
if ((mask_lo & 0x800) == 0) {
- /* Invalid (i.e. free) range */
+ /* Invalid (i.e. free) range */
*base = 0;
*size = 0;
*type = 0;
@@ -471,27 +482,31 @@ out_put_cpu:
}
/**
- * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
+ * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
+ * differ from the saved set
* @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
*/
-static int set_fixed_ranges(mtrr_type * frs)
+static int set_fixed_ranges(mtrr_type *frs)
{
- unsigned long long *saved = (unsigned long long *) frs;
+ unsigned long long *saved = (unsigned long long *)frs;
bool changed = false;
- int block=-1, range;
+ int block = -1, range;
k8_check_syscfg_dram_mod_en();
- while (fixed_range_blocks[++block].ranges)
- for (range=0; range < fixed_range_blocks[block].ranges; range++)
- set_fixed_range(fixed_range_blocks[block].base_msr + range,
- &changed, (unsigned int *) saved++);
+ while (fixed_range_blocks[++block].ranges) {
+ for (range = 0; range < fixed_range_blocks[block].ranges; range++)
+ set_fixed_range(fixed_range_blocks[block].base_msr + range,
+ &changed, (unsigned int *)saved++);
+ }
return changed;
}
-/* Set the MSR pair relating to a var range. Returns TRUE if
- changes are made */
+/*
+ * Set the MSR pair relating to a var range.
+ * Returns true if changes are made.
+ */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
unsigned int lo, hi;
@@ -501,6 +516,7 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
|| (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
+
mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
changed = true;
}
@@ -526,21 +542,26 @@ static u32 deftype_lo, deftype_hi;
*/
static unsigned long set_mtrr_state(void)
{
- unsigned int i;
unsigned long change_mask = 0;
+ unsigned int i;
- for (i = 0; i < num_var_ranges; i++)
+ for (i = 0; i < num_var_ranges; i++) {
if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
change_mask |= MTRR_CHANGE_MASK_VARIABLE;
+ }
if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
change_mask |= MTRR_CHANGE_MASK_FIXED;
- /* Set_mtrr_restore restores the old value of MTRRdefType,
- so to set it we fiddle with the saved value */
+ /*
+ * Set_mtrr_restore restores the old value of MTRRdefType,
+ * so to set it we fiddle with the saved value:
+ */
if ((deftype_lo & 0xff) != mtrr_state.def_type
|| ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
- deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
+
+ deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
+ (mtrr_state.enabled << 10);
change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
}
@@ -548,33 +569,36 @@ static unsigned long set_mtrr_state(void)
}
-static unsigned long cr4 = 0;
+static unsigned long cr4;
static DEFINE_SPINLOCK(set_atomicity_lock);
/*
- * Since we are disabling the cache don't allow any interrupts - they
- * would run extremely slow and would only increase the pain. The caller must
- * ensure that local interrupts are disabled and are reenabled after post_set()
- * has been called.
+ * Since we are disabling the cache, don't allow any interrupts;
+ * they would run extremely slowly and would only increase the pain.
+ *
+ * The caller must ensure that local interrupts are disabled and
+ * are reenabled after post_set() has been called.
*/
-
static void prepare_set(void) __acquires(set_atomicity_lock)
{
unsigned long cr0;
- /* Note that this is not ideal, since the cache is only flushed/disabled
- for this CPU while the MTRRs are changed, but changing this requires
- more invasive changes to the way the kernel boots */
+ /*
+ * Note that this is not ideal
+ * since the cache is only flushed/disabled for this CPU while the
+ * MTRRs are changed, but changing this requires more invasive
+ * changes to the way the kernel boots
+ */
spin_lock(&set_atomicity_lock);
- /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+ /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
cr0 = read_cr0() | X86_CR0_CD;
write_cr0(cr0);
wbinvd();
- /* Save value of CR4 and clear Page Global Enable (bit 7) */
- if ( cpu_has_pge ) {
+ /* Save value of CR4 and clear Page Global Enable (bit 7) */
+ if (cpu_has_pge) {
cr4 = read_cr4();
write_cr4(cr4 & ~X86_CR4_PGE);
}
@@ -582,26 +606,26 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
__flush_tlb();
- /* Save MTRR state */
+ /* Save MTRR state */
rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
- /* Disable MTRRs, and set the default type to uncached */
+ /* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
}
static void post_set(void) __releases(set_atomicity_lock)
{
- /* Flush TLBs (no need to flush caches - they are disabled) */
+ /* Flush TLBs (no need to flush caches - they are disabled) */
__flush_tlb();
/* Intel (P6) standard MTRRs */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
- /* Enable caches */
+
+ /* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
- /* Restore value of CR4 */
- if ( cpu_has_pge )
+ /* Restore value of CR4 */
+ if (cpu_has_pge)
write_cr4(cr4);
spin_unlock(&set_atomicity_lock);
}
@@ -623,24 +647,27 @@ static void generic_set_all(void)
post_set();
local_irq_restore(flags);
- /* Use the atomic bitops to update the global mask */
+ /* Use the atomic bitops to update the global mask */
for (count = 0; count < sizeof mask * 8; ++count) {
if (mask & 0x01)
set_bit(count, &smp_changes_mask);
mask >>= 1;
}
-
+
}
+/**
+ * generic_set_mtrr - set variable MTRR register on the local CPU.
+ *
+ * @reg: The register to set.
+ * @base: The base address of the region.
+ * @size: The size of the region. If this is 0 the region is disabled.
+ * @type: The type of the region.
+ *
+ * Returns nothing.
+ */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
-/* [SUMMARY] Set variable MTRR register on the local CPU.
- <reg> The register to set.
- <base> The base address of the region.
- <size> The size of the region. If this is 0 the region is disabled.
- <type> The type of the region.
- [RETURNS] Nothing.
-*/
{
unsigned long flags;
struct mtrr_var_range *vr;
@@ -651,8 +678,10 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
prepare_set();
if (size == 0) {
- /* The invalid bit is kept in the mask, so we simply clear the
- relevant mask register to disable a range. */
+ /*
+ * The invalid bit is kept in the mask, so we simply
+ * clear the relevant mask register to disable a range.
+ */
mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
memset(vr, 0, sizeof(struct mtrr_var_range));
} else {
@@ -669,46 +698,50 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
local_irq_restore(flags);
}
-int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
+int generic_validate_add_page(unsigned long base, unsigned long size,
+ unsigned int type)
{
unsigned long lbase, last;
- /* For Intel PPro stepping <= 7, must be 4 MiB aligned
- and not touch 0x70000000->0x7003FFFF */
+ /*
+ * For Intel PPro stepping <= 7
+ * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
+ */
if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 1 &&
boot_cpu_data.x86_mask <= 7) {
if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
- printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
+ pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
return -EINVAL;
}
if (!(base + size < 0x70000 || base > 0x7003F) &&
(type == MTRR_TYPE_WRCOMB
|| type == MTRR_TYPE_WRBACK)) {
- printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
+ pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
return -EINVAL;
}
}
- /* Check upper bits of base and last are equal and lower bits are 0
- for base and 1 for last */
+ /*
+ * Check upper bits of base and last are equal and lower bits are 0
+ * for base and 1 for last
+ */
last = base + size - 1;
for (lbase = base; !(lbase & 1) && (last & 1);
- lbase = lbase >> 1, last = last >> 1) ;
+ lbase = lbase >> 1, last = last >> 1)
+ ;
if (lbase != last) {
- printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
- base, size);
+ pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
return -EINVAL;
}
return 0;
}
-
static int generic_have_wrcomb(void)
{
unsigned long config, dummy;
rdmsr(MSR_MTRRcap, config, dummy);
- return (config & (1 << 10));
+ return config & (1 << 10);
}
int positive_have_wrcomb(void)
@@ -716,14 +749,15 @@ int positive_have_wrcomb(void)
return 1;
}
-/* generic structure...
+/*
+ * Generic structure...
*/
struct mtrr_ops generic_mtrr_ops = {
- .use_intel_if = 1,
- .set_all = generic_set_all,
- .get = generic_get_mtrr,
- .get_free_region = generic_get_free_region,
- .set = generic_set_mtrr,
- .validate_add_page = generic_validate_add_page,
- .have_wrcomb = generic_have_wrcomb,
+ .use_intel_if = 1,
+ .set_all = generic_set_all,
+ .get = generic_get_mtrr,
+ .get_free_region = generic_get_free_region,
+ .set = generic_set_mtrr,
+ .validate_add_page = generic_validate_add_page,
+ .have_wrcomb = generic_have_wrcomb,
};
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index fb73a52913a4..08b6ea4c62b4 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -1,27 +1,28 @@
-#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <linux/capability.h>
-#include <linux/ctype.h>
-#include <linux/module.h>
#include <linux/seq_file.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
#define LINE_SIZE 80
#include <asm/mtrr.h>
+
#include "mtrr.h"
#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
static const char *const mtrr_strings[MTRR_NUM_TYPES] =
{
- "uncachable", /* 0 */
- "write-combining", /* 1 */
- "?", /* 2 */
- "?", /* 3 */
- "write-through", /* 4 */
- "write-protect", /* 5 */
- "write-back", /* 6 */
+ "uncachable", /* 0 */
+ "write-combining", /* 1 */
+ "?", /* 2 */
+ "?", /* 3 */
+ "write-through", /* 4 */
+ "write-protect", /* 5 */
+ "write-back", /* 6 */
};
const char *mtrr_attrib_to_str(int x)
@@ -35,8 +36,8 @@ static int
mtrr_file_add(unsigned long base, unsigned long size,
unsigned int type, bool increment, struct file *file, int page)
{
+ unsigned int *fcount = FILE_FCOUNT(file);
int reg, max;
- unsigned int *fcount = FILE_FCOUNT(file);
max = num_var_ranges;
if (fcount == NULL) {
@@ -61,8 +62,8 @@ static int
mtrr_file_del(unsigned long base, unsigned long size,
struct file *file, int page)
{
- int reg;
unsigned int *fcount = FILE_FCOUNT(file);
+ int reg;
if (!page) {
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
@@ -81,13 +82,14 @@ mtrr_file_del(unsigned long base, unsigned long size,
return reg;
}
-/* RED-PEN: seq_file can seek now. this is ignored. */
+/*
+ * seq_file can seek but we ignore it.
+ *
+ * Format of control line:
+ * "base=%Lx size=%Lx type=%s" or "disable=%d"
+ */
static ssize_t
mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
-/* Format of control line:
- "base=%Lx size=%Lx type=%s" OR:
- "disable=%d"
-*/
{
int i, err;
unsigned long reg;
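
For context, the control-line format documented above is what user space writes to /proc/mtrr: writing "base=0xf8000000 size=0x400000 type=write-combining" (the base address here is only illustrative) adds a 4 MB write-combining region, while writing "disable=2" frees variable register 2. The type string must match one of the mtrr_strings entries above.
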
@@ -100,15 +102,18 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
return -EPERM;
if (!len)
return -EINVAL;
+
memset(line, 0, LINE_SIZE);
if (len > LINE_SIZE)
len = LINE_SIZE;
if (copy_from_user(line, buf, len - 1))
return -EFAULT;
+
linelen = strlen(line);
ptr = line + linelen - 1;
if (linelen && *ptr == '\n')
*ptr = '\0';
+
if (!strncmp(line, "disable=", 8)) {
reg = simple_strtoul(line + 8, &ptr, 0);
err = mtrr_del_page(reg, 0, 0);
@@ -116,28 +121,35 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
return err;
return len;
}
+
if (strncmp(line, "base=", 5))
return -EINVAL;
+
base = simple_strtoull(line + 5, &ptr, 0);
- for (; isspace(*ptr); ++ptr) ;
+ for (; isspace(*ptr); ++ptr)
+ ;
+
if (strncmp(ptr, "size=", 5))
return -EINVAL;
+
size = simple_strtoull(ptr + 5, &ptr, 0);
if ((base & 0xfff) || (size & 0xfff))
return -EINVAL;
- for (; isspace(*ptr); ++ptr) ;
+ for (; isspace(*ptr); ++ptr)
+ ;
+
if (strncmp(ptr, "type=", 5))
return -EINVAL;
ptr += 5;
- for (; isspace(*ptr); ++ptr) ;
+ for (; isspace(*ptr); ++ptr)
+ ;
+
for (i = 0; i < MTRR_NUM_TYPES; ++i) {
if (strcmp(ptr, mtrr_strings[i]))
continue;
base >>= PAGE_SHIFT;
size >>= PAGE_SHIFT;
- err =
- mtrr_add_page((unsigned long) base, (unsigned long) size, i,
- true);
+ err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true);
if (err < 0)
return err;
return len;
@@ -181,7 +193,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
case MTRRIOC32_SET_PAGE_ENTRY:
case MTRRIOC32_DEL_PAGE_ENTRY:
case MTRRIOC32_KILL_PAGE_ENTRY: {
- struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg;
+ struct mtrr_sentry32 __user *s32;
+
+ s32 = (struct mtrr_sentry32 __user *)__arg;
err = get_user(sentry.base, &s32->base);
err |= get_user(sentry.size, &s32->size);
err |= get_user(sentry.type, &s32->type);
@@ -191,7 +205,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
}
case MTRRIOC32_GET_ENTRY:
case MTRRIOC32_GET_PAGE_ENTRY: {
- struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
+ struct mtrr_gentry32 __user *g32;
+
+ g32 = (struct mtrr_gentry32 __user *)__arg;
err = get_user(gentry.regnum, &g32->regnum);
err |= get_user(gentry.base, &g32->base);
err |= get_user(gentry.size, &g32->size);
@@ -314,7 +330,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
if (err)
return err;
- switch(cmd) {
+ switch (cmd) {
case MTRRIOC_GET_ENTRY:
case MTRRIOC_GET_PAGE_ENTRY:
if (copy_to_user(arg, &gentry, sizeof gentry))
@@ -323,7 +339,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
#ifdef CONFIG_COMPAT
case MTRRIOC32_GET_ENTRY:
case MTRRIOC32_GET_PAGE_ENTRY: {
- struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
+ struct mtrr_gentry32 __user *g32;
+
+ g32 = (struct mtrr_gentry32 __user *)__arg;
err = put_user(gentry.base, &g32->base);
err |= put_user(gentry.size, &g32->size);
err |= put_user(gentry.regnum, &g32->regnum);
@@ -335,11 +353,10 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
return err;
}
-static int
-mtrr_close(struct inode *ino, struct file *file)
+static int mtrr_close(struct inode *ino, struct file *file)
{
- int i, max;
unsigned int *fcount = FILE_FCOUNT(file);
+ int i, max;
if (fcount != NULL) {
max = num_var_ranges;
@@ -359,22 +376,22 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset);
static int mtrr_open(struct inode *inode, struct file *file)
{
- if (!mtrr_if)
+ if (!mtrr_if)
return -EIO;
- if (!mtrr_if->get)
- return -ENXIO;
+ if (!mtrr_if->get)
+ return -ENXIO;
return single_open(file, mtrr_seq_show, NULL);
}
static const struct file_operations mtrr_fops = {
- .owner = THIS_MODULE,
- .open = mtrr_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = mtrr_write,
- .unlocked_ioctl = mtrr_ioctl,
- .compat_ioctl = mtrr_ioctl,
- .release = mtrr_close,
+ .owner = THIS_MODULE,
+ .open = mtrr_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = mtrr_write,
+ .unlocked_ioctl = mtrr_ioctl,
+ .compat_ioctl = mtrr_ioctl,
+ .release = mtrr_close,
};
static int mtrr_seq_show(struct seq_file *seq, void *offset)
@@ -388,23 +405,24 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
max = num_var_ranges;
for (i = 0; i < max; i++) {
mtrr_if->get(i, &base, &size, &type);
- if (size == 0)
+ if (size == 0) {
mtrr_usage_table[i] = 0;
- else {
- if (size < (0x100000 >> PAGE_SHIFT)) {
- /* less than 1MB */
- factor = 'K';
- size <<= PAGE_SHIFT - 10;
- } else {
- factor = 'M';
- size >>= 20 - PAGE_SHIFT;
- }
- /* RED-PEN: base can be > 32bit */
- len += seq_printf(seq,
- "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n",
- i, base, base >> (20 - PAGE_SHIFT), size, factor,
- mtrr_usage_table[i], mtrr_attrib_to_str(type));
+ continue;
}
+ if (size < (0x100000 >> PAGE_SHIFT)) {
+ /* less than 1MB */
+ factor = 'K';
+ size <<= PAGE_SHIFT - 10;
+ } else {
+ factor = 'M';
+ size >>= 20 - PAGE_SHIFT;
+ }
+ /* Base can be > 32 bits */
+ len += seq_printf(seq, "reg%02i: base=0x%06lx000 "
+ "(%5luMB), size=%5lu%cB, count=%d: %s\n",
+ i, base, base >> (20 - PAGE_SHIFT), size,
+ factor, mtrr_usage_table[i],
+ mtrr_attrib_to_str(type));
}
return 0;
}
@@ -422,6 +440,5 @@ static int __init mtrr_if_init(void)
proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
return 0;
}
-
arch_initcall(mtrr_if_init);
#endif /* CONFIG_PROC_FS */
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 8fc248b5aeaf..7af0f88a4163 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -25,43 +25,48 @@
Operating System Writer's Guide" (Intel document number 242692),
section 11.11.7
- This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
- on 6-7 March 2002.
- Source: Intel Architecture Software Developers Manual, Volume 3:
+ This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
+ on 6-7 March 2002.
+ Source: Intel Architecture Software Developers Manual, Volume 3:
System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
+#define DEBUG
+
+#include <linux/types.h> /* FIXME: kvm_para.h needs this */
+
+#include <linux/kvm_para.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/init.h>
+#include <linux/sort.h>
+#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-#include <linux/sort.h>
+#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
#include <asm/msr.h>
-#include <asm/kvm_para.h>
+
#include "mtrr.h"
-u32 num_var_ranges = 0;
+u32 num_var_ranges;
unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
-static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
+static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
-struct mtrr_ops * mtrr_if = NULL;
+struct mtrr_ops *mtrr_if;
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
-void set_mtrr_ops(struct mtrr_ops * ops)
+void set_mtrr_ops(struct mtrr_ops *ops)
{
if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
mtrr_ops[ops->vendor] = ops;
@@ -72,30 +77,36 @@ static int have_wrcomb(void)
{
struct pci_dev *dev;
u8 rev;
-
- if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
- /* ServerWorks LE chipsets < rev 6 have problems with write-combining
- Don't allow it and leave room for other chipsets to be tagged */
+
+ dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
+ if (dev != NULL) {
+ /*
+ * ServerWorks LE chipsets < rev 6 have problems with
+ * write-combining. Don't allow it and leave room for other
+ * chipsets to be tagged
+ */
if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
if (rev <= 5) {
- printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
+ pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
pci_dev_put(dev);
return 0;
}
}
- /* Intel 450NX errata # 23. Non ascending cacheline evictions to
- write combining memory may resulting in data corruption */
+ /*
+ * Intel 450NX erratum #23: non-ascending cacheline evictions to
+ * write-combining memory may result in data corruption.
+ */
if (dev->vendor == PCI_VENDOR_ID_INTEL &&
dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
- printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
+ pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
pci_dev_put(dev);
return 0;
}
pci_dev_put(dev);
- }
- return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
+ }
+ return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}
/* This function returns the number of variable MTRRs */
@@ -103,12 +114,13 @@ static void __init set_num_var_ranges(void)
{
unsigned long config = 0, dummy;
- if (use_intel()) {
+ if (use_intel())
rdmsr(MSR_MTRRcap, config, dummy);
- } else if (is_cpu(AMD))
+ else if (is_cpu(AMD))
config = 2;
else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
config = 8;
+
num_var_ranges = config & 0xff;
}
@@ -130,10 +142,12 @@ struct set_mtrr_data {
mtrr_type smp_type;
};
+/**
+ * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
+ *
+ * Returns nothing.
+ */
static void ipi_handler(void *info)
-/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
- [RETURNS] Nothing.
-*/
{
#ifdef CONFIG_SMP
struct set_mtrr_data *data = info;
@@ -142,18 +156,19 @@ static void ipi_handler(void *info)
local_irq_save(flags);
atomic_dec(&data->count);
- while(!atomic_read(&data->gate))
+ while (!atomic_read(&data->gate))
cpu_relax();
/* The master has cleared me to execute */
- if (data->smp_reg != ~0U)
- mtrr_if->set(data->smp_reg, data->smp_base,
+ if (data->smp_reg != ~0U) {
+ mtrr_if->set(data->smp_reg, data->smp_base,
data->smp_size, data->smp_type);
- else
+ } else {
mtrr_if->set_all();
+ }
atomic_dec(&data->count);
- while(atomic_read(&data->gate))
+ while (atomic_read(&data->gate))
cpu_relax();
atomic_dec(&data->count);
@@ -161,7 +176,8 @@ static void ipi_handler(void *info)
#endif
}
-static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
+static inline int types_compatible(mtrr_type type1, mtrr_type type2)
+{
return type1 == MTRR_TYPE_UNCACHABLE ||
type2 == MTRR_TYPE_UNCACHABLE ||
(type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
@@ -176,10 +192,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
* @type: mtrr type
*
* This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
- *
+ *
* 1. Send IPI to do the following:
* 2. Disable Interrupts
- * 3. Wait for all procs to do so
+ * 3. Wait for all procs to do so
* 4. Enter no-fill cache mode
* 5. Flush caches
* 6. Clear PGE bit
@@ -189,26 +205,27 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
* 10. Enable all range registers
* 11. Flush all TLBs and caches again
* 12. Enter normal cache mode and reenable caching
- * 13. Set PGE
+ * 13. Set PGE
* 14. Wait for buddies to catch up
* 15. Enable interrupts.
- *
+ *
* What does that mean for us? Well, first we set data.count to the number
* of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
* until it hits 0 and proceed. We set the data.gate flag and reset data.count.
- * Meanwhile, they are waiting for that flag to be set. Once it's set, each
- * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
- * differently, so we call mtrr_if->set() callback and let them take care of it.
- * When they're done, they again decrement data->count and wait for data.gate to
- * be reset.
- * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
+ * Meanwhile, they are waiting for that flag to be set. Once it's set, each
+ * CPU goes through the transition of updating MTRRs. The CPU vendors may
+ * each do it differently, so we call the mtrr_if->set() callback and let
+ * them take care of it. When they're done, they again decrement data->count
+ * and wait for data.gate to be reset.
+ * When we finish, we wait for data.count to hit 0 and toggle the data.gate
+ * flag.
* Everyone then enables interrupts and we all continue on.
*
* Note that the mechanism is the same for UP systems, too; all the SMP stuff
* becomes nops.
*/
-static void set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
+static void
+set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
struct set_mtrr_data data;
unsigned long flags;
@@ -218,121 +235,122 @@ static void set_mtrr(unsigned int reg, unsigned long base,
data.smp_size = size;
data.smp_type = type;
atomic_set(&data.count, num_booting_cpus() - 1);
- /* make sure data.count is visible before unleashing other CPUs */
+
+ /* Make sure data.count is visible before unleashing other CPUs */
smp_wmb();
- atomic_set(&data.gate,0);
+ atomic_set(&data.gate, 0);
- /* Start the ball rolling on other CPUs */
+ /* Start the ball rolling on other CPUs */
if (smp_call_function(ipi_handler, &data, 0) != 0)
panic("mtrr: timed out waiting for other CPUs\n");
local_irq_save(flags);
- while(atomic_read(&data.count))
+ while (atomic_read(&data.count))
cpu_relax();
- /* ok, reset count and toggle gate */
+ /* Ok, reset count and toggle gate */
atomic_set(&data.count, num_booting_cpus() - 1);
smp_wmb();
- atomic_set(&data.gate,1);
+ atomic_set(&data.gate, 1);
- /* do our MTRR business */
+ /* Do our MTRR business */
- /* HACK!
+ /*
+ * HACK!
* We use this same function to initialize the mtrrs on boot.
* The state of the boot cpu's mtrrs has been saved, and we want
- * to replicate across all the APs.
+ * to replicate it across all the APs.
* If we're doing that @reg is set to something special...
*/
- if (reg != ~0U)
- mtrr_if->set(reg,base,size,type);
+ if (reg != ~0U)
+ mtrr_if->set(reg, base, size, type);
- /* wait for the others */
- while(atomic_read(&data.count))
+ /* Wait for the others */
+ while (atomic_read(&data.count))
cpu_relax();
atomic_set(&data.count, num_booting_cpus() - 1);
smp_wmb();
- atomic_set(&data.gate,0);
+ atomic_set(&data.gate, 0);
/*
* Wait here for everyone to have seen the gate change
* So we're the last ones to touch 'data'
*/
- while(atomic_read(&data.count))
+ while (atomic_read(&data.count))
cpu_relax();
local_irq_restore(flags);
}
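The count/gate rendezvous described in the comment above boils down to a small, general pattern; the following is a stand-alone sketch of that pattern in kernel-style C (illustrative only — it mirrors set_mtrr()/ipi_handler() but omits the memory barriers and IRQ handling that the real code needs):

/*
 * Illustrative sketch of the count/gate rendezvous used by set_mtrr().
 * Not part of this patch; smp_wmb() and local_irq_save/restore are
 * omitted for brevity.
 */
static atomic_t count;	/* CPUs still to check in at the current step */
static atomic_t gate;	/* master's "go" flag */

static void rendezvous_worker(void)		/* the ipi_handler() side */
{
	atomic_dec(&count);			/* step 1: interrupts are off */
	while (!atomic_read(&gate))
		cpu_relax();
	/* ... update this CPU's MTRRs via mtrr_if->set()/set_all() ... */
	atomic_dec(&count);			/* step 2: update done */
	while (atomic_read(&gate))
		cpu_relax();
	atomic_dec(&count);			/* step 3: saw the gate drop */
}

static void rendezvous_master(int others)	/* the set_mtrr() side */
{
	atomic_set(&count, others);
	atomic_set(&gate, 0);
	/* kick the others (smp_call_function), then wait for step 1 */
	while (atomic_read(&count))
		cpu_relax();
	atomic_set(&count, others);
	atomic_set(&gate, 1);			/* everyone may update now */
	/* ... update our own MTRRs ... */
	while (atomic_read(&count))		/* wait for step 2 */
		cpu_relax();
	atomic_set(&count, others);
	atomic_set(&gate, 0);			/* drop the gate */
	while (atomic_read(&count))		/* wait for step 3 */
		cpu_relax();
}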
/**
- * mtrr_add_page - Add a memory type region
- * @base: Physical base address of region in pages (in units of 4 kB!)
- * @size: Physical size of region in pages (4 kB)
- * @type: Type of MTRR desired
- * @increment: If this is true do usage counting on the region
+ * mtrr_add_page - Add a memory type region
+ * @base: Physical base address of region in pages (in units of 4 kB!)
+ * @size: Physical size of region in pages (4 kB)
+ * @type: Type of MTRR desired
+ * @increment: If this is true do usage counting on the region
*
- * Memory type region registers control the caching on newer Intel and
- * non Intel processors. This function allows drivers to request an
- * MTRR is added. The details and hardware specifics of each processor's
- * implementation are hidden from the caller, but nevertheless the
- * caller should expect to need to provide a power of two size on an
- * equivalent power of two boundary.
+ * Memory type region registers control the caching on newer Intel and
+ * non-Intel processors. This function allows drivers to request that an
+ * MTRR be added. The details and hardware specifics of each processor's
+ * implementation are hidden from the caller, but nevertheless the
+ * caller should expect to need to provide a power of two size on an
+ * equivalent power of two boundary.
*
- * If the region cannot be added either because all regions are in use
- * or the CPU cannot support it a negative value is returned. On success
- * the register number for this entry is returned, but should be treated
- * as a cookie only.
+ * If the region cannot be added either because all regions are in use
+ * or the CPU cannot support it a negative value is returned. On success
+ * the register number for this entry is returned, but should be treated
+ * as a cookie only.
*
- * On a multiprocessor machine the changes are made to all processors.
- * This is required on x86 by the Intel processors.
+ * On a multiprocessor machine the changes are made to all processors.
+ * This is required on x86 by the Intel processors.
*
- * The available types are
+ * The available types are
*
- * %MTRR_TYPE_UNCACHABLE - No caching
+ * %MTRR_TYPE_UNCACHABLE - No caching
*
- * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
+ * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
*
- * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
+ * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
*
- * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
+ * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
*
- * BUGS: Needs a quiet flag for the cases where drivers do not mind
- * failures and do not wish system log messages to be sent.
+ * BUGS: Needs a quiet flag for the cases where drivers do not mind
+ * failures and do not wish system log messages to be sent.
*/
-
-int mtrr_add_page(unsigned long base, unsigned long size,
+int mtrr_add_page(unsigned long base, unsigned long size,
unsigned int type, bool increment)
{
+ unsigned long lbase, lsize;
int i, replace, error;
mtrr_type ltype;
- unsigned long lbase, lsize;
if (!mtrr_if)
return -ENXIO;
-
- if ((error = mtrr_if->validate_add_page(base,size,type)))
+
+ error = mtrr_if->validate_add_page(base, size, type);
+ if (error)
return error;
if (type >= MTRR_NUM_TYPES) {
- printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
+ pr_warning("mtrr: type: %u invalid\n", type);
return -EINVAL;
}
- /* If the type is WC, check that this processor supports it */
+ /* If the type is WC, check that this processor supports it */
if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
- printk(KERN_WARNING
- "mtrr: your processor doesn't support write-combining\n");
+ pr_warning("mtrr: your processor doesn't support write-combining\n");
return -ENOSYS;
}
if (!size) {
- printk(KERN_WARNING "mtrr: zero sized request\n");
+ pr_warning("mtrr: zero sized request\n");
return -EINVAL;
}
if (base & size_or_mask || size & size_or_mask) {
- printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
+ pr_warning("mtrr: base or size exceeds the MTRR width\n");
return -EINVAL;
}
@@ -341,36 +359,40 @@ int mtrr_add_page(unsigned long base, unsigned long size,
/* No CPU hotplug when we change MTRR entries */
get_online_cpus();
- /* Search for existing MTRR */
+
+ /* Search for existing MTRR */
mutex_lock(&mtrr_mutex);
for (i = 0; i < num_var_ranges; ++i) {
mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
+ if (!lsize || base > lbase + lsize - 1 ||
+ base + size - 1 < lbase)
continue;
- /* At this point we know there is some kind of overlap/enclosure */
+ /*
+ * At this point we know there is some kind of
+ * overlap/enclosure
+ */
if (base < lbase || base + size - 1 > lbase + lsize - 1) {
- if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
+ if (base <= lbase &&
+ base + size - 1 >= lbase + lsize - 1) {
/* New region encloses an existing region */
if (type == ltype) {
replace = replace == -1 ? i : -2;
continue;
- }
- else if (types_compatible(type, ltype))
+ } else if (types_compatible(type, ltype))
continue;
}
- printk(KERN_WARNING
- "mtrr: 0x%lx000,0x%lx000 overlaps existing"
- " 0x%lx000,0x%lx000\n", base, size, lbase,
- lsize);
+ pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
+ " 0x%lx000,0x%lx000\n", base, size, lbase,
+ lsize);
goto out;
}
- /* New region is enclosed by an existing region */
+ /* New region is enclosed by an existing region */
if (ltype != type) {
if (types_compatible(type, ltype))
continue;
- printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
- base, size, mtrr_attrib_to_str(ltype),
- mtrr_attrib_to_str(type));
+ pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
+ base, size, mtrr_attrib_to_str(ltype),
+ mtrr_attrib_to_str(type));
goto out;
}
if (increment)
@@ -378,7 +400,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
error = i;
goto out;
}
- /* Search for an empty MTRR */
+ /* Search for an empty MTRR */
i = mtrr_if->get_free_region(base, size, replace);
if (i >= 0) {
set_mtrr(i, base, size, type);
@@ -393,8 +415,9 @@ int mtrr_add_page(unsigned long base, unsigned long size,
mtrr_usage_table[replace] = 0;
}
}
- } else
- printk(KERN_INFO "mtrr: no more MTRRs available\n");
+ } else {
+ pr_info("mtrr: no more MTRRs available\n");
+ }
error = i;
out:
mutex_unlock(&mtrr_mutex);
@@ -405,10 +428,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
static int mtrr_check(unsigned long base, unsigned long size)
{
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_WARNING
- "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG
- "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+ pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
+ pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
dump_stack();
return -1;
}
@@ -416,66 +437,64 @@ static int mtrr_check(unsigned long base, unsigned long size)
}
/**
- * mtrr_add - Add a memory type region
- * @base: Physical base address of region
- * @size: Physical size of region
- * @type: Type of MTRR desired
- * @increment: If this is true do usage counting on the region
+ * mtrr_add - Add a memory type region
+ * @base: Physical base address of region
+ * @size: Physical size of region
+ * @type: Type of MTRR desired
+ * @increment: If this is true do usage counting on the region
*
- * Memory type region registers control the caching on newer Intel and
- * non Intel processors. This function allows drivers to request an
- * MTRR is added. The details and hardware specifics of each processor's
- * implementation are hidden from the caller, but nevertheless the
- * caller should expect to need to provide a power of two size on an
- * equivalent power of two boundary.
+ * Memory type region registers control the caching on newer Intel and
+ * non-Intel processors. This function allows drivers to request that an
+ * MTRR be added. The details and hardware specifics of each processor's
+ * implementation are hidden from the caller, but nevertheless the
+ * caller should expect to need to provide a power of two size on an
+ * equivalent power of two boundary.
*
- * If the region cannot be added either because all regions are in use
- * or the CPU cannot support it a negative value is returned. On success
- * the register number for this entry is returned, but should be treated
- * as a cookie only.
+ * If the region cannot be added either because all regions are in use
+ * or the CPU cannot support it a negative value is returned. On success
+ * the register number for this entry is returned, but should be treated
+ * as a cookie only.
*
- * On a multiprocessor machine the changes are made to all processors.
- * This is required on x86 by the Intel processors.
+ * On a multiprocessor machine the changes are made to all processors.
+ * This is required on x86 by the Intel processors.
*
- * The available types are
+ * The available types are
*
- * %MTRR_TYPE_UNCACHABLE - No caching
+ * %MTRR_TYPE_UNCACHABLE - No caching
*
- * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
+ * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
*
- * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
+ * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
*
- * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
+ * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
*
- * BUGS: Needs a quiet flag for the cases where drivers do not mind
- * failures and do not wish system log messages to be sent.
+ * BUGS: Needs a quiet flag for the cases where drivers do not mind
+ * failures and do not wish system log messages to be sent.
*/
-
-int
-mtrr_add(unsigned long base, unsigned long size, unsigned int type,
- bool increment)
+int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
+ bool increment)
{
if (mtrr_check(base, size))
return -EINVAL;
return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
increment);
}
+EXPORT_SYMBOL(mtrr_add);
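For reference, a typical caller of the interface documented above (for example a framebuffer driver that wants its aperture mapped write-combining) looks roughly like this; the base address and size are hypothetical:

/* Hypothetical example only: request write-combining for a 4 MB aperture. */
int wc_cookie = mtrr_add(0xf8000000, 0x400000, MTRR_TYPE_WRCOMB, true);

if (wc_cookie < 0)
	pr_info("example: write-combining not available\n");

/* ... and on teardown, release it again ... */
if (wc_cookie >= 0)
	mtrr_del(wc_cookie, 0xf8000000, 0x400000);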
/**
- * mtrr_del_page - delete a memory type region
- * @reg: Register returned by mtrr_add
- * @base: Physical base address
- * @size: Size of region
+ * mtrr_del_page - delete a memory type region
+ * @reg: Register returned by mtrr_add
+ * @base: Physical base address
+ * @size: Size of region
*
- * If register is supplied then base and size are ignored. This is
- * how drivers should call it.
+ * If register is supplied then base and size are ignored. This is
+ * how drivers should call it.
*
- * Releases an MTRR region. If the usage count drops to zero the
- * register is freed and the region returns to default state.
- * On success the register is returned, on failure a negative error
- * code.
+ * Releases an MTRR region. If the usage count drops to zero the
+ * register is freed and the region returns to default state.
+ * On success the register is returned, on failure a negative error
+ * code.
*/
-
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
int i, max;
@@ -500,22 +519,22 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
}
}
if (reg < 0) {
- printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
- size);
+ pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
+ base, size);
goto out;
}
}
if (reg >= max) {
- printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
+ pr_warning("mtrr: register: %d too big\n", reg);
goto out;
}
mtrr_if->get(reg, &lbase, &lsize, &ltype);
if (lsize < 1) {
- printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
+ pr_warning("mtrr: MTRR %d not used\n", reg);
goto out;
}
if (mtrr_usage_table[reg] < 1) {
- printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
+ pr_warning("mtrr: reg: %d has count=0\n", reg);
goto out;
}
if (--mtrr_usage_table[reg] < 1)
@@ -526,33 +545,31 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
put_online_cpus();
return error;
}
+
/**
- * mtrr_del - delete a memory type region
- * @reg: Register returned by mtrr_add
- * @base: Physical base address
- * @size: Size of region
+ * mtrr_del - delete a memory type region
+ * @reg: Register returned by mtrr_add
+ * @base: Physical base address
+ * @size: Size of region
*
- * If register is supplied then base and size are ignored. This is
- * how drivers should call it.
+ * If register is supplied then base and size are ignored. This is
+ * how drivers should call it.
*
- * Releases an MTRR region. If the usage count drops to zero the
- * register is freed and the region returns to default state.
- * On success the register is returned, on failure a negative error
- * code.
+ * Releases an MTRR region. If the usage count drops to zero the
+ * register is freed and the region returns to default state.
+ * On success the register is returned, on failure a negative error
+ * code.
*/
-
-int
-mtrr_del(int reg, unsigned long base, unsigned long size)
+int mtrr_del(int reg, unsigned long base, unsigned long size)
{
if (mtrr_check(base, size))
return -EINVAL;
return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
-
-EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);
-/* HACK ALERT!
+/*
+ * HACK ALERT!
* These should be called implicitly, but we can't yet until all the initcall
* stuff is done...
*/
@@ -576,29 +593,28 @@ struct mtrr_value {
static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
-static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
+static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
int i;
for (i = 0; i < num_var_ranges; i++) {
- mtrr_if->get(i,
- &mtrr_value[i].lbase,
- &mtrr_value[i].lsize,
- &mtrr_value[i].ltype);
+ mtrr_if->get(i, &mtrr_value[i].lbase,
+ &mtrr_value[i].lsize,
+ &mtrr_value[i].ltype);
}
return 0;
}
-static int mtrr_restore(struct sys_device * sysdev)
+static int mtrr_restore(struct sys_device *sysdev)
{
int i;
for (i = 0; i < num_var_ranges; i++) {
- if (mtrr_value[i].lsize)
- set_mtrr(i,
- mtrr_value[i].lbase,
- mtrr_value[i].lsize,
- mtrr_value[i].ltype);
+ if (mtrr_value[i].lsize) {
+ set_mtrr(i, mtrr_value[i].lbase,
+ mtrr_value[i].lsize,
+ mtrr_value[i].ltype);
+ }
}
return 0;
}
@@ -615,26 +631,29 @@ int __initdata changed_by_mtrr_cleanup;
/**
* mtrr_bp_init - initialize mtrrs on the boot CPU
*
- * This needs to be called early; before any of the other CPUs are
+ * This needs to be called early; before any of the other CPUs are
* initialized (i.e. before smp_init()).
- *
+ *
*/
void __init mtrr_bp_init(void)
{
u32 phys_addr;
+
init_ifs();
phys_addr = 32;
if (cpu_has_mtrr) {
mtrr_if = &generic_mtrr_ops;
- size_or_mask = 0xff000000; /* 36 bits */
+ size_or_mask = 0xff000000; /* 36 bits */
size_and_mask = 0x00f00000;
phys_addr = 36;
- /* This is an AMD specific MSR, but we assume(hope?) that
- Intel will implement it to when they extend the address
- bus of the Xeon. */
+ /*
+ * This is an AMD-specific MSR, but we assume (hope?) that
+ * Intel will implement it too when they extend the address
+ * bus of the Xeon.
+ */
if (cpuid_eax(0x80000000) >= 0x80000008) {
phys_addr = cpuid_eax(0x80000008) & 0xff;
/* CPUID workaround for Intel 0F33/0F34 CPU */
@@ -649,9 +668,11 @@ void __init mtrr_bp_init(void)
size_and_mask = ~size_or_mask & 0xfffff00000ULL;
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
boot_cpu_data.x86 == 6) {
- /* VIA C* family have Intel style MTRRs, but
- don't support PAE */
- size_or_mask = 0xfff00000; /* 32 bits */
+ /*
+ * VIA C* family have Intel style MTRRs,
+ * but don't support PAE
+ */
+ size_or_mask = 0xfff00000; /* 32 bits */
size_and_mask = 0;
phys_addr = 32;
}
@@ -694,7 +715,6 @@ void __init mtrr_bp_init(void)
changed_by_mtrr_cleanup = 1;
mtrr_if->set_all();
}
-
}
}
}
@@ -706,12 +726,17 @@ void mtrr_ap_init(void)
if (!mtrr_if || !use_intel())
return;
/*
- * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
- * but this routine will be called in cpu boot time, holding the lock
- * breaks it. This routine is called in two cases: 1.very earily time
- * of software resume, when there absolutely isn't mtrr entry changes;
- * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to
- * prevent mtrr entry changes
+ * Ideally we should hold mtrr_mutex here to avoid MTRR entries
+ * being changed, but this routine will be called at cpu boot time,
+ * and holding the lock breaks it.
+ *
+ * This routine is called in two cases:
+ *
+ * 1. very early during software resume, when there absolutely
+ * are no MTRR entry changes;
+ *
+ * 2. cpu hotadd time. We let mtrr_add/del_page hold the cpuhotplug
+ * lock to prevent MTRR entry changes.
*/
local_irq_save(flags);
@@ -732,19 +757,23 @@ static int __init mtrr_init_finialize(void)
{
if (!mtrr_if)
return 0;
+
if (use_intel()) {
if (!changed_by_mtrr_cleanup)
mtrr_state_warn();
- } else {
- /* The CPUs haven't MTRR and seem to not support SMP. They have
- * specific drivers, we use a tricky method to support
- * suspend/resume for them.
- * TBD: is there any system with such CPU which supports
- * suspend/resume? if no, we should remove the code.
- */
- sysdev_driver_register(&cpu_sysdev_class,
- &mtrr_sysdev_driver);
+ return 0;
}
+
+ /*
+ * These CPUs have no MTRR and seem not to support SMP. They have
+ * specific drivers; we use a tricky method to support
+ * suspend/resume for them.
+ *
+ * TBD: is there any system with such a CPU which supports
+ * suspend/resume? If not, we should remove the code.
+ */
+ sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);
+
return 0;
}
subsys_initcall(mtrr_init_finialize);
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 7538b767f206..a501dee9a87a 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -1,5 +1,5 @@
/*
- * local mtrr defines.
+ * local MTRR defines.
*/
#include <linux/types.h>
@@ -14,13 +14,12 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
struct mtrr_ops {
u32 vendor;
u32 use_intel_if;
-// void (*init)(void);
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
void (*set_all)(void);
void (*get)(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type * type);
+ unsigned long *size, mtrr_type *type);
int (*get_free_region)(unsigned long base, unsigned long size,
int replace_reg);
int (*validate_add_page)(unsigned long base, unsigned long size,
@@ -39,11 +38,11 @@ extern int positive_have_wrcomb(void);
/* library functions for processor-specific routines */
struct set_mtrr_context {
- unsigned long flags;
- unsigned long cr4val;
- u32 deftype_lo;
- u32 deftype_hi;
- u32 ccr3;
+ unsigned long flags;
+ unsigned long cr4val;
+ u32 deftype_lo;
+ u32 deftype_hi;
+ u32 ccr3;
};
void set_mtrr_done(struct set_mtrr_context *ctxt);
@@ -54,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
void get_mtrr_state(void);
-extern void set_mtrr_ops(struct mtrr_ops * ops);
+extern void set_mtrr_ops(struct mtrr_ops *ops);
extern u64 size_or_mask, size_and_mask;
-extern struct mtrr_ops * mtrr_if;
+extern struct mtrr_ops *mtrr_if;
#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c
index 1f5fb1588d1f..dfc80b4e6b0d 100644
--- a/arch/x86/kernel/cpu/mtrr/state.c
+++ b/arch/x86/kernel/cpu/mtrr/state.c
@@ -1,24 +1,25 @@
-#include <linux/mm.h>
#include <linux/init.h>
-#include <asm/io.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
-#include "mtrr.h"
+#include <asm/mtrr.h>
+#include <asm/msr.h>
+#include "mtrr.h"
-/* Put the processor into a state where MTRRs can be safely set */
+/* Put the processor into a state where MTRRs can be safely set */
void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
{
unsigned int cr0;
- /* Disable interrupts locally */
+ /* Disable interrupts locally */
local_irq_save(ctxt->flags);
if (use_intel() || is_cpu(CYRIX)) {
- /* Save value of CR4 and clear Page Global Enable (bit 7) */
+ /* Save value of CR4 and clear Page Global Enable (bit 7) */
if (cpu_has_pge) {
ctxt->cr4val = read_cr4();
write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
@@ -33,50 +34,61 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
write_cr0(cr0);
wbinvd();
- if (use_intel())
- /* Save MTRR state */
+ if (use_intel()) {
+ /* Save MTRR state */
rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi);
- else
- /* Cyrix ARRs - everything else were excluded at the top */
+ } else {
+ /*
+ * Cyrix ARRs - everything else was excluded at the top
+ */
ctxt->ccr3 = getCx86(CX86_CCR3);
+ }
}
}
void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
{
- if (use_intel())
- /* Disable MTRRs, and set the default type to uncached */
+ if (use_intel()) {
+ /* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL,
ctxt->deftype_hi);
- else if (is_cpu(CYRIX))
- /* Cyrix ARRs - everything else were excluded at the top */
- setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
+ } else {
+ if (is_cpu(CYRIX)) {
+ /* Cyrix ARRs - everything else was excluded at the top */
+ setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
+ }
+ }
}
-/* Restore the processor after a set_mtrr_prepare */
+/* Restore the processor after a set_mtrr_prepare */
void set_mtrr_done(struct set_mtrr_context *ctxt)
{
if (use_intel() || is_cpu(CYRIX)) {
- /* Flush caches and TLBs */
+ /* Flush caches and TLBs */
wbinvd();
- /* Restore MTRRdefType */
- if (use_intel())
+ /* Restore MTRRdefType */
+ if (use_intel()) {
/* Intel (P6) standard MTRRs */
- mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi);
- else
- /* Cyrix ARRs - everything else was excluded at the top */
+ mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo,
+ ctxt->deftype_hi);
+ } else {
+ /*
+ * Cyrix ARRs - everything else was excluded at the top
+ */
setCx86(CX86_CCR3, ctxt->ccr3);
+ }
- /* Enable caches */
+ /* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
- /* Restore value of CR4 */
+ /* Restore value of CR4 */
if (cpu_has_pge)
write_cr4(ctxt->cr4val);
}
- /* Re-enable interrupts locally (if enabled previously) */
+ /* Re-enable interrupts locally (if enabled previously) */
local_irq_restore(ctxt->flags);
}
-
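For context, the three helpers in this file bracket the vendor-specific range-register updates; the expected calling order is roughly the following sketch (an assumption for illustration, not code from this patch):

/* Assumed calling sequence for the helpers above (illustration only). */
struct set_mtrr_context ctxt;

set_mtrr_prepare_save(&ctxt);	/* irqs off, caches off, old state saved */
set_mtrr_cache_disable(&ctxt);	/* MTRRs (or Cyrix ARRs) switched off */
/* ... program the range registers for this CPU ... */
set_mtrr_done(&ctxt);		/* flush, restore saved state, irqs back on */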
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 76dfef23f789..835d1b3b88cd 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -401,7 +401,7 @@ static const u64 amd_hw_cache_event_ids
[ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
},
[ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0042, /* Data Cache Refills from L2 */
+ [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
@@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
x86_pmu_disable_counter(hwc, idx);
}
-static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
/*
* Set the next IRQ period, based on the hwc->period_left value.
@@ -901,7 +901,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
if (left > x86_pmu.max_period)
left = x86_pmu.max_period;
- per_cpu(prev_left[idx], smp_processor_id()) = left;
+ per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
* The hw counter starts counting from this counter offset,
@@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
err = checking_wrmsrl(hwc->counter_base + idx,
(u64)(-left) & x86_pmu.counter_mask);
+ perf_counter_update_userpage(counter);
+
return ret;
}
@@ -969,13 +971,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
if (!x86_pmu.num_counters_fixed)
return -1;
- /*
- * Quirk, IA32_FIXED_CTRs do not work on current Atom processors:
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86_model == 28)
- return -1;
-
event = hwc->config & ARCH_PERFMON_EVENT_MASK;
if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
@@ -1041,6 +1036,8 @@ try_generic:
x86_perf_counter_set_period(counter, hwc, idx);
x86_pmu.enable(hwc, idx);
+ perf_counter_update_userpage(counter);
+
return 0;
}
@@ -1089,7 +1086,7 @@ void perf_counter_print_debug(void)
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
rdmsrl(x86_pmu.perfctr + idx, pmc_count);
- prev_left = per_cpu(prev_left[idx], cpu);
+ prev_left = per_cpu(pmc_prev_left[idx], cpu);
pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
cpu, idx, pmc_ctrl);
@@ -1133,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
x86_perf_counter_update(counter, hwc, idx);
cpuc->counters[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
+
+ perf_counter_update_userpage(counter);
}
/*
@@ -1428,8 +1427,6 @@ static int intel_pmu_init(void)
*/
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
-
/*
* Install the hw-cache-events table:
*/
@@ -1499,21 +1496,22 @@ void __init init_hw_perf_counters(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
- x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+ x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
perf_max_counters = x86_pmu.num_counters;
if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
- x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+ x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
}
perf_counter_mask |=
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+ x86_pmu.intel_ctrl = perf_counter_mask;
perf_counters_lapic_init();
register_die_notifier(&perf_counter_nmi_notifier);
@@ -1561,8 +1559,9 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
entry->ip[entry->nr++] = ip;
}
-static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
+static DEFINE_PER_CPU(int, in_nmi_frame);
static void
@@ -1578,7 +1577,9 @@ static void backtrace_warning(void *data, char *msg)
static int backtrace_stack(void *data, char *name)
{
- /* Process all stacks: */
+ per_cpu(in_nmi_frame, smp_processor_id()) =
+ x86_is_stack_id(NMI_STACK, name);
+
return 0;
}
@@ -1586,6 +1587,9 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
{
struct perf_callchain_entry *entry = data;
+ if (per_cpu(in_nmi_frame, smp_processor_id()))
+ return;
+
if (reliable)
callchain_store(entry, addr);
}
@@ -1709,9 +1713,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
struct perf_callchain_entry *entry;
if (in_nmi())
- entry = &__get_cpu_var(nmi_entry);
+ entry = &__get_cpu_var(pmc_nmi_entry);
else
- entry = &__get_cpu_var(irq_entry);
+ entry = &__get_cpu_var(pmc_irq_entry);
entry->nr = 0;
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 5c481f6205bf..e60ed740d2b3 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -803,8 +803,3 @@ int __kprobes lapic_wd_event(unsigned nmi_hz)
wd_ops->rearm(wd, nmi_hz);
return 1;
}
-
-int lapic_watchdog_ok(void)
-{
- return wd_ops != NULL;
-}
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index d5e30397246b..f82706a3901d 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -116,11 +116,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
#endif
seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
-#ifdef CONFIG_X86_64
seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
c->x86_phys_bits, c->x86_virt_bits);
-#endif
seq_printf(m, "power management:");
for (i = 0; i < 32; i++) {
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 48bfe1386038..ef42a038f1a6 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -509,15 +509,15 @@ enum bts_field {
bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
};
-static inline unsigned long bts_get(const char *base, enum bts_field field)
+static inline unsigned long bts_get(const char *base, unsigned long field)
{
base += (ds_cfg.sizeof_ptr_field * field);
return *(unsigned long *)base;
}
-static inline void bts_set(char *base, enum bts_field field, unsigned long val)
+static inline void bts_set(char *base, unsigned long field, unsigned long val)
{
- base += (ds_cfg.sizeof_ptr_field * field);;
+ base += (ds_cfg.sizeof_ptr_field * field);
(*(unsigned long *)base) = val;
}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 95ea5fa7d444..c8405718a4c3 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -22,6 +22,7 @@
#include "dumpstack.h"
int panic_on_unrecovered_nmi;
+int panic_on_io_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index d593cd1f58dc..bca5fba91c9e 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -19,6 +19,12 @@
#include "dumpstack.h"
+/* Just a stub for now */
+int x86_is_stack_id(int id, char *name)
+{
+ return 0;
+}
+
void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index d35db5993fd6..54b0a3276766 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -19,10 +19,8 @@
#include "dumpstack.h"
-static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
- unsigned *usedp, char **idp)
-{
- static char ids[][8] = {
+
+static char x86_stack_ids[][8] = {
[DEBUG_STACK - 1] = "#DB",
[NMI_STACK - 1] = "NMI",
[DOUBLEFAULT_STACK - 1] = "#DF",
@@ -33,6 +31,15 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
};
+
+int x86_is_stack_id(int id, char *name)
+{
+ return x86_stack_ids[id - 1] == name;
+}
+
+static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
+ unsigned *usedp, char **idp)
+{
unsigned k;
/*
@@ -61,7 +68,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
if (*usedp & (1U << k))
break;
*usedp |= 1U << k;
- *idp = ids[k];
+ *idp = x86_stack_ids[k];
return (unsigned long *)end;
}
/*
@@ -81,12 +88,13 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
do {
++j;
end -= EXCEPTION_STKSZ;
- ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
+ x86_stack_ids[j][4] = '1' +
+ (j - N_EXCEPTION_STACKS);
} while (stack < end - EXCEPTION_STKSZ);
if (*usedp & (1U << j))
break;
*usedp |= 1U << j;
- *idp = ids[j];
+ *idp = x86_stack_ids[j];
return (unsigned long *)end;
}
#endif
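A note on x86_is_stack_id() above: it compares pointers, not string contents. That appears safe here because in_exception_stack() hands out *idp pointing directly into x86_stack_ids[], so a caller that passes that same pointer back gets an exact match; any caller passing a separately allocated string would need a strcmp() instead.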
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 7271fa33d791..c4ca89d9aaf4 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1383,6 +1383,8 @@ static unsigned long ram_alignment(resource_size_t pos)
return 32*1024*1024;
}
+#define MAX_RESOURCE_SIZE ((resource_size_t)-1)
+
void __init e820_reserve_resources_late(void)
{
int i;
@@ -1400,17 +1402,19 @@ void __init e820_reserve_resources_late(void)
* avoid stolen RAM:
*/
for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *entry = &e820_saved.map[i];
- resource_size_t start, end;
+ struct e820entry *entry = &e820.map[i];
+ u64 start, end;
if (entry->type != E820_RAM)
continue;
start = entry->addr + entry->size;
- end = round_up(start, ram_alignment(start));
- if (start == end)
+ end = round_up(start, ram_alignment(start)) - 1;
+ if (end > MAX_RESOURCE_SIZE)
+ end = MAX_RESOURCE_SIZE;
+ if (start >= end)
continue;
- reserve_region_with_split(&iomem_resource, start,
- end - 1, "RAM buffer");
+ reserve_region_with_split(&iomem_resource, start, end,
+ "RAM buffer");
}
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 8d82a77a3f3b..8352c0b9643f 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -85,10 +85,15 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
gdb_regs[GDB_DS] = regs->ds;
gdb_regs[GDB_ES] = regs->es;
gdb_regs[GDB_CS] = regs->cs;
- gdb_regs[GDB_SS] = __KERNEL_DS;
gdb_regs[GDB_FS] = 0xFFFF;
gdb_regs[GDB_GS] = 0xFFFF;
- gdb_regs[GDB_SP] = (int)&regs->sp;
+ if (user_mode_vm(regs)) {
+ gdb_regs[GDB_SS] = regs->ss;
+ gdb_regs[GDB_SP] = regs->sp;
+ } else {
+ gdb_regs[GDB_SS] = __KERNEL_DS;
+ gdb_regs[GDB_SP] = (unsigned long)&regs->sp;
+ }
#else
gdb_regs[GDB_R8] = regs->r8;
gdb_regs[GDB_R9] = regs->r9;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a78ecad0c900..c664d515f613 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -200,7 +200,7 @@ static void kvm_leave_lazy_mmu(void)
state->mode = paravirt_get_lazy_mode();
}
-static void paravirt_ops_setup(void)
+static void __init paravirt_ops_setup(void)
{
pv_info.name = "KVM";
pv_info.paravirt_enabled = 1;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 71f1d99a635d..ec6ef60cbd17 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
#ifdef CONFIG_SMP
preempt_disable();
load_LDT(pc);
- if (!cpus_equal(current->mm->cpu_vm_mask,
- cpumask_of_cpu(smp_processor_id())))
+ if (!cpumask_equal(mm_cpumask(current->mm),
+ cpumask_of(smp_processor_id())))
smp_call_function(flush_ldt, current->mm, 1);
preempt_enable();
#else
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 47630479b067..1a041bcf506b 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -211,11 +211,11 @@ static __init int iommu_setup(char *p)
#ifdef CONFIG_SWIOTLB
if (!strncmp(p, "soft", 4))
swiotlb = 1;
+#endif
if (!strncmp(p, "pt", 2)) {
iommu_pass_through = 1;
return 1;
}
-#endif
gart_parse_options(p);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 994dd6a4a2a0..b8143acf30f3 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -572,10 +572,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
void __init init_c1e_mask(void)
{
/* If we're using c1e_idle, we need to allocate c1e_mask. */
- if (pm_idle == c1e_idle) {
- alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
- cpumask_clear(c1e_mask);
- }
+ if (pm_idle == c1e_idle)
+ zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}
static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 59f4524984af..a80eddd41658 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -350,14 +350,21 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ bool preload_fpu;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
- __unlazy_fpu(prev_p);
+ /*
+ * If the task has used the FPU in the last 5 timeslices, just do a
+ * full restore of the math state immediately to avoid the trap; the
+ * chances of needing the FPU soon are obviously high now.
+ */
+ preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
+ __unlazy_fpu(prev_p);
/* we're going to use this soon, after a few expensive things */
- if (next_p->fpu_counter > 5)
+ if (preload_fpu)
prefetch(next->xstate);
/*
@@ -398,6 +405,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);
+ /* If we're going to preload the fpu context, make sure clts
+ is run while we're batching the cpu state updates. */
+ if (preload_fpu)
+ clts();
+
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
@@ -407,15 +419,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
arch_end_context_switch(next_p);
- /* If the task has used fpu the last 5 timeslices, just do a full
- * restore of the math state immediately to avoid the trap; the
- * chances of needing FPU soon are obviously high now
- *
- * tsk_used_math() checks prevent calling math_state_restore(),
- * which can sleep in the case of !tsk_used_math()
- */
- if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
- math_state_restore();
+ if (preload_fpu)
+ __math_state_restore();
/*
* Restore %gs if needed (which is common)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ebefb5407b9d..a28279dbb07c 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -386,9 +386,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
unsigned fsindex, gsindex;
+ bool preload_fpu;
+
+ /*
+ * If the task has used the FPU in the last 5 timeslices, just do a
+ * full restore of the math state immediately to avoid the trap; the
+ * chances of needing the FPU soon are obviously high now.
+ */
+ preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
/* we're going to use this soon, after a few expensive things */
- if (next_p->fpu_counter > 5)
+ if (preload_fpu)
prefetch(next->xstate);
/*
@@ -419,6 +427,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
load_TLS(next, cpu);
+ /* Must be after DS reload */
+ unlazy_fpu(prev_p);
+
+ /* Make sure cpu is ready for new context */
+ if (preload_fpu)
+ clts();
+
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
@@ -459,9 +474,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
prev->gsindex = gsindex;
- /* Must be after DS reload */
- unlazy_fpu(prev_p);
-
/*
* Switch the PDA and FPU contexts.
*/
@@ -480,15 +492,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
- /* If the task has used fpu the last 5 timeslices, just do a full
- * restore of the math state immediately to avoid the trap; the
- * chances of needing FPU soon are obviously high now
- *
- * tsk_used_math() checks prevent calling math_state_restore(),
- * which can sleep in the case of !tsk_used_math()
+ /*
+ * Preload the FPU context, now that we've determined that the
+ * task is likely to be using it.
*/
- if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
- math_state_restore();
+ if (preload_fpu)
+ __math_state_restore();
return prev_p;
}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index d2d1ce8170f0..4c53aeb050ab 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -32,7 +32,7 @@ EXPORT_SYMBOL(pm_power_off);
static const struct desc_ptr no_idt = {};
static int reboot_mode;
-enum reboot_type reboot_type = BOOT_KBD;
+enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;
#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index be5ae80f897f..d1d9b0066a4a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align)
return ret;
}
+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+ if (direct_gbpages && cpu_has_gbpages)
+ printk(KERN_INFO "Using GB pages for direct mapping\n");
+ else
+ direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
static void __init reserve_brk(void)
{
if (_brk_end > _brk_start)
@@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p)
reserve_brk();
+ init_gbpages();
+
/* max_pfn_mapped is updated here */
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;
@@ -1072,7 +1088,6 @@ void __init x86_quirk_time_init(void)
return;
}
- irq0.mask = cpumask_of_cpu(0);
setup_irq(0, &irq0);
}
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 9c3f0823e6aa..7501bb14bd51 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -124,118 +124,101 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
}
/*
- * Remap allocator
- *
- * This allocator uses PMD page as unit. A PMD page is allocated for
- * each cpu and each is remapped into vmalloc area using PMD mapping.
- * As PMD page is quite large, only part of it is used for the first
- * chunk. Unused part is returned to the bootmem allocator.
- *
- * So, the PMD pages are mapped twice - once to the physical mapping
- * and to the vmalloc area for the first percpu chunk. The double
- * mapping does add one more PMD TLB entry pressure but still is much
- * better than only using 4k mappings while still being NUMA friendly.
+ * Helpers for first chunk memory allocation
*/
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size)
+{
+ return pcpu_alloc_bootmem(cpu, size, size);
+}
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static void __init pcpu_fc_free(void *ptr, size_t size)
{
- size_t off = (size_t)pageno << PAGE_SHIFT;
+ free_bootmem(__pa(ptr), size);
+}
- if (off >= pcpur_size)
- return NULL;
+/*
+ * Large page remapping allocator
+ */
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+static void __init pcpul_map(void *ptr, size_t size, void *addr)
+{
+ pmd_t *pmd, pmd_v;
- return virt_to_page(pcpur_ptrs[cpu] + off);
+ pmd = populate_extra_pmd((unsigned long)addr);
+ pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE);
+ set_pmd(pmd, pmd_v);
}
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
{
- static struct vm_struct vm;
- size_t ptrs_size, dyn_size;
- unsigned int cpu;
+ if (early_cpu_to_node(from) == early_cpu_to_node(to))
+ return LOCAL_DISTANCE;
+ else
+ return REMOTE_DISTANCE;
+}
+
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
+{
+ size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
+ size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
+ size_t unit_map_size, unit_size;
+ int *unit_map;
+ int nr_units;
ssize_t ret;
- /*
- * If large page isn't supported, there's no benefit in doing
- * this. Also, on non-NUMA, embedding is better.
- *
- * NOTE: disabled for now.
- */
- if (true || !cpu_has_pse || !pcpu_need_numa())
+ /* on non-NUMA, embedding is better */
+ if (!chosen && !pcpu_need_numa())
return -EINVAL;
- /*
- * Currently supports only single page. Supporting multiple
- * pages won't be too difficult if it ever becomes necessary.
- */
- pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
- PERCPU_DYNAMIC_RESERVE);
- if (pcpur_size > PMD_SIZE) {
- pr_warning("PERCPU: static data is larger than large page, "
- "can't use large page\n");
+ /* need PSE */
+ if (!cpu_has_pse) {
+ pr_warning("PERCPU: lpage allocator requires PSE\n");
return -EINVAL;
}
- dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
-
- /* allocate pointer array and alloc large pages */
- ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
- pcpur_ptrs = alloc_bootmem(ptrs_size);
-
- for_each_possible_cpu(cpu) {
- pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
- if (!pcpur_ptrs[cpu])
- goto enomem;
-
- /*
- * Only use pcpur_size bytes and give back the rest.
- *
- * Ingo: The 2MB up-rounding bootmem is needed to make
- * sure the partial 2MB page is still fully RAM - it's
- * not well-specified to have a PAT-incompatible area
- * (unmapped RAM, device memory, etc.) in that hole.
- */
- free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
- PMD_SIZE - pcpur_size);
- memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+ /* allocate and build unit_map */
+ unit_map_size = num_possible_cpus() * sizeof(int);
+ unit_map = alloc_bootmem_nopanic(unit_map_size);
+ if (!unit_map) {
+ pr_warning("PERCPU: failed to allocate unit_map\n");
+ return -ENOMEM;
}
- /* allocate address and map */
- vm.flags = VM_ALLOC;
- vm.size = num_possible_cpus() * PMD_SIZE;
- vm_area_register_early(&vm, PMD_SIZE);
-
- for_each_possible_cpu(cpu) {
- pmd_t *pmd;
-
- pmd = populate_extra_pmd((unsigned long)vm.addr
- + cpu * PMD_SIZE);
- set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
- PAGE_KERNEL_LARGE));
+ ret = pcpu_lpage_build_unit_map(static_size,
+ PERCPU_FIRST_CHUNK_RESERVE,
+ &dyn_size, &unit_size, PMD_SIZE,
+ unit_map, pcpu_lpage_cpu_distance);
+ if (ret < 0) {
+ pr_warning("PERCPU: failed to build unit_map\n");
+ goto out_free;
+ }
+ nr_units = ret;
+
+ /* do the parameters look okay? */
+ if (!chosen) {
+ size_t vm_size = VMALLOC_END - VMALLOC_START;
+ size_t tot_size = nr_units * unit_size;
+
+ /* don't consume more than 20% of vmalloc area */
+ if (tot_size > vm_size / 5) {
+ pr_info("PERCPU: too large chunk size %zuMB for "
+ "large page remap\n", tot_size >> 20);
+ ret = -EINVAL;
+ goto out_free;
+ }
}
- /* we're ready, commit */
- pr_info("PERCPU: Remapped at %p with large pages, static data "
- "%zu bytes\n", vm.addr, static_size);
-
- ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
- PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
- PMD_SIZE, vm.addr, NULL);
- goto out_free_ar;
-
-enomem:
- for_each_possible_cpu(cpu)
- if (pcpur_ptrs[cpu])
- free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
- ret = -ENOMEM;
-out_free_ar:
- free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+ ret = pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+ dyn_size, unit_size, PMD_SIZE,
+ unit_map, nr_units,
+ pcpu_fc_alloc, pcpu_fc_free, pcpul_map);
+out_free:
+ if (ret < 0)
+ free_bootmem(__pa(unit_map), unit_map_size);
return ret;
}
#else
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
return -EINVAL;
}
@@ -249,7 +232,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
* mapping so that it can use PMD mapping without additional TLB
* pressure.
*/
-static ssize_t __init setup_pcpu_embed(size_t static_size)
+static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
{
size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
@@ -258,30 +241,19 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
* this. Also, embedding allocation doesn't play well with
* NUMA.
*/
- if (!cpu_has_pse || pcpu_need_numa())
+ if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
return -EINVAL;
return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
- reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
+ reserve - PERCPU_FIRST_CHUNK_RESERVE);
}
/*
- * 4k page allocator
+ * 4k allocator
*
- * This is the basic allocator. Static percpu area is allocated
- * page-by-page and most of initialization is done by the generic
- * setup function.
+ * Boring fallback 4k allocator. This allocator puts more pressure on
+ * PTE TLBs but other than that behaves nicely on both UMA and NUMA.
*/
-static struct page **pcpu4k_pages __initdata;
-static int pcpu4k_nr_static_pages __initdata;
-
-static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
-{
- if (pageno < pcpu4k_nr_static_pages)
- return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
- return NULL;
-}
-
static void __init pcpu4k_populate_pte(unsigned long addr)
{
populate_extra_pte(addr);
@@ -289,49 +261,20 @@ static void __init pcpu4k_populate_pte(unsigned long addr)
static ssize_t __init setup_pcpu_4k(size_t static_size)
{
- size_t pages_size;
- unsigned int cpu;
- int i, j;
- ssize_t ret;
-
- pcpu4k_nr_static_pages = PFN_UP(static_size);
-
- /* unaligned allocations can't be freed, round up to page size */
- pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
- * sizeof(pcpu4k_pages[0]));
- pcpu4k_pages = alloc_bootmem(pages_size);
-
- /* allocate and copy */
- j = 0;
- for_each_possible_cpu(cpu)
- for (i = 0; i < pcpu4k_nr_static_pages; i++) {
- void *ptr;
+ return pcpu_4k_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+ pcpu_fc_alloc, pcpu_fc_free,
+ pcpu4k_populate_pte);
+}
- ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
- if (!ptr)
- goto enomem;
+/* for explicit first chunk allocator selection */
+static char pcpu_chosen_alloc[16] __initdata;
- memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
- pcpu4k_pages[j++] = virt_to_page(ptr);
- }
-
- /* we're ready, commit */
- pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
- pcpu4k_nr_static_pages, static_size);
-
- ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
- PERCPU_FIRST_CHUNK_RESERVE, -1,
- -1, NULL, pcpu4k_populate_pte);
- goto out_free_ar;
-
-enomem:
- while (--j >= 0)
- free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
- ret = -ENOMEM;
-out_free_ar:
- free_bootmem(__pa(pcpu4k_pages), pages_size);
- return ret;
+static int __init percpu_alloc_setup(char *str)
+{
+ strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
+ return 0;
}
+early_param("percpu_alloc", percpu_alloc_setup);
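The parameter registered above makes the first-chunk allocator selectable at boot, e.g. percpu_alloc=lpage, percpu_alloc=embed or percpu_alloc=4k; judging from the selection code in setup_per_cpu_areas() below, an unknown or failing choice falls back to the 4k allocator.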
static inline void setup_percpu_segment(int cpu)
{
@@ -346,11 +289,6 @@ static inline void setup_percpu_segment(int cpu)
#endif
}
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
void __init setup_per_cpu_areas(void)
{
size_t static_size = __per_cpu_end - __per_cpu_start;
@@ -367,9 +305,26 @@ void __init setup_per_cpu_areas(void)
* of large page mappings. Please read comments on top of
* each allocator for details.
*/
- ret = setup_pcpu_remap(static_size);
- if (ret < 0)
- ret = setup_pcpu_embed(static_size);
+ ret = -EINVAL;
+ if (strlen(pcpu_chosen_alloc)) {
+ if (strcmp(pcpu_chosen_alloc, "4k")) {
+ if (!strcmp(pcpu_chosen_alloc, "lpage"))
+ ret = setup_pcpu_lpage(static_size, true);
+ else if (!strcmp(pcpu_chosen_alloc, "embed"))
+ ret = setup_pcpu_embed(static_size, true);
+ else
+ pr_warning("PERCPU: unknown allocator %s "
+ "specified\n", pcpu_chosen_alloc);
+ if (ret < 0)
+ pr_warning("PERCPU: %s allocator failed (%zd), "
+ "falling back to 4k\n",
+ pcpu_chosen_alloc, ret);
+ }
+ } else {
+ ret = setup_pcpu_lpage(static_size, false);
+ if (ret < 0)
+ ret = setup_pcpu_embed(static_size, false);
+ }
if (ret < 0)
ret = setup_pcpu_4k(static_size);
if (ret < 0)
@@ -381,7 +336,8 @@ void __init setup_per_cpu_areas(void)
/* alrighty, percpu areas up and running */
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
- per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+ per_cpu_offset(cpu) =
+ delta + pcpu_unit_map[cpu] * pcpu_unit_size;
per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
per_cpu(cpu_number, cpu) = cpu;
setup_percpu_segment(cpu);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2fecda69ee64..70bd79be115e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1057,12 +1057,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
#endif
current_thread_info()->cpu = 0; /* needed? */
for_each_possible_cpu(i) {
- alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
- alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
- alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
- cpumask_clear(per_cpu(cpu_core_map, i));
- cpumask_clear(per_cpu(cpu_sibling_map, i));
- cpumask_clear(cpu_data(i).llc_shared_map);
+ zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
}
set_cpu_sibling_map(0);
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 124d40c575df..8ccabb8a2f6a 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -711,7 +711,6 @@ uv_activation_descriptor_init(int node, int pnode)
unsigned long pa;
unsigned long m;
unsigned long n;
- unsigned long mmr_image;
struct bau_desc *adp;
struct bau_desc *ad2;
@@ -727,12 +726,8 @@ uv_activation_descriptor_init(int node, int pnode)
n = pa >> uv_nshift;
m = pa & uv_mmask;
- mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
- if (mmr_image) {
- uv_write_global_mmr64(pnode, (unsigned long)
- UVH_LB_BAU_SB_DESCRIPTOR_BASE,
- (n << UV_DESC_BASE_PNODE_SHIFT | m));
- }
+ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
+ (n << UV_DESC_BASE_PNODE_SHIFT | m));
/*
* initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a0f48f5671c0..22a5a6d9d371 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -346,6 +346,9 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
show_registers(regs);
+ if (panic_on_io_nmi)
+ panic("NMI IOCK error: Not continuing");
+
/* Re-enable the IOCK line, wait for a few seconds */
reason = (reason & 0xf) | 8;
outb(reason, 0x61);
@@ -813,6 +816,28 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
}
/*
+ * __math_state_restore assumes that cr0.TS is already clear and the
+ * fpu state is all ready for use. Used during context switch.
+ */
+void __math_state_restore(void)
+{
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = thread->task;
+
+ /*
+ * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+ */
+ if (unlikely(restore_fpu_checking(tsk))) {
+ stts();
+ force_sig(SIGSEGV, tsk);
+ return;
+ }
+
+ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
+ tsk->fpu_counter++;
+}
+
+/*
* 'math_state_restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task
*
@@ -843,17 +868,8 @@ asmlinkage void math_state_restore(void)
}
clts(); /* Allow maths ops (or we recurse) */
- /*
- * Paranoid restore. send a SIGSEGV if we fail to restore the state.
- */
- if (unlikely(restore_fpu_checking(tsk))) {
- stts();
- force_sig(SIGSEGV, tsk);
- return;
- }
- thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
- tsk->fpu_counter++;
+ __math_state_restore();
}
EXPORT_SYMBOL_GPL(math_state_restore);
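
traps.c splits the common FPU-restore tail out of math_state_restore() into __math_state_restore(), whose comment states the precondition (CR0.TS already clear) so the context-switch path can reuse it without the clts(). A generic standalone sketch of that inner-helper/outer-wrapper shape; all names and the hw_ready flag are invented stand-ins, not the kernel's FPU code.

/* Illustrative refactor: shared tail hoisted into a helper with a
 * documented precondition; names here are made up. */
#include <stdio.h>
#include <stdbool.h>

static bool hw_ready;              /* stands in for "CR0.TS already cleared" */
static int  use_count;

/* Precondition: caller has already made the hardware ready. */
static void __restore_state(void)
{
	if (!hw_ready) {
		fprintf(stderr, "precondition violated\n");
		return;
	}
	use_count++;               /* the common tail both callers share */
}

/* Full path: establish the precondition, then reuse the tail. */
static void restore_state(void)
{
	hw_ready = true;           /* analogue of clts() */
	__restore_state();
}

int main(void)
{
	restore_state();           /* trap path: does the setup itself */
	__restore_state();         /* fast path: setup already done elsewhere */
	printf("use_count = %d\n", use_count);
	return 0;
}
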
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8600a09e0c6c..b84e571f4175 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -1,12 +1,8 @@
#
# KVM configuration
#
-config HAVE_KVM
- bool
-config HAVE_KVM_IRQCHIP
- bool
- default y
+source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -29,6 +25,9 @@ config KVM
select PREEMPT_NOTIFIERS
select MMU_NOTIFIER
select ANON_INODES
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_EVENTFD
+ select KVM_APIC_ARCHITECTURE
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -63,18 +62,6 @@ config KVM_AMD
To compile this as a module, choose M here: the module
will be called kvm-amd.
-config KVM_TRACE
- bool "KVM trace support"
- depends on KVM && SYSFS
- select MARKERS
- select RELAY
- select DEBUG_FS
- default n
- ---help---
- This option allows reading a trace of kvm-related events through
- relayfs. Note the ABI is not considered stable and will be
- modified in future updates.
-
# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/lguest/Kconfig
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index b43c4efafe80..afaaa7627d95 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,22 +1,19 @@
-#
-# Makefile for Kernel-based Virtual Machine module
-#
-
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
- coalesced_mmio.o irq_comm.o)
-ifeq ($(CONFIG_KVM_TRACE),y)
-common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
-endif
-ifeq ($(CONFIG_IOMMU_API),y)
-common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
-endif
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
-kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o \
- i8254.o timer.o
-obj-$(CONFIG_KVM) += kvm.o
-kvm-intel-objs = vmx.o
-obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
-kvm-amd-objs = svm.o
-obj-$(CONFIG_KVM_AMD) += kvm-amd.o
+CFLAGS_x86.o := -I.
+CFLAGS_svm.o := -I.
+CFLAGS_vmx.o := -I.
+
+kvm-y += $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
+ coalesced_mmio.o irq_comm.o eventfd.o)
+kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
+
+kvm-y += x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o \
+ i8254.o timer.o
+kvm-intel-y += vmx.o
+kvm-amd-y += svm.o
+
+obj-$(CONFIG_KVM) += kvm.o
+obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
+obj-$(CONFIG_KVM_AMD) += kvm-amd.o
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 4d6f0d293ee2..14bbaae72add 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -228,7 +228,7 @@ int pit_has_pending_timer(struct kvm_vcpu *vcpu)
{
struct kvm_pit *pit = vcpu->kvm->arch.vpit;
- if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack)
+ if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
return atomic_read(&pit->pit_state.pit_timer.pending);
return 0;
}
@@ -249,7 +249,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
struct kvm_pit *pit = vcpu->kvm->arch.vpit;
struct hrtimer *timer;
- if (vcpu->vcpu_id != 0 || !pit)
+ if (!kvm_vcpu_is_bsp(vcpu) || !pit)
return;
timer = &pit->pit_state.pit_timer.timer;
@@ -291,7 +291,7 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
pt->timer.function = kvm_timer_fn;
pt->t_ops = &kpit_ops;
pt->kvm = ps->pit->kvm;
- pt->vcpu_id = 0;
+ pt->vcpu = pt->kvm->bsp_vcpu;
atomic_set(&pt->pending, 0);
ps->irq_ack = 1;
@@ -342,20 +342,36 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
{
- mutex_lock(&kvm->arch.vpit->pit_state.lock);
pit_load_count(kvm, channel, val);
- mutex_unlock(&kvm->arch.vpit->pit_state.lock);
}
-static void pit_ioport_write(struct kvm_io_device *this,
- gpa_t addr, int len, const void *data)
+static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
- struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ return container_of(dev, struct kvm_pit, dev);
+}
+
+static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
+{
+ return container_of(dev, struct kvm_pit, speaker_dev);
+}
+
+static inline int pit_in_range(gpa_t addr)
+{
+ return ((addr >= KVM_PIT_BASE_ADDRESS) &&
+ (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
+}
+
+static int pit_ioport_write(struct kvm_io_device *this,
+ gpa_t addr, int len, const void *data)
+{
+ struct kvm_pit *pit = dev_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
struct kvm *kvm = pit->kvm;
int channel, access;
struct kvm_kpit_channel_state *s;
u32 val = *(u32 *) data;
+ if (!pit_in_range(addr))
+ return -EOPNOTSUPP;
val &= 0xff;
addr &= KVM_PIT_CHANNEL_MASK;
@@ -418,16 +434,19 @@ static void pit_ioport_write(struct kvm_io_device *this,
}
mutex_unlock(&pit_state->lock);
+ return 0;
}
-static void pit_ioport_read(struct kvm_io_device *this,
- gpa_t addr, int len, void *data)
+static int pit_ioport_read(struct kvm_io_device *this,
+ gpa_t addr, int len, void *data)
{
- struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_pit *pit = dev_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
struct kvm *kvm = pit->kvm;
int ret, count;
struct kvm_kpit_channel_state *s;
+ if (!pit_in_range(addr))
+ return -EOPNOTSUPP;
addr &= KVM_PIT_CHANNEL_MASK;
s = &pit_state->channels[addr];
@@ -482,37 +501,36 @@ static void pit_ioport_read(struct kvm_io_device *this,
memcpy(data, (char *)&ret, len);
mutex_unlock(&pit_state->lock);
+ return 0;
}
-static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
- int len, int is_write)
-{
- return ((addr >= KVM_PIT_BASE_ADDRESS) &&
- (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
-}
-
-static void speaker_ioport_write(struct kvm_io_device *this,
- gpa_t addr, int len, const void *data)
+static int speaker_ioport_write(struct kvm_io_device *this,
+ gpa_t addr, int len, const void *data)
{
- struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
struct kvm *kvm = pit->kvm;
u32 val = *(u32 *) data;
+ if (addr != KVM_SPEAKER_BASE_ADDRESS)
+ return -EOPNOTSUPP;
mutex_lock(&pit_state->lock);
pit_state->speaker_data_on = (val >> 1) & 1;
pit_set_gate(kvm, 2, val & 1);
mutex_unlock(&pit_state->lock);
+ return 0;
}
-static void speaker_ioport_read(struct kvm_io_device *this,
- gpa_t addr, int len, void *data)
+static int speaker_ioport_read(struct kvm_io_device *this,
+ gpa_t addr, int len, void *data)
{
- struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
struct kvm *kvm = pit->kvm;
unsigned int refresh_clock;
int ret;
+ if (addr != KVM_SPEAKER_BASE_ADDRESS)
+ return -EOPNOTSUPP;
/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
@@ -524,12 +542,7 @@ static void speaker_ioport_read(struct kvm_io_device *this,
len = sizeof(ret);
memcpy(data, (char *)&ret, len);
mutex_unlock(&pit_state->lock);
-}
-
-static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
- int len, int is_write)
-{
- return (addr == KVM_SPEAKER_BASE_ADDRESS);
+ return 0;
}
void kvm_pit_reset(struct kvm_pit *pit)
@@ -560,7 +573,18 @@ static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
}
}
-struct kvm_pit *kvm_create_pit(struct kvm *kvm)
+static const struct kvm_io_device_ops pit_dev_ops = {
+ .read = pit_ioport_read,
+ .write = pit_ioport_write,
+};
+
+static const struct kvm_io_device_ops speaker_dev_ops = {
+ .read = speaker_ioport_read,
+ .write = speaker_ioport_write,
+};
+
+/* Caller must have writers lock on slots_lock */
+struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
struct kvm_pit *pit;
struct kvm_kpit_state *pit_state;
@@ -579,19 +603,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
mutex_lock(&pit->pit_state.lock);
spin_lock_init(&pit->pit_state.inject_lock);
- /* Initialize PIO device */
- pit->dev.read = pit_ioport_read;
- pit->dev.write = pit_ioport_write;
- pit->dev.in_range = pit_in_range;
- pit->dev.private = pit;
- kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
-
- pit->speaker_dev.read = speaker_ioport_read;
- pit->speaker_dev.write = speaker_ioport_write;
- pit->speaker_dev.in_range = speaker_in_range;
- pit->speaker_dev.private = pit;
- kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
-
kvm->arch.vpit = pit;
pit->kvm = kvm;
@@ -610,6 +621,14 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
pit->mask_notifier.func = pit_mask_notifer;
kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+ kvm_iodevice_init(&pit->dev, &pit_dev_ops);
+ __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
+
+ if (flags & KVM_PIT_SPEAKER_DUMMY) {
+ kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
+ __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
+ }
+
return pit;
}
@@ -634,10 +653,10 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
struct kvm_vcpu *vcpu;
int i;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->irq_lock);
kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->irq_lock);
/*
* Provides NMI watchdog support via Virtual Wire mode.
@@ -649,11 +668,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
* VCPU0, and only if its LVT0 is in EXTINT mode.
*/
if (kvm->arch.vapics_in_nmi_mode > 0)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (vcpu)
- kvm_apic_nmi_wd_deliver(vcpu);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_apic_nmi_wd_deliver(vcpu);
}
void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
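
The i8254.c conversion drops the per-device in_range and private fields: handlers recover their containing object with container_of() and return -EOPNOTSUPP for addresses they do not own, so the bus can simply offer each access to every registered device. A self-contained model of that dispatch pattern follows; the io_device/bus types, the port numbers, and the toy bus loop are illustrative, not the kvm_io_bus API.

/* Illustrative container_of-based device dispatch; not kernel code. */
#include <stddef.h>
#include <stdio.h>

#define EOPNOTSUPP 95
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct io_device;
struct io_device_ops {
	int (*write)(struct io_device *dev, unsigned long addr, int val);
};
struct io_device { const struct io_device_ops *ops; };

struct pit {
	unsigned last_val;
	struct io_device dev;          /* embedded, like kvm_pit.dev */
	struct io_device speaker_dev;
};

static int pit_write(struct io_device *this, unsigned long addr, int val)
{
	struct pit *pit = container_of(this, struct pit, dev);

	if (addr < 0x40 || addr > 0x43)
		return -EOPNOTSUPP;    /* not ours, let the bus keep looking */
	pit->last_val = val;
	return 0;
}

static int speaker_write(struct io_device *this, unsigned long addr, int val)
{
	struct pit *pit = container_of(this, struct pit, speaker_dev);

	if (addr != 0x61)
		return -EOPNOTSUPP;
	pit->last_val = val | 0x100;
	return 0;
}

static const struct io_device_ops pit_ops     = { .write = pit_write };
static const struct io_device_ops speaker_ops = { .write = speaker_write };

/* The "bus": try every device until one accepts the access. */
static int bus_write(struct io_device **devs, int n, unsigned long addr, int val)
{
	for (int i = 0; i < n; i++) {
		int r = devs[i]->ops->write(devs[i], addr, val);
		if (r != -EOPNOTSUPP)
			return r;
	}
	return -EOPNOTSUPP;
}

int main(void)
{
	struct pit pit = { .dev.ops = &pit_ops, .speaker_dev.ops = &speaker_ops };
	struct io_device *bus[] = { &pit.dev, &pit.speaker_dev };

	bus_write(bus, 2, 0x40, 7);
	printf("pit write:     last_val=%#x\n", pit.last_val);
	bus_write(bus, 2, 0x61, 1);
	printf("speaker write: last_val=%#x\n", pit.last_val);
	printf("unclaimed:     %d\n", bus_write(bus, 2, 0x80, 0));
	return 0;
}
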
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index bbd863ff60b7..b2670180f225 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -50,7 +50,7 @@ struct kvm_pit {
void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val);
-struct kvm_pit *kvm_create_pit(struct kvm *kvm);
+struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
void kvm_free_pit(struct kvm *kvm);
void kvm_pit_reset(struct kvm_pit *pit);
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1ccb50c74f18..1d1bb75dc7bc 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -57,7 +57,7 @@ static void pic_unlock(struct kvm_pic *s)
}
if (wakeup) {
- vcpu = s->kvm->vcpus[0];
+ vcpu = s->kvm->bsp_vcpu;
if (vcpu)
kvm_vcpu_kick(vcpu);
}
@@ -72,8 +72,10 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
struct kvm_pic *s = pic_irqchip(kvm);
+ pic_lock(s);
s->pics[0].isr_ack = 0xff;
s->pics[1].isr_ack = 0xff;
+ pic_unlock(s);
}
/*
@@ -252,7 +254,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
{
int irq, irqbase, n;
struct kvm *kvm = s->pics_state->irq_request_opaque;
- struct kvm_vcpu *vcpu0 = kvm->vcpus[0];
+ struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu;
if (s == &s->pics_state->pics[0])
irqbase = 0;
@@ -428,8 +430,7 @@ static u32 elcr_ioport_read(void *opaque, u32 addr1)
return s->elcr;
}
-static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
- int len, int is_write)
+static int picdev_in_range(gpa_t addr)
{
switch (addr) {
case 0x20:
@@ -444,16 +445,23 @@ static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
}
}
-static void picdev_write(struct kvm_io_device *this,
+static inline struct kvm_pic *to_pic(struct kvm_io_device *dev)
+{
+ return container_of(dev, struct kvm_pic, dev);
+}
+
+static int picdev_write(struct kvm_io_device *this,
gpa_t addr, int len, const void *val)
{
- struct kvm_pic *s = this->private;
+ struct kvm_pic *s = to_pic(this);
unsigned char data = *(unsigned char *)val;
+ if (!picdev_in_range(addr))
+ return -EOPNOTSUPP;
if (len != 1) {
if (printk_ratelimit())
printk(KERN_ERR "PIC: non byte write\n");
- return;
+ return 0;
}
pic_lock(s);
switch (addr) {
@@ -469,18 +477,21 @@ static void picdev_write(struct kvm_io_device *this,
break;
}
pic_unlock(s);
+ return 0;
}
-static void picdev_read(struct kvm_io_device *this,
- gpa_t addr, int len, void *val)
+static int picdev_read(struct kvm_io_device *this,
+ gpa_t addr, int len, void *val)
{
- struct kvm_pic *s = this->private;
+ struct kvm_pic *s = to_pic(this);
unsigned char data = 0;
+ if (!picdev_in_range(addr))
+ return -EOPNOTSUPP;
if (len != 1) {
if (printk_ratelimit())
printk(KERN_ERR "PIC: non byte read\n");
- return;
+ return 0;
}
pic_lock(s);
switch (addr) {
@@ -497,6 +508,7 @@ static void picdev_read(struct kvm_io_device *this,
}
*(unsigned char *)val = data;
pic_unlock(s);
+ return 0;
}
/*
@@ -505,7 +517,7 @@ static void picdev_read(struct kvm_io_device *this,
static void pic_irq_request(void *opaque, int level)
{
struct kvm *kvm = opaque;
- struct kvm_vcpu *vcpu = kvm->vcpus[0];
+ struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
struct kvm_pic *s = pic_irqchip(kvm);
int irq = pic_get_irq(&s->pics[0]);
@@ -516,6 +528,11 @@ static void pic_irq_request(void *opaque, int level)
}
}
+static const struct kvm_io_device_ops picdev_ops = {
+ .read = picdev_read,
+ .write = picdev_write,
+};
+
struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
struct kvm_pic *s;
@@ -534,10 +551,7 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
/*
* Initialize PIO device
*/
- s->dev.read = picdev_read;
- s->dev.write = picdev_write;
- s->dev.in_range = picdev_in_range;
- s->dev.private = s;
- kvm_io_bus_register_dev(&kvm->pio_bus, &s->dev);
+ kvm_iodevice_init(&s->dev, &picdev_ops);
+ kvm_io_bus_register_dev(kvm, &kvm->pio_bus, &s->dev);
return s;
}
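
kvm_pic_clear_isr_ack() above now takes the pic lock so both isr_ack bytes change inside one critical section. A small pthread analogue of updating a two-field invariant under a single lock; the struct and field names are made up for the example.

/* Illustrative: update two related fields under one lock so other
 * lock holders never observe a half-cleared pair. */
#include <pthread.h>
#include <stdio.h>

struct pic_state {
	pthread_mutex_t lock;
	unsigned char isr_ack[2];
};

static void clear_isr_ack(struct pic_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->isr_ack[0] = 0xff;          /* both halves change together */
	s->isr_ack[1] = 0xff;          /* with respect to other lock holders */
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct pic_state s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	clear_isr_ack(&s);
	printf("isr_ack = %#x %#x\n", s.isr_ack[0], s.isr_ack[1]);
	return 0;
}
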
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 1ff819dce7d3..7bcc5b6a4403 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -29,4 +29,13 @@ static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
+static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
+{
+ if (!test_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_avail))
+ kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
+
+ return vcpu->arch.pdptrs[index];
+}
+
#endif
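
kvm_pdptr_read() serves PDPTRs from a per-vcpu cache and only calls back into the vendor code when the VCPU_EXREG_PDPTR bit is absent from regs_avail. A standalone sketch of that lazy-fill pattern with an availability bitmask; the register values and the refill counter are fabricated for the example.

/* Illustrative lazy register cache with an "available" bitmask. */
#include <stdio.h>
#include <stdint.h>

enum { REG_PDPTR = 0 };              /* one cached register class */

struct vcpu {
	uint32_t regs_avail;         /* bit set => cache is valid */
	uint64_t pdptrs[4];
	int      refills;            /* counts expensive backend reads */
};

/* Stand-in for the vendor callback that reads state out of hardware. */
static void cache_reg(struct vcpu *v, int reg)
{
	if (reg == REG_PDPTR) {
		for (int i = 0; i < 4; i++)
			v->pdptrs[i] = 0x1000u * (i + 1) | 1;  /* fake values */
		v->regs_avail |= 1u << REG_PDPTR;
		v->refills++;
	}
}

static uint64_t pdptr_read(struct vcpu *v, int index)
{
	if (!(v->regs_avail & (1u << REG_PDPTR)))
		cache_reg(v, REG_PDPTR);          /* fill once, on demand */
	return v->pdptrs[index];
}

int main(void)
{
	struct vcpu v = { 0 };

	for (int i = 0; i < 4; i++)
		printf("pdptr[%d] = %#llx\n", i,
		       (unsigned long long)pdptr_read(&v, i));
	printf("backend refills: %d\n", v.refills);   /* 1, not 4 */
	return 0;
}
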
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
deleted file mode 100644
index ed66e4c078dc..000000000000
--- a/arch/x86/kvm/kvm_svm.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __KVM_SVM_H
-#define __KVM_SVM_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/kvm_host.h>
-#include <asm/msr.h>
-
-#include <asm/svm.h>
-
-static const u32 host_save_user_msrs[] = {
-#ifdef CONFIG_X86_64
- MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
- MSR_FS_BASE,
-#endif
- MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
-};
-
-#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
-
-struct kvm_vcpu;
-
-struct vcpu_svm {
- struct kvm_vcpu vcpu;
- struct vmcb *vmcb;
- unsigned long vmcb_pa;
- struct svm_cpu_data *svm_data;
- uint64_t asid_generation;
-
- u64 next_rip;
-
- u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
- u64 host_gs_base;
- unsigned long host_cr2;
-
- u32 *msrpm;
- struct vmcb *hsave;
- u64 hsave_msr;
-
- u64 nested_vmcb;
-
- /* These are the merged vectors */
- u32 *nested_msrpm;
-
- /* gpa pointers to the real vectors */
- u64 nested_vmcb_msrpm;
-};
-
-#endif
-
diff --git a/arch/x86/kvm/kvm_timer.h b/arch/x86/kvm/kvm_timer.h
index 26bd6ba74e1c..55c7524dda54 100644
--- a/arch/x86/kvm/kvm_timer.h
+++ b/arch/x86/kvm/kvm_timer.h
@@ -6,7 +6,7 @@ struct kvm_timer {
bool reinject;
struct kvm_timer_ops *t_ops;
struct kvm *kvm;
- int vcpu_id;
+ struct kvm_vcpu *vcpu;
};
struct kvm_timer_ops {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ae99d83f81a3..6f559d689af9 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -32,8 +32,11 @@
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
+#include <asm/apicdef.h>
#include "kvm_cache_regs.h"
#include "irq.h"
+#include "trace.h"
+#include "x86.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -141,6 +144,26 @@ static inline int apic_lvt_nmi_mode(u32 lvt_val)
return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}
+void kvm_apic_set_version(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ struct kvm_cpuid_entry2 *feat;
+ u32 v = APIC_VERSION;
+
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return;
+
+ feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
+ if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
+ v |= APIC_LVR_DIRECTED_EOI;
+ apic_set_reg(apic, APIC_LVR, v);
+}
+
+static inline int apic_x2apic_mode(struct kvm_lapic *apic)
+{
+ return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
+}
+
static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
LVT_MASK | APIC_LVT_TIMER_PERIODIC, /* LVTT */
LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
@@ -165,36 +188,52 @@ static int find_highest_vector(void *bitmap)
static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
{
+ apic->irr_pending = true;
return apic_test_and_set_vector(vec, apic->regs + APIC_IRR);
}
-static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
+static inline int apic_search_irr(struct kvm_lapic *apic)
{
- apic_clear_vector(vec, apic->regs + APIC_IRR);
+ return find_highest_vector(apic->regs + APIC_IRR);
}
static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
int result;
- result = find_highest_vector(apic->regs + APIC_IRR);
+ if (!apic->irr_pending)
+ return -1;
+
+ result = apic_search_irr(apic);
ASSERT(result == -1 || result >= 16);
return result;
}
+static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
+{
+ apic->irr_pending = false;
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
+ if (apic_search_irr(apic) != -1)
+ apic->irr_pending = true;
+}
+
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int highest_irr;
+ /* This may race with setting of irr in __apic_accept_irq() and
+ * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
+ * will cause vmexit immediately and the value will be recalculated
+ * on the next vmentry.
+ */
if (!apic)
return 0;
highest_irr = apic_find_highest_irr(apic);
return highest_irr;
}
-EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode);
@@ -251,7 +290,12 @@ int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
int result = 0;
- u8 logical_id;
+ u32 logical_id;
+
+ if (apic_x2apic_mode(apic)) {
+ logical_id = apic_get_reg(apic, APIC_LDR);
+ return logical_id & mda;
+ }
logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR));
@@ -425,7 +469,11 @@ static void apic_set_eoi(struct kvm_lapic *apic)
trigger_mode = IOAPIC_LEVEL_TRIG;
else
trigger_mode = IOAPIC_EDGE_TRIG;
- kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+ if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)) {
+ mutex_lock(&apic->vcpu->kvm->irq_lock);
+ kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+ mutex_unlock(&apic->vcpu->kvm->irq_lock);
+ }
}
static void apic_send_ipi(struct kvm_lapic *apic)
@@ -440,7 +488,10 @@ static void apic_send_ipi(struct kvm_lapic *apic)
irq.level = icr_low & APIC_INT_ASSERT;
irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
irq.shorthand = icr_low & APIC_SHORT_MASK;
- irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
+ if (apic_x2apic_mode(apic))
+ irq.dest_id = icr_high;
+ else
+ irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
apic_debug("icr_high 0x%x, icr_low 0x%x, "
"short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
@@ -449,7 +500,9 @@ static void apic_send_ipi(struct kvm_lapic *apic)
irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
irq.vector);
+ mutex_lock(&apic->vcpu->kvm->irq_lock);
kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
+ mutex_unlock(&apic->vcpu->kvm->irq_lock);
}
static u32 apic_get_tmcct(struct kvm_lapic *apic)
@@ -495,12 +548,13 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
u32 val = 0;
- KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
-
if (offset >= LAPIC_MMIO_LENGTH)
return 0;
switch (offset) {
+ case APIC_ID:
+ apic_get_reg(apic, offset);
+ break;
case APIC_ARBPRI:
printk(KERN_WARNING "Access APIC ARBPRI register "
"which is for P6\n");
@@ -522,21 +576,35 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
return val;
}
-static void apic_mmio_read(struct kvm_io_device *this,
- gpa_t address, int len, void *data)
+static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
+{
+ return container_of(dev, struct kvm_lapic, dev);
+}
+
+static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
+ void *data)
{
- struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
- unsigned int offset = address - apic->base_address;
unsigned char alignment = offset & 0xf;
u32 result;
+ /* this bitmask has a bit cleared for each reserved register */
+ static const u64 rmask = 0x43ff01ffffffe70cULL;
if ((alignment + len) > 4) {
- printk(KERN_ERR "KVM_APIC_READ: alignment error %lx %d",
- (unsigned long)address, len);
- return;
+ printk(KERN_ERR "KVM_APIC_READ: alignment error %x %d\n",
+ offset, len);
+ return 1;
+ }
+
+ if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
+ printk(KERN_ERR "KVM_APIC_READ: read reserved register %x\n",
+ offset);
+ return 1;
}
+
result = __apic_read(apic, offset & ~0xf);
+ trace_kvm_apic_read(offset, result);
+
switch (len) {
case 1:
case 2:
@@ -548,6 +616,28 @@ static void apic_mmio_read(struct kvm_io_device *this,
"should be 1,2, or 4 instead\n", len);
break;
}
+ return 0;
+}
+
+static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
+{
+ return apic_hw_enabled(apic) &&
+ addr >= apic->base_address &&
+ addr < apic->base_address + LAPIC_MMIO_LENGTH;
+}
+
+static int apic_mmio_read(struct kvm_io_device *this,
+ gpa_t address, int len, void *data)
+{
+ struct kvm_lapic *apic = to_lapic(this);
+ u32 offset = address - apic->base_address;
+
+ if (!apic_mmio_in_range(apic, address))
+ return -EOPNOTSUPP;
+
+ apic_reg_read(apic, offset, len, data);
+
+ return 0;
}
static void update_divide_count(struct kvm_lapic *apic)
@@ -603,40 +693,18 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
}
-static void apic_mmio_write(struct kvm_io_device *this,
- gpa_t address, int len, const void *data)
+static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
- struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
- unsigned int offset = address - apic->base_address;
- unsigned char alignment = offset & 0xf;
- u32 val;
-
- /*
- * APIC register must be aligned on 128-bits boundary.
- * 32/64/128 bits registers must be accessed thru 32 bits.
- * Refer SDM 8.4.1
- */
- if (len != 4 || alignment) {
- /* Don't shout loud, $infamous_os would cause only noise. */
- apic_debug("apic write: bad size=%d %lx\n",
- len, (long)address);
- return;
- }
-
- val = *(u32 *) data;
-
- /* too common printing */
- if (offset != APIC_EOI)
- apic_debug("%s: offset 0x%x with length 0x%x, and value is "
- "0x%x\n", __func__, offset, len, val);
-
- offset &= 0xff0;
+ int ret = 0;
- KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
+ trace_kvm_apic_write(reg, val);
- switch (offset) {
+ switch (reg) {
case APIC_ID: /* Local APIC ID */
- apic_set_reg(apic, APIC_ID, val);
+ if (!apic_x2apic_mode(apic))
+ apic_set_reg(apic, APIC_ID, val);
+ else
+ ret = 1;
break;
case APIC_TASKPRI:
@@ -649,15 +717,24 @@ static void apic_mmio_write(struct kvm_io_device *this,
break;
case APIC_LDR:
- apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
+ if (!apic_x2apic_mode(apic))
+ apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
+ else
+ ret = 1;
break;
case APIC_DFR:
- apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
+ if (!apic_x2apic_mode(apic))
+ apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
+ else
+ ret = 1;
break;
- case APIC_SPIV:
- apic_set_reg(apic, APIC_SPIV, val & 0x3ff);
+ case APIC_SPIV: {
+ u32 mask = 0x3ff;
+ if (apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
+ mask |= APIC_SPIV_DIRECTED_EOI;
+ apic_set_reg(apic, APIC_SPIV, val & mask);
if (!(val & APIC_SPIV_APIC_ENABLED)) {
int i;
u32 lvt_val;
@@ -672,7 +749,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
}
break;
-
+ }
case APIC_ICR:
/* No delay here, so we always clear the pending bit */
apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
@@ -680,7 +757,9 @@ static void apic_mmio_write(struct kvm_io_device *this,
break;
case APIC_ICR2:
- apic_set_reg(apic, APIC_ICR2, val & 0xff000000);
+ if (!apic_x2apic_mode(apic))
+ val &= 0xff000000;
+ apic_set_reg(apic, APIC_ICR2, val);
break;
case APIC_LVT0:
@@ -694,8 +773,8 @@ static void apic_mmio_write(struct kvm_io_device *this,
if (!apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
- val &= apic_lvt_mask[(offset - APIC_LVTT) >> 4];
- apic_set_reg(apic, offset, val);
+ val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
+ apic_set_reg(apic, reg, val);
break;
@@ -703,7 +782,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
hrtimer_cancel(&apic->lapic_timer.timer);
apic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
- return;
+ break;
case APIC_TDCR:
if (val & 4)
@@ -712,27 +791,59 @@ static void apic_mmio_write(struct kvm_io_device *this,
update_divide_count(apic);
break;
+ case APIC_ESR:
+ if (apic_x2apic_mode(apic) && val != 0) {
+ printk(KERN_ERR "KVM_WRITE:ESR not zero %x\n", val);
+ ret = 1;
+ }
+ break;
+
+ case APIC_SELF_IPI:
+ if (apic_x2apic_mode(apic)) {
+ apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
+ } else
+ ret = 1;
+ break;
default:
- apic_debug("Local APIC Write to read-only register %x\n",
- offset);
+ ret = 1;
break;
}
-
+ if (ret)
+ apic_debug("Local APIC Write to read-only register %x\n", reg);
+ return ret;
}
-static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr,
- int len, int size)
+static int apic_mmio_write(struct kvm_io_device *this,
+ gpa_t address, int len, const void *data)
{
- struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
- int ret = 0;
+ struct kvm_lapic *apic = to_lapic(this);
+ unsigned int offset = address - apic->base_address;
+ u32 val;
+ if (!apic_mmio_in_range(apic, address))
+ return -EOPNOTSUPP;
- if (apic_hw_enabled(apic) &&
- (addr >= apic->base_address) &&
- (addr < (apic->base_address + LAPIC_MMIO_LENGTH)))
- ret = 1;
+ /*
+ * APIC register must be aligned on 128-bits boundary.
+ * 32/64/128 bits registers must be accessed thru 32 bits.
+ * Refer SDM 8.4.1
+ */
+ if (len != 4 || (offset & 0xf)) {
+ /* Don't shout loud, $infamous_os would cause only noise. */
+ apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
+ return 0;
+ }
- return ret;
+ val = *(u32*)data;
+
+ /* too common printing */
+ if (offset != APIC_EOI)
+ apic_debug("%s: offset 0x%x with length 0x%x, and value is "
+ "0x%x\n", __func__, offset, len, val);
+
+ apic_reg_write(apic, offset & 0xff0, val);
+
+ return 0;
}
void kvm_free_lapic(struct kvm_vcpu *vcpu)
@@ -763,7 +874,6 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
| (apic_get_reg(apic, APIC_TASKPRI) & 4));
}
-EXPORT_SYMBOL_GPL(kvm_lapic_set_tpr);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
@@ -776,7 +886,6 @@ u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
return (tpr & 0xf0) >> 4;
}
-EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
@@ -787,10 +896,16 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
vcpu->arch.apic_base = value;
return;
}
- if (apic->vcpu->vcpu_id)
+
+ if (!kvm_vcpu_is_bsp(apic->vcpu))
value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
+ if (apic_x2apic_mode(apic)) {
+ u32 id = kvm_apic_id(apic);
+ u32 ldr = ((id & ~0xf) << 16) | (1 << (id & 0xf));
+ apic_set_reg(apic, APIC_LDR, ldr);
+ }
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
@@ -800,12 +915,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
}
-u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.apic_base;
-}
-EXPORT_SYMBOL_GPL(kvm_lapic_get_base);
-
void kvm_lapic_reset(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
@@ -821,7 +930,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
hrtimer_cancel(&apic->lapic_timer.timer);
apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
- apic_set_reg(apic, APIC_LVR, APIC_VERSION);
+ kvm_apic_set_version(apic->vcpu);
for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
@@ -842,9 +951,10 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
}
+ apic->irr_pending = false;
update_divide_count(apic);
atomic_set(&apic->lapic_timer.pending, 0);
- if (vcpu->vcpu_id == 0)
+ if (kvm_vcpu_is_bsp(vcpu))
vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
apic_update_ppr(apic);
@@ -855,7 +965,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
vcpu, kvm_apic_id(apic),
vcpu->arch.apic_base, apic->base_address);
}
-EXPORT_SYMBOL_GPL(kvm_lapic_reset);
bool kvm_apic_present(struct kvm_vcpu *vcpu)
{
@@ -866,7 +975,6 @@ int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
{
return kvm_apic_present(vcpu) && apic_sw_enabled(vcpu->arch.apic);
}
-EXPORT_SYMBOL_GPL(kvm_lapic_enabled);
/*
*----------------------------------------------------------------------
@@ -917,6 +1025,11 @@ static struct kvm_timer_ops lapic_timer_ops = {
.is_periodic = lapic_is_periodic,
};
+static const struct kvm_io_device_ops apic_mmio_ops = {
+ .read = apic_mmio_read,
+ .write = apic_mmio_write,
+};
+
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
@@ -945,16 +1058,13 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
apic->lapic_timer.timer.function = kvm_timer_fn;
apic->lapic_timer.t_ops = &lapic_timer_ops;
apic->lapic_timer.kvm = vcpu->kvm;
- apic->lapic_timer.vcpu_id = vcpu->vcpu_id;
+ apic->lapic_timer.vcpu = vcpu;
apic->base_address = APIC_DEFAULT_PHYS_BASE;
vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
kvm_lapic_reset(vcpu);
- apic->dev.read = apic_mmio_read;
- apic->dev.write = apic_mmio_write;
- apic->dev.in_range = apic_mmio_range;
- apic->dev.private = apic;
+ kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
return 0;
nomem_free_apic:
@@ -962,7 +1072,6 @@ nomem_free_apic:
nomem:
return -ENOMEM;
}
-EXPORT_SYMBOL_GPL(kvm_create_lapic);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
@@ -985,7 +1094,7 @@ int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
int r = 0;
- if (vcpu->vcpu_id == 0) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
if (!apic_hw_enabled(vcpu->arch.apic))
r = 1;
if ((lvt0 & APIC_LVT_MASKED) == 0 &&
@@ -1025,7 +1134,8 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
apic->base_address = vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
- apic_set_reg(apic, APIC_LVR, APIC_VERSION);
+ kvm_apic_set_version(vcpu);
+
apic_update_ppr(apic);
hrtimer_cancel(&apic->lapic_timer.timer);
update_divide_count(apic);
@@ -1092,3 +1202,35 @@ void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
vcpu->arch.apic->vapic_addr = vapic_addr;
}
+
+int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ u32 reg = (msr - APIC_BASE_MSR) << 4;
+
+ if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
+ return 1;
+
+ /* if this is ICR write vector before command */
+ if (msr == 0x830)
+ apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
+ return apic_reg_write(apic, reg, (u32)data);
+}
+
+int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
+
+ if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
+ return 1;
+
+ if (apic_reg_read(apic, reg, 4, &low))
+ return 1;
+ if (msr == 0x830)
+ apic_reg_read(apic, APIC_ICR2, 4, &high);
+
+ *data = (((u64)high) << 32) | low;
+
+ return 0;
+}
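
kvm_x2apic_msr_write()/read() above map an x2APIC MSR onto a legacy register offset with reg = (msr - APIC_BASE_MSR) << 4 and split the 64-bit ICR value so the high half goes through ICR2 around the 32-bit register access. A standalone check of that arithmetic using the constants visible in the hunk (APIC_BASE_MSR 0x800, ICR MSR 0x830); the reg_read/reg_write backing store is a plain array, not the emulated APIC.

/* Illustrative x2APIC MSR <-> APIC register offset mapping. */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define APIC_BASE_MSR 0x800
#define APIC_ICR      0x300
#define APIC_ICR2     0x310

static uint32_t regs[0x1000 / 4];

static void     reg_write(uint32_t reg, uint32_t val) { regs[reg / 4] = val; }
static uint32_t reg_read(uint32_t reg)                { return regs[reg / 4]; }

static uint32_t msr_to_reg(uint32_t msr)
{
	return (msr - APIC_BASE_MSR) << 4;
}

/* 64-bit ICR MSR write: high half goes to ICR2, low half to ICR. */
static void x2apic_msr_write(uint32_t msr, uint64_t data)
{
	if (msr == 0x830)                        /* the ICR MSR */
		reg_write(APIC_ICR2, (uint32_t)(data >> 32));
	reg_write(msr_to_reg(msr), (uint32_t)data);
}

static uint64_t x2apic_msr_read(uint32_t msr)
{
	uint32_t low  = reg_read(msr_to_reg(msr));
	uint32_t high = (msr == 0x830) ? reg_read(APIC_ICR2) : 0;
	return ((uint64_t)high << 32) | low;
}

int main(void)
{
	assert(msr_to_reg(0x830) == APIC_ICR);   /* (0x830 - 0x800) << 4 == 0x300 */

	x2apic_msr_write(0x830, 0x00000002000040feULL);  /* dest 2, vector 0xfe */
	printf("ICR  = %#x\n", reg_read(APIC_ICR));
	printf("ICR2 = %#x\n", reg_read(APIC_ICR2));
	printf("rdmsr(0x830) = %#llx\n",
	       (unsigned long long)x2apic_msr_read(0x830));
	return 0;
}
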
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index a587f8349c46..40010b09c4aa 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -12,6 +12,7 @@ struct kvm_lapic {
struct kvm_timer lapic_timer;
u32 divide_count;
struct kvm_vcpu *vcpu;
+ bool irr_pending;
struct page *regs_page;
void *regs;
gpa_t vapic_addr;
@@ -28,6 +29,7 @@ u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
+void kvm_apic_set_version(struct kvm_vcpu *vcpu);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
@@ -44,4 +46,6 @@ void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
+int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
#endif
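
lapic.h gains irr_pending, and the lapic.c hunks keep it in sync: the flag is set whenever a vector is queued, lets apic_find_highest_irr() return -1 without scanning the IRR, and is recomputed only when a bit is cleared. A standalone model of that cached-emptiness flag over a 256-bit bitmap; the bitmap layout and helper names are illustrative, not the APIC register format.

/* Illustrative "any bit pending" cache over an interrupt bitmap. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_VECTORS 256

struct irr {
	unsigned char bits[NR_VECTORS / 8];
	bool pending;                  /* false => fast path: nothing queued */
};

static int find_highest(const struct irr *r)
{
	for (int v = NR_VECTORS - 1; v >= 0; v--)
		if (r->bits[v / 8] & (1u << (v % 8)))
			return v;
	return -1;
}

static void irr_set(struct irr *r, int vec)
{
	r->bits[vec / 8] |= 1u << (vec % 8);
	r->pending = true;             /* cheap to keep exact on set */
}

static void irr_clear(struct irr *r, int vec)
{
	r->bits[vec / 8] &= ~(1u << (vec % 8));
	r->pending = (find_highest(r) != -1);   /* re-scan only on clear */
}

static int highest_irr(const struct irr *r)
{
	if (!r->pending)
		return -1;             /* skips the 256-bit scan entirely */
	return find_highest(r);
}

int main(void)
{
	struct irr r;
	memset(&r, 0, sizeof(r));

	printf("empty:   %d\n", highest_irr(&r));   /* -1, no scan */
	irr_set(&r, 0x20);
	irr_set(&r, 0x31);
	printf("queued:  %d\n", highest_irr(&r));   /* 0x31 == 49 */
	irr_clear(&r, 0x31);
	irr_clear(&r, 0x20);
	printf("drained: %d\n", highest_irr(&r));   /* -1 again */
	return 0;
}
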
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5c3d6e81a7dc..1f24d8833d61 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,6 +18,7 @@
*/
#include "mmu.h"
+#include "kvm_cache_regs.h"
#include <linux/kvm_host.h>
#include <linux/types.h>
@@ -142,7 +143,7 @@ module_param(oos_shadow, bool, 0644);
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
struct kvm_rmap_desc {
- u64 *shadow_ptes[RMAP_EXT];
+ u64 *sptes[RMAP_EXT];
struct kvm_rmap_desc *more;
};
@@ -239,16 +240,25 @@ static int is_writeble_pte(unsigned long pte)
return pte & PT_WRITABLE_MASK;
}
-static int is_dirty_pte(unsigned long pte)
+static int is_dirty_gpte(unsigned long pte)
{
- return pte & shadow_dirty_mask;
+ return pte & PT_DIRTY_MASK;
}
-static int is_rmap_pte(u64 pte)
+static int is_rmap_spte(u64 pte)
{
return is_shadow_present_pte(pte);
}
+static int is_last_spte(u64 pte, int level)
+{
+ if (level == PT_PAGE_TABLE_LEVEL)
+ return 1;
+ if (level == PT_DIRECTORY_LEVEL && is_large_pte(pte))
+ return 1;
+ return 0;
+}
+
static pfn_t spte_to_pfn(u64 pte)
{
return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -261,7 +271,7 @@ static gfn_t pse36_gfn_delta(u32 gpte)
return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
-static void set_shadow_pte(u64 *sptep, u64 spte)
+static void __set_spte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
set_64bit((unsigned long *)sptep, spte);
@@ -384,9 +394,9 @@ static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
unsigned long idx;
- idx = (gfn / KVM_PAGES_PER_HPAGE) -
- (slot->base_gfn / KVM_PAGES_PER_HPAGE);
- return &slot->lpage_info[idx].write_count;
+ idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
+ (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
+ return &slot->lpage_info[0][idx].write_count;
}
static void account_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -475,10 +485,10 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
if (!lpage)
return &slot->rmap[gfn - slot->base_gfn];
- idx = (gfn / KVM_PAGES_PER_HPAGE) -
- (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+ idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
+ (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
- return &slot->lpage_info[idx].rmap_pde;
+ return &slot->lpage_info[0][idx].rmap_pde;
}
/*
@@ -497,7 +507,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
unsigned long *rmapp;
int i;
- if (!is_rmap_pte(*spte))
+ if (!is_rmap_spte(*spte))
return;
gfn = unalias_gfn(vcpu->kvm, gfn);
sp = page_header(__pa(spte));
@@ -509,21 +519,21 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
} else if (!(*rmapp & 1)) {
rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
desc = mmu_alloc_rmap_desc(vcpu);
- desc->shadow_ptes[0] = (u64 *)*rmapp;
- desc->shadow_ptes[1] = spte;
+ desc->sptes[0] = (u64 *)*rmapp;
+ desc->sptes[1] = spte;
*rmapp = (unsigned long)desc | 1;
} else {
rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
- while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
+ while (desc->sptes[RMAP_EXT-1] && desc->more)
desc = desc->more;
- if (desc->shadow_ptes[RMAP_EXT-1]) {
+ if (desc->sptes[RMAP_EXT-1]) {
desc->more = mmu_alloc_rmap_desc(vcpu);
desc = desc->more;
}
- for (i = 0; desc->shadow_ptes[i]; ++i)
+ for (i = 0; desc->sptes[i]; ++i)
;
- desc->shadow_ptes[i] = spte;
+ desc->sptes[i] = spte;
}
}
@@ -534,14 +544,14 @@ static void rmap_desc_remove_entry(unsigned long *rmapp,
{
int j;
- for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
+ for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
;
- desc->shadow_ptes[i] = desc->shadow_ptes[j];
- desc->shadow_ptes[j] = NULL;
+ desc->sptes[i] = desc->sptes[j];
+ desc->sptes[j] = NULL;
if (j != 0)
return;
if (!prev_desc && !desc->more)
- *rmapp = (unsigned long)desc->shadow_ptes[0];
+ *rmapp = (unsigned long)desc->sptes[0];
else
if (prev_desc)
prev_desc->more = desc->more;
@@ -559,7 +569,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
unsigned long *rmapp;
int i;
- if (!is_rmap_pte(*spte))
+ if (!is_rmap_spte(*spte))
return;
sp = page_header(__pa(spte));
pfn = spte_to_pfn(*spte);
@@ -586,8 +596,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
prev_desc = NULL;
while (desc) {
- for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
- if (desc->shadow_ptes[i] == spte) {
+ for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
+ if (desc->sptes[i] == spte) {
rmap_desc_remove_entry(rmapp,
desc, i,
prev_desc);
@@ -618,10 +628,10 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
prev_desc = NULL;
prev_spte = NULL;
while (desc) {
- for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
+ for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
if (prev_spte == spte)
- return desc->shadow_ptes[i];
- prev_spte = desc->shadow_ptes[i];
+ return desc->sptes[i];
+ prev_spte = desc->sptes[i];
}
desc = desc->more;
}
@@ -643,7 +653,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
if (is_writeble_pte(*spte)) {
- set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+ __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
write_protected = 1;
}
spte = rmap_next(kvm, rmapp, spte);
@@ -667,7 +677,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
if (is_writeble_pte(*spte)) {
rmap_remove(kvm, spte);
--kvm->stat.lpages;
- set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ __set_spte(spte, shadow_trap_nonpresent_pte);
spte = NULL;
write_protected = 1;
}
@@ -686,7 +696,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
rmap_remove(kvm, spte);
- set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ __set_spte(spte, shadow_trap_nonpresent_pte);
need_tlb_flush = 1;
}
return need_tlb_flush;
@@ -714,11 +724,11 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
end = start + (memslot->npages << PAGE_SHIFT);
if (hva >= start && hva < end) {
gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+ int idx = gfn_offset /
+ KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL);
retval |= handler(kvm, &memslot->rmap[gfn_offset]);
retval |= handler(kvm,
- &memslot->lpage_info[
- gfn_offset /
- KVM_PAGES_PER_HPAGE].rmap_pde);
+ &memslot->lpage_info[0][idx].rmap_pde);
}
}
@@ -1272,6 +1282,11 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
if (iterator->level < PT_PAGE_TABLE_LEVEL)
return false;
+
+ if (iterator->level == PT_PAGE_TABLE_LEVEL)
+ if (is_large_pte(*iterator->sptep))
+ return false;
+
iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
return true;
@@ -1292,25 +1307,17 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
pt = sp->spt;
- if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
- if (is_shadow_present_pte(pt[i]))
- rmap_remove(kvm, &pt[i]);
- pt[i] = shadow_trap_nonpresent_pte;
- }
- return;
- }
-
for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
ent = pt[i];
if (is_shadow_present_pte(ent)) {
- if (!is_large_pte(ent)) {
+ if (!is_last_spte(ent, sp->role.level)) {
ent &= PT64_BASE_ADDR_MASK;
mmu_page_remove_parent_pte(page_header(ent),
&pt[i]);
} else {
- --kvm->stat.lpages;
+ if (is_large_pte(ent))
+ --kvm->stat.lpages;
rmap_remove(kvm, &pt[i]);
}
}
@@ -1326,10 +1333,10 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < KVM_MAX_VCPUS; ++i)
- if (kvm->vcpus[i])
- kvm->vcpus[i]->arch.last_pte_updated = NULL;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ vcpu->arch.last_pte_updated = NULL;
}
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -1348,7 +1355,7 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
}
BUG_ON(!parent_pte);
kvm_mmu_put_page(sp, parent_pte);
- set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
+ __set_spte(parent_pte, shadow_trap_nonpresent_pte);
}
}
@@ -1495,7 +1502,7 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
if (pt[i] == shadow_notrap_nonpresent_pte)
- set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
+ __set_spte(&pt[i], shadow_trap_nonpresent_pte);
}
}
@@ -1661,7 +1668,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
return 0;
}
-static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned pte_access, int user_fault,
int write_fault, int dirty, int largepage,
gfn_t gfn, pfn_t pfn, bool speculative,
@@ -1711,7 +1718,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
* is responsibility of mmu_get_page / kvm_sync_page.
* Same reasoning can be applied to dirty page accounting.
*/
- if (!can_unsync && is_writeble_pte(*shadow_pte))
+ if (!can_unsync && is_writeble_pte(*sptep))
goto set_pte;
if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1728,61 +1735,61 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
mark_page_dirty(vcpu->kvm, gfn);
set_pte:
- set_shadow_pte(shadow_pte, spte);
+ __set_spte(sptep, spte);
return ret;
}
-static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned pt_access, unsigned pte_access,
int user_fault, int write_fault, int dirty,
int *ptwrite, int largepage, gfn_t gfn,
pfn_t pfn, bool speculative)
{
int was_rmapped = 0;
- int was_writeble = is_writeble_pte(*shadow_pte);
+ int was_writeble = is_writeble_pte(*sptep);
pgprintk("%s: spte %llx access %x write_fault %d"
" user_fault %d gfn %lx\n",
- __func__, *shadow_pte, pt_access,
+ __func__, *sptep, pt_access,
write_fault, user_fault, gfn);
- if (is_rmap_pte(*shadow_pte)) {
+ if (is_rmap_spte(*sptep)) {
/*
* If we overwrite a PTE page pointer with a 2MB PMD, unlink
* the parent of the now unreachable PTE.
*/
- if (largepage && !is_large_pte(*shadow_pte)) {
+ if (largepage && !is_large_pte(*sptep)) {
struct kvm_mmu_page *child;
- u64 pte = *shadow_pte;
+ u64 pte = *sptep;
child = page_header(pte & PT64_BASE_ADDR_MASK);
- mmu_page_remove_parent_pte(child, shadow_pte);
- } else if (pfn != spte_to_pfn(*shadow_pte)) {
+ mmu_page_remove_parent_pte(child, sptep);
+ } else if (pfn != spte_to_pfn(*sptep)) {
pgprintk("hfn old %lx new %lx\n",
- spte_to_pfn(*shadow_pte), pfn);
- rmap_remove(vcpu->kvm, shadow_pte);
+ spte_to_pfn(*sptep), pfn);
+ rmap_remove(vcpu->kvm, sptep);
} else
was_rmapped = 1;
}
- if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
+ if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
dirty, largepage, gfn, pfn, speculative, true)) {
if (write_fault)
*ptwrite = 1;
kvm_x86_ops->tlb_flush(vcpu);
}
- pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
+ pgprintk("%s: setting spte %llx\n", __func__, *sptep);
pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
- is_large_pte(*shadow_pte)? "2MB" : "4kB",
- is_present_pte(*shadow_pte)?"RW":"R", gfn,
- *shadow_pte, shadow_pte);
- if (!was_rmapped && is_large_pte(*shadow_pte))
+ is_large_pte(*sptep)? "2MB" : "4kB",
+ is_present_pte(*sptep)?"RW":"R", gfn,
+ *shadow_pte, sptep);
+ if (!was_rmapped && is_large_pte(*sptep))
++vcpu->kvm->stat.lpages;
- page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+ page_header_update_slot(vcpu->kvm, sptep, gfn);
if (!was_rmapped) {
- rmap_add(vcpu, shadow_pte, gfn, largepage);
- if (!is_rmap_pte(*shadow_pte))
+ rmap_add(vcpu, sptep, gfn, largepage);
+ if (!is_rmap_spte(*sptep))
kvm_release_pfn_clean(pfn);
} else {
if (was_writeble)
@@ -1791,7 +1798,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
kvm_release_pfn_clean(pfn);
}
if (speculative) {
- vcpu->arch.last_pte_updated = shadow_pte;
+ vcpu->arch.last_pte_updated = sptep;
vcpu->arch.last_pte_gfn = gfn;
}
}
@@ -1829,10 +1836,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
return -ENOMEM;
}
- set_shadow_pte(iterator.sptep,
- __pa(sp->spt)
- | PT_PRESENT_MASK | PT_WRITABLE_MASK
- | shadow_user_mask | shadow_x_mask);
+ __set_spte(iterator.sptep,
+ __pa(sp->spt)
+ | PT_PRESENT_MASK | PT_WRITABLE_MASK
+ | shadow_user_mask | shadow_x_mask);
}
}
return pt_write;
@@ -1845,8 +1852,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
pfn_t pfn;
unsigned long mmu_seq;
- if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
- gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+ if (is_largepage_backed(vcpu, gfn &
+ ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+ gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
largepage = 1;
}
@@ -1930,6 +1938,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
gfn_t root_gfn;
struct kvm_mmu_page *sp;
int direct = 0;
+ u64 pdptr;
root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
@@ -1957,11 +1966,12 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
ASSERT(!VALID_PAGE(root));
if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
- if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+ pdptr = kvm_pdptr_read(vcpu, i);
+ if (!is_present_gpte(pdptr)) {
vcpu->arch.mmu.pae_root[i] = 0;
continue;
}
- root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+ root_gfn = pdptr >> PAGE_SHIFT;
} else if (vcpu->arch.mmu.root_level == 0)
root_gfn = 0;
if (mmu_check_root(vcpu, root_gfn))
@@ -2049,8 +2059,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
if (r)
return r;
- if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
- gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+ if (is_largepage_backed(vcpu, gfn &
+ ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+ gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
largepage = 1;
}
mmu_seq = vcpu->kvm->mmu_notifier_seq;
@@ -2157,7 +2168,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
else
/* 32 bits PSE 4MB page */
context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
- context->rsvd_bits_mask[1][0] = ~0ull;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
break;
case PT32E_ROOT_LEVEL:
context->rsvd_bits_mask[0][2] =
@@ -2170,7 +2181,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = ~0ull;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
break;
case PT64_ROOT_LEVEL:
context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
@@ -2186,7 +2197,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = ~0ull;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
break;
}
}
@@ -2354,15 +2365,14 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
pte = *spte;
if (is_shadow_present_pte(pte)) {
- if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
- is_large_pte(pte))
+ if (is_last_spte(pte, sp->role.level))
rmap_remove(vcpu->kvm, spte);
else {
child = page_header(pte & PT64_BASE_ADDR_MASK);
mmu_page_remove_parent_pte(child, spte);
}
}
- set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ __set_spte(spte, shadow_trap_nonpresent_pte);
if (is_large_pte(pte))
--vcpu->kvm->stat.lpages;
}
@@ -2448,12 +2458,12 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
if ((bytes == 4) && (gpa % 4 == 0))
memcpy((void *)&gpte, new, 4);
}
- if (!is_present_pte(gpte))
+ if (!is_present_gpte(gpte))
return;
gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
- gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+ gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
vcpu->arch.update_pte.largepage = 1;
}
vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
@@ -2646,8 +2656,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
++vcpu->stat.mmio_exits;
return 0;
case EMULATE_FAIL:
- kvm_report_emulation_failure(vcpu, "pagetable");
- return 1;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ return 0;
default:
BUG();
}
@@ -3005,6 +3016,24 @@ out:
return r;
}
+int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
+{
+ struct kvm_shadow_walk_iterator iterator;
+ int nr_sptes = 0;
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+ for_each_shadow_entry(vcpu, addr, iterator) {
+ sptes[iterator.level-1] = *iterator.sptep;
+ nr_sptes++;
+ if (!is_shadow_present_pte(*iterator.sptep))
+ break;
+ }
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
+ return nr_sptes;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
+
#ifdef AUDIT
static const char *audit_msg;
@@ -3017,6 +3046,54 @@ static gva_t canonicalize(gva_t gva)
return gva;
}
+
+typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
+ u64 *sptep);
+
+static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
+ inspect_spte_fn fn)
+{
+ int i;
+
+ for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+ u64 ent = sp->spt[i];
+
+ if (is_shadow_present_pte(ent)) {
+ if (!is_last_spte(ent, sp->role.level)) {
+ struct kvm_mmu_page *child;
+ child = page_header(ent & PT64_BASE_ADDR_MASK);
+ __mmu_spte_walk(kvm, child, fn);
+ } else
+ fn(kvm, sp, &sp->spt[i]);
+ }
+ }
+}
+
+static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
+{
+ int i;
+ struct kvm_mmu_page *sp;
+
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return;
+ if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+ hpa_t root = vcpu->arch.mmu.root_hpa;
+ sp = page_header(root);
+ __mmu_spte_walk(vcpu->kvm, sp, fn);
+ return;
+ }
+ for (i = 0; i < 4; ++i) {
+ hpa_t root = vcpu->arch.mmu.pae_root[i];
+
+ if (root && VALID_PAGE(root)) {
+ root &= PT64_BASE_ADDR_MASK;
+ sp = page_header(root);
+ __mmu_spte_walk(vcpu->kvm, sp, fn);
+ }
+ }
+ return;
+}
+
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
gva_t va, int level)
{
@@ -3031,20 +3108,19 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
continue;
va = canonicalize(va);
- if (level > 1) {
- if (ent == shadow_notrap_nonpresent_pte)
- printk(KERN_ERR "audit: (%s) nontrapping pte"
- " in nonleaf level: levels %d gva %lx"
- " level %d pte %llx\n", audit_msg,
- vcpu->arch.mmu.root_level, va, level, ent);
- else
- audit_mappings_page(vcpu, ent, va, level - 1);
- } else {
+ if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
+ audit_mappings_page(vcpu, ent, va, level - 1);
+ else {
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
gfn_t gfn = gpa >> PAGE_SHIFT;
pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
+ if (is_error_pfn(pfn)) {
+ kvm_release_pfn_clean(pfn);
+ continue;
+ }
+
if (is_shadow_present_pte(ent)
&& (ent & PT64_BASE_ADDR_MASK) != hpa)
printk(KERN_ERR "xx audit error: (%s) levels %d"
@@ -3098,7 +3174,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
while (d) {
for (k = 0; k < RMAP_EXT; ++k)
- if (d->shadow_ptes[k])
+ if (d->sptes[k])
++nmaps;
else
break;
@@ -3109,9 +3185,48 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
return nmaps;
}
-static int count_writable_mappings(struct kvm_vcpu *vcpu)
+void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
+{
+ unsigned long *rmapp;
+ struct kvm_mmu_page *rev_sp;
+ gfn_t gfn;
+
+ if (*sptep & PT_WRITABLE_MASK) {
+ rev_sp = page_header(__pa(sptep));
+ gfn = rev_sp->gfns[sptep - rev_sp->spt];
+
+ if (!gfn_to_memslot(kvm, gfn)) {
+ if (!printk_ratelimit())
+ return;
+ printk(KERN_ERR "%s: no memslot for gfn %ld\n",
+ audit_msg, gfn);
+ printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
+ audit_msg, sptep - rev_sp->spt,
+ rev_sp->gfn);
+ dump_stack();
+ return;
+ }
+
+ rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
+ is_large_pte(*sptep));
+ if (!*rmapp) {
+ if (!printk_ratelimit())
+ return;
+ printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
+ audit_msg, *sptep);
+ dump_stack();
+ }
+ }
+
+}
+
+void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
+{
+ mmu_spte_walk(vcpu, inspect_spte_has_rmap);
+}
+
+static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
{
- int nmaps = 0;
struct kvm_mmu_page *sp;
int i;
@@ -3128,20 +3243,16 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
continue;
if (!(ent & PT_WRITABLE_MASK))
continue;
- ++nmaps;
+ inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
}
}
- return nmaps;
+ return;
}
static void audit_rmap(struct kvm_vcpu *vcpu)
{
- int n_rmap = count_rmaps(vcpu);
- int n_actual = count_writable_mappings(vcpu);
-
- if (n_rmap != n_actual)
- printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
- __func__, audit_msg, n_rmap, n_actual);
+ check_writable_mappings_rmap(vcpu);
+ count_rmaps(vcpu);
}
static void audit_write_protection(struct kvm_vcpu *vcpu)
@@ -3149,20 +3260,28 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
struct kvm_mmu_page *sp;
struct kvm_memory_slot *slot;
unsigned long *rmapp;
+ u64 *spte;
gfn_t gfn;
list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
if (sp->role.direct)
continue;
+ if (sp->unsync)
+ continue;
gfn = unalias_gfn(vcpu->kvm, sp->gfn);
slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
rmapp = &slot->rmap[gfn - slot->base_gfn];
- if (*rmapp)
- printk(KERN_ERR "%s: (%s) shadow page has writable"
- " mappings: gfn %lx role %x\n",
+
+ spte = rmap_next(vcpu->kvm, rmapp, NULL);
+ while (spte) {
+ if (*spte & PT_WRITABLE_MASK)
+ printk(KERN_ERR "%s: (%s) shadow page has "
+ "writable mappings: gfn %lx role %x\n",
__func__, audit_msg, sp->gfn,
sp->role.word);
+ spte = rmap_next(vcpu->kvm, rmapp, spte);
+ }
}
}
@@ -3174,7 +3293,9 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
audit_msg = msg;
audit_rmap(vcpu);
audit_write_protection(vcpu);
- audit_mappings(vcpu);
+ if (strcmp("pre pte write", audit_msg) != 0)
+ audit_mappings(vcpu);
+ audit_writable_sptes_have_rmaps(vcpu);
dbg = olddbg;
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 3494a2fb136e..61a1b3884b49 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -37,6 +37,8 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
+int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
+
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
@@ -75,7 +77,7 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
return vcpu->arch.cr0 & X86_CR0_PG;
}
-static inline int is_present_pte(unsigned long pte)
+static inline int is_present_gpte(unsigned long pte)
{
return pte & PT_PRESENT_MASK;
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 258e4591e1ca..53e129cec5fd 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -131,8 +131,8 @@ walk:
pte = vcpu->arch.cr3;
#if PTTYPE == 64
if (!is_long_mode(vcpu)) {
- pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
- if (!is_present_pte(pte))
+ pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
+ if (!is_present_gpte(pte))
goto not_present;
--walker->level;
}
@@ -155,7 +155,7 @@ walk:
kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
- if (!is_present_pte(pte))
+ if (!is_present_gpte(pte))
goto not_present;
rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
@@ -205,7 +205,7 @@ walk:
--walker->level;
}
- if (write_fault && !is_dirty_pte(pte)) {
+ if (write_fault && !is_dirty_gpte(pte)) {
bool ret;
mark_page_dirty(vcpu->kvm, table_gfn);
@@ -252,8 +252,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
gpte = *(const pt_element_t *)pte;
if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
- if (!is_present_pte(gpte))
- set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
+ if (!is_present_gpte(gpte))
+ __set_spte(spte, shadow_notrap_nonpresent_pte);
return;
}
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
@@ -281,7 +281,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
{
unsigned access = gw->pt_access;
struct kvm_mmu_page *shadow_page;
- u64 spte, *sptep;
+ u64 spte, *sptep = NULL;
int direct;
gfn_t table_gfn;
int r;
@@ -289,7 +289,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
pt_element_t curr_pte;
struct kvm_shadow_walk_iterator iterator;
- if (!is_present_pte(gw->ptes[gw->level - 1]))
+ if (!is_present_gpte(gw->ptes[gw->level - 1]))
return NULL;
for_each_shadow_entry(vcpu, addr, iterator) {
@@ -311,14 +311,14 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (is_large_pte(*sptep)) {
rmap_remove(vcpu->kvm, sptep);
- set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+ __set_spte(sptep, shadow_trap_nonpresent_pte);
kvm_flush_remote_tlbs(vcpu->kvm);
}
if (level == PT_DIRECTORY_LEVEL
&& gw->level == PT_DIRECTORY_LEVEL) {
direct = 1;
- if (!is_dirty_pte(gw->ptes[level - 1]))
+ if (!is_dirty_gpte(gw->ptes[level - 1]))
access &= ~ACC_WRITE_MASK;
table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
} else {
@@ -369,7 +369,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
int user_fault = error_code & PFERR_USER_MASK;
int fetch_fault = error_code & PFERR_FETCH_MASK;
struct guest_walker walker;
- u64 *shadow_pte;
+ u64 *sptep;
int write_pt = 0;
int r;
pfn_t pfn;
@@ -401,7 +401,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
if (walker.level == PT_DIRECTORY_LEVEL) {
gfn_t large_gfn;
- large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
+ large_gfn = walker.gfn &
+ ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
if (is_largepage_backed(vcpu, large_gfn)) {
walker.gfn = large_gfn;
largepage = 1;
@@ -422,11 +423,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
- shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
- largepage, &write_pt, pfn);
+ sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+ largepage, &write_pt, pfn);
pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
- shadow_pte, *shadow_pte, write_pt);
+ sptep, *sptep, write_pt);
if (!write_pt)
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
@@ -472,7 +473,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
--vcpu->kvm->stat.lpages;
need_flush = 1;
}
- set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+ __set_spte(sptep, shadow_trap_nonpresent_pte);
break;
}
@@ -489,7 +490,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
sizeof(pt_element_t)))
return;
- if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
+ if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
if (mmu_topup_memory_caches(vcpu))
return;
kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
@@ -536,7 +537,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
for (j = 0; j < ARRAY_SIZE(pt); ++j)
- if (r || is_present_pte(pt[j]))
+ if (r || is_present_gpte(pt[j]))
sp->spt[i+j] = shadow_trap_nonpresent_pte;
else
sp->spt[i+j] = shadow_notrap_nonpresent_pte;
@@ -574,23 +575,23 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
sizeof(pt_element_t)))
return -EINVAL;
- if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
+ if (gpte_to_gfn(gpte) != gfn || !is_present_gpte(gpte) ||
!(gpte & PT_ACCESSED_MASK)) {
u64 nonpresent;
rmap_remove(vcpu->kvm, &sp->spt[i]);
- if (is_present_pte(gpte))
+ if (is_present_gpte(gpte))
nonpresent = shadow_trap_nonpresent_pte;
else
nonpresent = shadow_notrap_nonpresent_pte;
- set_shadow_pte(&sp->spt[i], nonpresent);
+ __set_spte(&sp->spt[i], nonpresent);
continue;
}
nr_present++;
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
- is_dirty_pte(gpte), 0, gfn,
+ is_dirty_gpte(gpte), 0, gfn,
spte_to_pfn(sp->spt[i]), true, false);
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 71510e07e69e..fc14bdf60d39 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -15,7 +15,6 @@
*/
#include <linux/kvm_host.h>
-#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
@@ -26,10 +25,12 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
+#include <linux/ftrace_event.h>
#include <asm/desc.h>
#include <asm/virtext.h>
+#include "trace.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
@@ -57,6 +58,46 @@ MODULE_LICENSE("GPL");
#define nsvm_printk(fmt, args...) do {} while(0)
#endif
+static const u32 host_save_user_msrs[] = {
+#ifdef CONFIG_X86_64
+ MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
+ MSR_FS_BASE,
+#endif
+ MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
+};
+
+#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
+
+struct kvm_vcpu;
+
+struct vcpu_svm {
+ struct kvm_vcpu vcpu;
+ struct vmcb *vmcb;
+ unsigned long vmcb_pa;
+ struct svm_cpu_data *svm_data;
+ uint64_t asid_generation;
+ uint64_t sysenter_cs;
+ uint64_t sysenter_esp;
+ uint64_t sysenter_eip;
+
+ u64 next_rip;
+
+ u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
+ u64 host_gs_base;
+
+ u32 *msrpm;
+ struct vmcb *hsave;
+ u64 hsave_msr;
+
+ u64 nested_vmcb;
+
+ /* These are the merged vectors */
+ u32 *nested_msrpm;
+
+ /* gpa pointers to the real vectors */
+ u64 nested_vmcb_msrpm;
+};
+
/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
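
The struct vcpu_svm definition above was moved here from the now-dropped kvm_svm.h. Because the generic struct kvm_vcpu is embedded as its first member, SVM code recovers the private state from a plain vcpu pointer with container_of(); svm.c already carries such a helper, it just isn't visible in this hunk. A minimal sketch:

/* Sketch, not part of the patch: recover SVM state from a generic vcpu. */
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
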
@@ -147,19 +188,6 @@ static inline void invlpga(unsigned long addr, u32 asid)
asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}
-static inline unsigned long kvm_read_cr2(void)
-{
- unsigned long cr2;
-
- asm volatile ("mov %%cr2, %0" : "=r" (cr2));
- return cr2;
-}
-
-static inline void kvm_write_cr2(unsigned long val)
-{
- asm volatile ("mov %0, %%cr2" :: "r" (val));
-}
-
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
to_svm(vcpu)->asid_generation--;
@@ -605,7 +633,7 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
init_vmcb(svm);
- if (vcpu->vcpu_id != 0) {
+ if (!kvm_vcpu_is_bsp(vcpu)) {
kvm_rip_write(vcpu, 0);
svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
@@ -669,7 +697,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
fx_init(&svm->vcpu);
svm->vcpu.fpu_active = 1;
svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
- if (svm->vcpu.vcpu_id == 0)
+ if (kvm_vcpu_is_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
return &svm->vcpu;
@@ -739,6 +767,18 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
to_svm(vcpu)->vmcb->save.rflags = rflags;
}
+static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+ switch (reg) {
+ case VCPU_EXREG_PDPTR:
+ BUG_ON(!npt_enabled);
+ load_pdptrs(vcpu, vcpu->arch.cr3);
+ break;
+ default:
+ BUG();
+ }
+}
+
static void svm_set_vintr(struct vcpu_svm *svm)
{
svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
@@ -1061,7 +1101,6 @@ static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
val = 0;
}
- KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
return val;
}
@@ -1070,8 +1109,6 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
{
struct vcpu_svm *svm = to_svm(vcpu);
- KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);
-
*exception = 0;
switch (dr) {
@@ -1119,14 +1156,7 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
fault_address = svm->vmcb->control.exit_info_2;
error_code = svm->vmcb->control.exit_info_1;
- if (!npt_enabled)
- KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
- (u32)fault_address, (u32)(fault_address >> 32),
- handler);
- else
- KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
- (u32)fault_address, (u32)(fault_address >> 32),
- handler);
+ trace_kvm_page_fault(fault_address, error_code);
/*
 * FIXME: This shouldn't be necessary here, but there is a flush
* missing in the MMU code. Until we find this bug, flush the
@@ -1253,14 +1283,12 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
- KVMTRACE_0D(NMI, &svm->vcpu, handler);
return 1;
}
static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
++svm->vcpu.stat.irq_exits;
- KVMTRACE_0D(INTR, &svm->vcpu, handler);
return 1;
}
@@ -1577,7 +1605,8 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
/* Kill any pending exceptions */
if (svm->vcpu.arch.exception.pending == true)
nsvm_printk("WARNING: Pending Exception\n");
- svm->vcpu.arch.exception.pending = false;
+ kvm_clear_exception_queue(&svm->vcpu);
+ kvm_clear_interrupt_queue(&svm->vcpu);
/* Restore selected save entries */
svm->vmcb->save.es = hsave->save.es;
@@ -1645,7 +1674,8 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
svm->nested_vmcb = svm->vmcb->save.rax;
/* Clear internal status */
- svm->vcpu.arch.exception.pending = false;
+ kvm_clear_exception_queue(&svm->vcpu);
+ kvm_clear_interrupt_queue(&svm->vcpu);
/* Save the old vmcb, so we don't need to pick what we save, but
can restore everything when a VMEXIT occurs */
@@ -1845,6 +1875,19 @@ static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
return 1;
}
+static int invlpga_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ nsvm_printk("INVLPGA\n");
+
+ /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
+ kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
+
+ svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+ skip_emulated_instruction(&svm->vcpu);
+ return 1;
+}
+
static int invalid_op_interception(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
@@ -1953,7 +1996,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
struct vcpu_svm *svm = to_svm(vcpu);
switch (ecx) {
- case MSR_IA32_TIME_STAMP_COUNTER: {
+ case MSR_IA32_TSC: {
u64 tsc;
rdtscll(tsc);
@@ -1978,13 +2021,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
break;
#endif
case MSR_IA32_SYSENTER_CS:
- *data = svm->vmcb->save.sysenter_cs;
+ *data = svm->sysenter_cs;
break;
case MSR_IA32_SYSENTER_EIP:
- *data = svm->vmcb->save.sysenter_eip;
+ *data = svm->sysenter_eip;
break;
case MSR_IA32_SYSENTER_ESP:
- *data = svm->vmcb->save.sysenter_esp;
+ *data = svm->sysenter_esp;
break;
/* Nobody will change the following 5 values in the VMCB so
we can safely return them on rdmsr. They will always be 0
@@ -2027,8 +2070,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
if (svm_get_msr(&svm->vcpu, ecx, &data))
kvm_inject_gp(&svm->vcpu, 0);
else {
- KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
- (u32)(data >> 32), handler);
+ trace_kvm_msr_read(ecx, data);
svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
@@ -2043,7 +2085,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
struct vcpu_svm *svm = to_svm(vcpu);
switch (ecx) {
- case MSR_IA32_TIME_STAMP_COUNTER: {
+ case MSR_IA32_TSC: {
u64 tsc;
rdtscll(tsc);
@@ -2068,13 +2110,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
break;
#endif
case MSR_IA32_SYSENTER_CS:
- svm->vmcb->save.sysenter_cs = data;
+ svm->sysenter_cs = data;
break;
case MSR_IA32_SYSENTER_EIP:
- svm->vmcb->save.sysenter_eip = data;
+ svm->sysenter_eip = data;
break;
case MSR_IA32_SYSENTER_ESP:
- svm->vmcb->save.sysenter_esp = data;
+ svm->sysenter_esp = data;
break;
case MSR_IA32_DEBUGCTLMSR:
if (!svm_has(SVM_FEATURE_LBRV)) {
@@ -2091,25 +2133,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
else
svm_disable_lbrv(svm);
break;
- case MSR_K7_EVNTSEL0:
- case MSR_K7_EVNTSEL1:
- case MSR_K7_EVNTSEL2:
- case MSR_K7_EVNTSEL3:
- case MSR_K7_PERFCTR0:
- case MSR_K7_PERFCTR1:
- case MSR_K7_PERFCTR2:
- case MSR_K7_PERFCTR3:
- /*
- * Just discard all writes to the performance counters; this
- * should keep both older linux and windows 64-bit guests
- * happy
- */
- pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
-
- break;
case MSR_VM_HSAVE_PA:
svm->hsave_msr = data;
break;
+ case MSR_VM_CR:
+ case MSR_VM_IGNNE:
+ pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
+ break;
default:
return kvm_set_msr_common(vcpu, ecx, data);
}
@@ -2122,8 +2152,7 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
- KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
- handler);
+ trace_kvm_msr_write(ecx, data);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
if (svm_set_msr(&svm->vcpu, ecx, data))
@@ -2144,8 +2173,6 @@ static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
static int interrupt_window_interception(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
- KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
-
svm_clear_vintr(svm);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
/*
@@ -2201,7 +2228,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_INVD] = emulate_on_interception,
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
- [SVM_EXIT_INVLPGA] = invalid_op_interception,
+ [SVM_EXIT_INVLPGA] = invlpga_interception,
[SVM_EXIT_IOIO] = io_interception,
[SVM_EXIT_MSR] = msr_interception,
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
@@ -2224,8 +2251,7 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
u32 exit_code = svm->vmcb->control.exit_code;
- KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
- (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
+ trace_kvm_exit(exit_code, svm->vmcb->save.rip);
if (is_nested(svm)) {
nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
@@ -2246,12 +2272,6 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
}
vcpu->arch.cr0 = svm->vmcb->save.cr0;
vcpu->arch.cr3 = svm->vmcb->save.cr3;
- if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
- if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- }
if (mmu_reload) {
kvm_mmu_reset_context(vcpu);
kvm_mmu_load(vcpu);
@@ -2319,7 +2339,7 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
struct vmcb_control_area *control;
- KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
+ trace_kvm_inj_virq(irq);
++svm->vcpu.stat.irq_injections;
control = &svm->vmcb->control;
@@ -2329,21 +2349,14 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}
-static void svm_queue_irq(struct kvm_vcpu *vcpu, unsigned nr)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- svm->vmcb->control.event_inj = nr |
- SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
-}
-
static void svm_set_irq(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- nested_svm_intr(svm);
+ BUG_ON(!(svm->vcpu.arch.hflags & HF_GIF_MASK));
- svm_queue_irq(vcpu, vcpu->arch.interrupt.nr);
+ svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
+ SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
@@ -2371,13 +2384,25 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
struct vmcb *vmcb = svm->vmcb;
return (vmcb->save.rflags & X86_EFLAGS_IF) &&
!(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
- (svm->vcpu.arch.hflags & HF_GIF_MASK);
+ (svm->vcpu.arch.hflags & HF_GIF_MASK) &&
+ !is_nested(svm);
}
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
- svm_set_vintr(to_svm(vcpu));
- svm_inject_irq(to_svm(vcpu), 0x0);
+ struct vcpu_svm *svm = to_svm(vcpu);
+ nsvm_printk("Trying to open IRQ window\n");
+
+ nested_svm_intr(svm);
+
+ /* In case GIF=0 we can't rely on the CPU to tell us when
+ * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
+ * The next time we get that intercept, this function will be
+ * called again though and we'll get the vintr intercept. */
+ if (svm->vcpu.arch.hflags & HF_GIF_MASK) {
+ svm_set_vintr(svm);
+ svm_inject_irq(svm, 0x0);
+ }
}
static void enable_nmi_window(struct kvm_vcpu *vcpu)
@@ -2456,6 +2481,8 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
case SVM_EXITINTINFO_TYPE_EXEPT:
/* In case of software exception do not reinject an exception
 vector, but re-execute the instruction instead */
+ if (is_nested(svm))
+ break;
if (kvm_exception_is_soft(vector))
break;
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
@@ -2498,7 +2525,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
fs_selector = kvm_read_fs();
gs_selector = kvm_read_gs();
ldt_selector = kvm_read_ldt();
- svm->host_cr2 = kvm_read_cr2();
if (!is_nested(svm))
svm->vmcb->save.cr2 = vcpu->arch.cr2;
/* required for live migration with NPT */
@@ -2585,8 +2611,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
- kvm_write_cr2(svm->host_cr2);
-
kvm_load_fs(fs_selector);
kvm_load_gs(gs_selector);
kvm_load_ldt(ldt_selector);
@@ -2602,6 +2626,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
svm->next_rip = 0;
+ if (npt_enabled) {
+ vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
+ vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
+ }
+
svm_complete_interrupts(svm);
}
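
Clearing VCPU_EXREG_PDPTR from regs_avail and regs_dirty after VMRUN marks the cached PDPTEs as stale whenever NPT is active, so the next reader refetches them through the new .cache_reg hook (svm_cache_reg() above, vmx_cache_reg() in the vmx.c part of this series). A sketch of the accessor this enables, assuming the shape suggested by the kvm_pdptr_read() call added to paging_tmpl.h; the exact body is not shown in this diff:

/* Sketch (assumed accessor shape): lazily refill the PDPTR cache on
 * first use after a VM exit. */
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.pdptrs[index];
}
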
@@ -2673,6 +2702,59 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
return 0;
}
+static const struct trace_print_flags svm_exit_reasons_str[] = {
+ { SVM_EXIT_READ_CR0, "read_cr0" },
+ { SVM_EXIT_READ_CR3, "read_cr3" },
+ { SVM_EXIT_READ_CR4, "read_cr4" },
+ { SVM_EXIT_READ_CR8, "read_cr8" },
+ { SVM_EXIT_WRITE_CR0, "write_cr0" },
+ { SVM_EXIT_WRITE_CR3, "write_cr3" },
+ { SVM_EXIT_WRITE_CR4, "write_cr4" },
+ { SVM_EXIT_WRITE_CR8, "write_cr8" },
+ { SVM_EXIT_READ_DR0, "read_dr0" },
+ { SVM_EXIT_READ_DR1, "read_dr1" },
+ { SVM_EXIT_READ_DR2, "read_dr2" },
+ { SVM_EXIT_READ_DR3, "read_dr3" },
+ { SVM_EXIT_WRITE_DR0, "write_dr0" },
+ { SVM_EXIT_WRITE_DR1, "write_dr1" },
+ { SVM_EXIT_WRITE_DR2, "write_dr2" },
+ { SVM_EXIT_WRITE_DR3, "write_dr3" },
+ { SVM_EXIT_WRITE_DR5, "write_dr5" },
+ { SVM_EXIT_WRITE_DR7, "write_dr7" },
+ { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
+ { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
+ { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
+ { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
+ { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
+ { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
+ { SVM_EXIT_INTR, "interrupt" },
+ { SVM_EXIT_NMI, "nmi" },
+ { SVM_EXIT_SMI, "smi" },
+ { SVM_EXIT_INIT, "init" },
+ { SVM_EXIT_VINTR, "vintr" },
+ { SVM_EXIT_CPUID, "cpuid" },
+ { SVM_EXIT_INVD, "invd" },
+ { SVM_EXIT_HLT, "hlt" },
+ { SVM_EXIT_INVLPG, "invlpg" },
+ { SVM_EXIT_INVLPGA, "invlpga" },
+ { SVM_EXIT_IOIO, "io" },
+ { SVM_EXIT_MSR, "msr" },
+ { SVM_EXIT_TASK_SWITCH, "task_switch" },
+ { SVM_EXIT_SHUTDOWN, "shutdown" },
+ { SVM_EXIT_VMRUN, "vmrun" },
+ { SVM_EXIT_VMMCALL, "hypercall" },
+ { SVM_EXIT_VMLOAD, "vmload" },
+ { SVM_EXIT_VMSAVE, "vmsave" },
+ { SVM_EXIT_STGI, "stgi" },
+ { SVM_EXIT_CLGI, "clgi" },
+ { SVM_EXIT_SKINIT, "skinit" },
+ { SVM_EXIT_WBINVD, "wbinvd" },
+ { SVM_EXIT_MONITOR, "monitor" },
+ { SVM_EXIT_MWAIT, "mwait" },
+ { SVM_EXIT_NPF, "npf" },
+ { -1, NULL }
+};
+
static struct kvm_x86_ops svm_x86_ops = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -2710,6 +2792,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_gdt = svm_set_gdt,
.get_dr = svm_get_dr,
.set_dr = svm_set_dr,
+ .cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
@@ -2733,6 +2816,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_tss_addr = svm_set_tss_addr,
.get_tdp_level = get_npt_level,
.get_mt_mask = svm_get_mt_mask,
+
+ .exit_reasons_str = svm_exit_reasons_str,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index 86dbac072d0c..eea40439066c 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -9,12 +9,16 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
int restart_timer = 0;
wait_queue_head_t *q = &vcpu->wq;
- /* FIXME: this code should not know anything about vcpus */
- if (!atomic_inc_and_test(&ktimer->pending))
+ /*
+ * There is a race window between reading and incrementing, but we do
+ * not care about potentially losing timer events in the !reinject
+ * case anyway.
+ */
+ if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
+ atomic_inc(&ktimer->pending);
+ /* FIXME: this code should not know anything about vcpus */
set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
-
- if (!ktimer->reinject)
- atomic_set(&ktimer->pending, 1);
+ }
if (waitqueue_active(q))
wake_up_interruptible(q);
@@ -33,7 +37,7 @@ enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
struct kvm_vcpu *vcpu;
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
- vcpu = ktimer->kvm->vcpus[ktimer->vcpu_id];
+ vcpu = ktimer->vcpu;
if (!vcpu)
return HRTIMER_NORESTART;
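
The reworked timer callback only queues ticks unconditionally when reinjection is requested; otherwise an expiry merely raises pending from 0 to 1, so back-to-back ticks coalesce. A hypothetical helper (illustration only, not in the patch) that captures the decision:

/* Hypothetical helper: should this expiry be queued? */
static bool example_should_queue_tick(struct kvm_timer *ktimer)
{
	/* reinject: count every tick; otherwise only raise 0 -> 1 */
	return ktimer->reinject || !atomic_read(&ktimer->pending);
}

The read-then-increment race mentioned in the comment can at worst swallow one increment in the !reinject case, which is precisely the coalescing behaviour that mode asks for.
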
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
new file mode 100644
index 000000000000..6c2c87fa6e4f
--- /dev/null
+++ b/arch/x86/kvm/trace.h
@@ -0,0 +1,270 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH arch/x86/kvm
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoint for guest mode entry.
+ */
+TRACE_EVENT(kvm_entry,
+ TP_PROTO(unsigned int vcpu_id),
+ TP_ARGS(vcpu_id),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vcpu_id )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ ),
+
+ TP_printk("vcpu %u", __entry->vcpu_id)
+);
+
+/*
+ * Tracepoint for hypercall.
+ */
+TRACE_EVENT(kvm_hypercall,
+ TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3),
+ TP_ARGS(nr, a0, a1, a2, a3),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, nr )
+ __field( unsigned long, a0 )
+ __field( unsigned long, a1 )
+ __field( unsigned long, a2 )
+ __field( unsigned long, a3 )
+ ),
+
+ TP_fast_assign(
+ __entry->nr = nr;
+ __entry->a0 = a0;
+ __entry->a1 = a1;
+ __entry->a2 = a2;
+ __entry->a3 = a3;
+ ),
+
+ TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
+ __entry->nr, __entry->a0, __entry->a1, __entry->a2,
+ __entry->a3)
+);
+
+/*
+ * Tracepoint for PIO.
+ */
+TRACE_EVENT(kvm_pio,
+ TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
+ unsigned int count),
+ TP_ARGS(rw, port, size, count),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, rw )
+ __field( unsigned int, port )
+ __field( unsigned int, size )
+ __field( unsigned int, count )
+ ),
+
+ TP_fast_assign(
+ __entry->rw = rw;
+ __entry->port = port;
+ __entry->size = size;
+ __entry->count = count;
+ ),
+
+ TP_printk("pio_%s at 0x%x size %d count %d",
+ __entry->rw ? "write" : "read",
+ __entry->port, __entry->size, __entry->count)
+);
+
+/*
+ * Tracepoint for cpuid.
+ */
+TRACE_EVENT(kvm_cpuid,
+ TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
+ unsigned long rcx, unsigned long rdx),
+ TP_ARGS(function, rax, rbx, rcx, rdx),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, function )
+ __field( unsigned long, rax )
+ __field( unsigned long, rbx )
+ __field( unsigned long, rcx )
+ __field( unsigned long, rdx )
+ ),
+
+ TP_fast_assign(
+ __entry->function = function;
+ __entry->rax = rax;
+ __entry->rbx = rbx;
+ __entry->rcx = rcx;
+ __entry->rdx = rdx;
+ ),
+
+ TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
+ __entry->function, __entry->rax,
+ __entry->rbx, __entry->rcx, __entry->rdx)
+);
+
+#define AREG(x) { APIC_##x, "APIC_" #x }
+
+#define kvm_trace_symbol_apic \
+ AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \
+ AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \
+ AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
+ AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \
+ AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \
+ AREG(ECTRL)
+/*
+ * Tracepoint for apic access.
+ */
+TRACE_EVENT(kvm_apic,
+ TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
+ TP_ARGS(rw, reg, val),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, rw )
+ __field( unsigned int, reg )
+ __field( unsigned int, val )
+ ),
+
+ TP_fast_assign(
+ __entry->rw = rw;
+ __entry->reg = reg;
+ __entry->val = val;
+ ),
+
+ TP_printk("apic_%s %s = 0x%x",
+ __entry->rw ? "write" : "read",
+ __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
+ __entry->val)
+);
+
+#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val)
+#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val)
+
+/*
+ * Tracepoint for kvm guest exit:
+ */
+TRACE_EVENT(kvm_exit,
+ TP_PROTO(unsigned int exit_reason, unsigned long guest_rip),
+ TP_ARGS(exit_reason, guest_rip),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, exit_reason )
+ __field( unsigned long, guest_rip )
+ ),
+
+ TP_fast_assign(
+ __entry->exit_reason = exit_reason;
+ __entry->guest_rip = guest_rip;
+ ),
+
+ TP_printk("reason %s rip 0x%lx",
+ ftrace_print_symbols_seq(p, __entry->exit_reason,
+ kvm_x86_ops->exit_reasons_str),
+ __entry->guest_rip)
+);
+
+/*
+ * Tracepoint for kvm interrupt injection:
+ */
+TRACE_EVENT(kvm_inj_virq,
+ TP_PROTO(unsigned int irq),
+ TP_ARGS(irq),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ ),
+
+ TP_printk("irq %u", __entry->irq)
+);
+
+/*
+ * Tracepoint for page fault.
+ */
+TRACE_EVENT(kvm_page_fault,
+ TP_PROTO(unsigned long fault_address, unsigned int error_code),
+ TP_ARGS(fault_address, error_code),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, fault_address )
+ __field( unsigned int, error_code )
+ ),
+
+ TP_fast_assign(
+ __entry->fault_address = fault_address;
+ __entry->error_code = error_code;
+ ),
+
+ TP_printk("address %lx error_code %x",
+ __entry->fault_address, __entry->error_code)
+);
+
+/*
+ * Tracepoint for guest MSR access.
+ */
+TRACE_EVENT(kvm_msr,
+ TP_PROTO(unsigned int rw, unsigned int ecx, unsigned long data),
+ TP_ARGS(rw, ecx, data),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, rw )
+ __field( unsigned int, ecx )
+ __field( unsigned long, data )
+ ),
+
+ TP_fast_assign(
+ __entry->rw = rw;
+ __entry->ecx = ecx;
+ __entry->data = data;
+ ),
+
+ TP_printk("msr_%s %x = 0x%lx",
+ __entry->rw ? "write" : "read",
+ __entry->ecx, __entry->data)
+);
+
+#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data)
+#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data)
+
+/*
+ * Tracepoint for guest CR access.
+ */
+TRACE_EVENT(kvm_cr,
+ TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
+ TP_ARGS(rw, cr, val),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, rw )
+ __field( unsigned int, cr )
+ __field( unsigned long, val )
+ ),
+
+ TP_fast_assign(
+ __entry->rw = rw;
+ __entry->cr = cr;
+ __entry->val = val;
+ ),
+
+ TP_printk("cr_%s %x = 0x%lx",
+ __entry->rw ? "write" : "read",
+ __entry->cr, __entry->val)
+);
+
+#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val)
+#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val)
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
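
For context, the events above follow the usual ftrace pattern: exactly one translation unit defines CREATE_TRACE_POINTS before including the header (the x86.c hunk below does this), which emits the tracepoint bodies; every other file just includes trace.h and calls the generated trace_*() stubs, enabled at run time under /sys/kernel/debug/tracing/events/kvm/. A minimal sketch of a call site (the function name is illustrative only):

/* Sketch, not part of the patch: typical consumer of the kvm_exit event. */
#include "trace.h"

static void example_report_exit(u32 exit_reason, unsigned long guest_rip)
{
	/* Compiles down to a patched-out no-op while the event is disabled. */
	trace_kvm_exit(exit_reason, guest_rip);
}
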
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e770bf349ec4..3a75db303283 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -25,6 +25,7 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
+#include <linux/ftrace_event.h>
#include "kvm_cache_regs.h"
#include "x86.h"
@@ -34,6 +35,8 @@
#include <asm/virtext.h>
#include <asm/mce.h>
+#include "trace.h"
+
#define __ex(x) __kvm_handle_fault_on_reboot(x)
MODULE_AUTHOR("Qumranet");
@@ -51,6 +54,10 @@ module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
static int __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);
+static int __read_mostly enable_unrestricted_guest = 1;
+module_param_named(unrestricted_guest,
+ enable_unrestricted_guest, bool, S_IRUGO);
+
static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
@@ -84,6 +91,14 @@ struct vcpu_vmx {
int guest_efer_loaded;
} host_state;
struct {
+ int vm86_active;
+ u8 save_iopl;
+ struct kvm_save_segment {
+ u16 selector;
+ unsigned long base;
+ u32 limit;
+ u32 ar;
+ } tr, es, ds, fs, gs;
struct {
bool pending;
u8 vector;
@@ -161,6 +176,8 @@ static struct kvm_vmx_segment_field {
VMX_SEGMENT_FIELD(LDTR),
};
+static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
+
/*
* Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
* away by decrementing the array size.
@@ -256,6 +273,26 @@ static inline bool cpu_has_vmx_flexpriority(void)
cpu_has_vmx_virtualize_apic_accesses();
}
+static inline bool cpu_has_vmx_ept_execute_only(void)
+{
+ return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
+}
+
+static inline bool cpu_has_vmx_eptp_uncacheable(void)
+{
+ return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
+}
+
+static inline bool cpu_has_vmx_eptp_writeback(void)
+{
+ return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
+}
+
+static inline bool cpu_has_vmx_ept_2m_page(void)
+{
+ return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
+}
+
static inline int cpu_has_vmx_invept_individual_addr(void)
{
return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
@@ -277,6 +314,12 @@ static inline int cpu_has_vmx_ept(void)
SECONDARY_EXEC_ENABLE_EPT;
}
+static inline int cpu_has_vmx_unrestricted_guest(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
+}
+
static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
return flexpriority_enabled &&
@@ -504,7 +547,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
eb |= 1u << BP_VECTOR;
}
- if (vcpu->arch.rmode.vm86_active)
+ if (to_vmx(vcpu)->rmode.vm86_active)
eb = ~0;
if (enable_ept)
eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
@@ -740,7 +783,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
- if (vcpu->arch.rmode.vm86_active)
+ if (to_vmx(vcpu)->rmode.vm86_active)
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
vmcs_writel(GUEST_RFLAGS, rflags);
}
@@ -797,12 +840,13 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}
- if (vcpu->arch.rmode.vm86_active) {
+ if (vmx->rmode.vm86_active) {
vmx->rmode.irq.pending = true;
vmx->rmode.irq.vector = nr;
vmx->rmode.irq.rip = kvm_rip_read(vcpu);
- if (nr == BP_VECTOR || nr == OF_VECTOR)
- vmx->rmode.irq.rip++;
+ if (kvm_exception_is_soft(nr))
+ vmx->rmode.irq.rip +=
+ vmx->vcpu.arch.event_exit_inst_len;
intr_info |= INTR_TYPE_SOFT_INTR;
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
@@ -940,7 +984,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
case MSR_EFER:
return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
- case MSR_IA32_TIME_STAMP_COUNTER:
+ case MSR_IA32_TSC:
data = guest_read_tsc();
break;
case MSR_IA32_SYSENTER_CS:
@@ -1000,22 +1044,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
case MSR_IA32_SYSENTER_ESP:
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
- case MSR_IA32_TIME_STAMP_COUNTER:
+ case MSR_IA32_TSC:
rdtscll(host_tsc);
guest_write_tsc(data, host_tsc);
break;
- case MSR_P6_PERFCTR0:
- case MSR_P6_PERFCTR1:
- case MSR_P6_EVNTSEL0:
- case MSR_P6_EVNTSEL1:
- /*
- * Just discard all writes to the performance counters; this
- * should keep both older linux and windows 64-bit guests
- * happy
- */
- pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data);
-
- break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, data);
@@ -1046,6 +1078,10 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
case VCPU_REGS_RIP:
vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
break;
+ case VCPU_EXREG_PDPTR:
+ if (enable_ept)
+ ept_save_pdptrs(vcpu);
+ break;
default:
break;
}
@@ -1203,7 +1239,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_WBINVD_EXITING |
SECONDARY_EXEC_ENABLE_VPID |
- SECONDARY_EXEC_ENABLE_EPT;
+ SECONDARY_EXEC_ENABLE_EPT |
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -1333,8 +1370,13 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_vpid())
enable_vpid = 0;
- if (!cpu_has_vmx_ept())
+ if (!cpu_has_vmx_ept()) {
enable_ept = 0;
+ enable_unrestricted_guest = 0;
+ }
+
+ if (!cpu_has_vmx_unrestricted_guest())
+ enable_unrestricted_guest = 0;
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
@@ -1342,6 +1384,9 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_tpr_shadow())
kvm_x86_ops->update_cr8_intercept = NULL;
+ if (enable_ept && !cpu_has_vmx_ept_2m_page())
+ kvm_disable_largepages();
+
return alloc_kvm_area();
}
@@ -1372,15 +1417,15 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx->emulation_required = 1;
- vcpu->arch.rmode.vm86_active = 0;
+ vmx->rmode.vm86_active = 0;
- vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
- vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
- vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);
+ vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
+ vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
+ vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
flags = vmcs_readl(GUEST_RFLAGS);
flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
- flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
+ flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1391,10 +1436,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
if (emulate_invalid_guest_state)
return;
- fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
- fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
- fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
- fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
+ fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
+ fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
+ fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
+ fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);
vmcs_write16(GUEST_SS_SELECTOR, 0);
vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
@@ -1433,20 +1478,23 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
unsigned long flags;
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (enable_unrestricted_guest)
+ return;
+
vmx->emulation_required = 1;
- vcpu->arch.rmode.vm86_active = 1;
+ vmx->rmode.vm86_active = 1;
- vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
+ vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
- vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
+ vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
- vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
+ vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
flags = vmcs_readl(GUEST_RFLAGS);
- vcpu->arch.rmode.save_iopl
+ vmx->rmode.save_iopl
= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
@@ -1468,10 +1516,10 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_CS_BASE, 0xf0000);
vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
- fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
- fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
- fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
- fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
+ fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
+ fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
+ fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
+ fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
continue_rmode:
kvm_mmu_reset_context(vcpu);
@@ -1545,11 +1593,11 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
+ if (!test_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_dirty))
+ return;
+
if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
- if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
- printk(KERN_ERR "EPT: Fail to load pdptrs!\n");
- return;
- }
vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
@@ -1557,6 +1605,21 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
}
}
+static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
+{
+ if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
+ vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+ vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+ vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+ vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+ }
+
+ __set_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_avail);
+ __set_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
@@ -1571,7 +1634,6 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
CPU_BASED_CR3_STORE_EXITING));
vcpu->arch.cr0 = cr0;
vmx_set_cr4(vcpu, vcpu->arch.cr4);
- *hw_cr0 |= X86_CR0_PE | X86_CR0_PG;
*hw_cr0 &= ~X86_CR0_WP;
} else if (!is_paging(vcpu)) {
/* From nonpaging to paging */
@@ -1598,15 +1660,21 @@ static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
- unsigned long hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) |
- KVM_VM_CR0_ALWAYS_ON;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long hw_cr0;
+
+ if (enable_unrestricted_guest)
+ hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
+ | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
+ else
+ hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
vmx_fpu_deactivate(vcpu);
- if (vcpu->arch.rmode.vm86_active && (cr0 & X86_CR0_PE))
+ if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
enter_pmode(vcpu);
- if (!vcpu->arch.rmode.vm86_active && !(cr0 & X86_CR0_PE))
+ if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
enter_rmode(vcpu);
#ifdef CONFIG_X86_64
@@ -1650,8 +1718,6 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (enable_ept) {
eptp = construct_eptp(cr3);
vmcs_write64(EPT_POINTER, eptp);
- ept_sync_context(eptp);
- ept_load_pdptrs(vcpu);
guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
VMX_EPT_IDENTITY_PAGETABLE_ADDR;
}
@@ -1664,7 +1730,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.vm86_active ?
+ unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
vcpu->arch.cr4 = cr4;
@@ -1744,20 +1810,21 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
static void vmx_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
u32 ar;
- if (vcpu->arch.rmode.vm86_active && seg == VCPU_SREG_TR) {
- vcpu->arch.rmode.tr.selector = var->selector;
- vcpu->arch.rmode.tr.base = var->base;
- vcpu->arch.rmode.tr.limit = var->limit;
- vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
+ if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
+ vmx->rmode.tr.selector = var->selector;
+ vmx->rmode.tr.base = var->base;
+ vmx->rmode.tr.limit = var->limit;
+ vmx->rmode.tr.ar = vmx_segment_access_rights(var);
return;
}
vmcs_writel(sf->base, var->base);
vmcs_write32(sf->limit, var->limit);
vmcs_write16(sf->selector, var->selector);
- if (vcpu->arch.rmode.vm86_active && var->s) {
+ if (vmx->rmode.vm86_active && var->s) {
/*
* Hack real-mode segments into vm86 compatibility.
*/
@@ -1766,6 +1833,21 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
ar = 0xf3;
} else
ar = vmx_segment_access_rights(var);
+
+ /*
+ * Fix the "Accessed" bit in AR field of segment registers for older
+ * qemu binaries.
+ * IA32 arch specifies that at the time of processor reset the
+ * "Accessed" bit in the AR field of segment registers is 1. And qemu
+ * is setting it to 0 in the userland code. This causes invalid guest
+ * state vmexit when "unrestricted guest" mode is turned on.
+ * Fix for this setup issue in cpu_reset is being pushed in the qemu
+ * tree. Newer qemu binaries with that qemu fix would not need this
+ * kvm hack.
+ */
+ if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
+ ar |= 0x1; /* Accessed */
+
vmcs_write32(sf->ar_bytes, ar);
}
@@ -2062,11 +2144,19 @@ out:
static void seg_setup(int seg)
{
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+ unsigned int ar;
vmcs_write16(sf->selector, 0);
vmcs_writel(sf->base, 0);
vmcs_write32(sf->limit, 0xffff);
- vmcs_write32(sf->ar_bytes, 0xf3);
+ if (enable_unrestricted_guest) {
+ ar = 0x93;
+ if (seg == VCPU_SREG_CS)
+ ar |= 0x08; /* code segment */
+ } else
+ ar = 0xf3;
+
+ vmcs_write32(sf->ar_bytes, ar);
}
static int alloc_apic_access_page(struct kvm *kvm)
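
As a worked decode of the access-rights values chosen in seg_setup() above (standard VMX AR-byte encoding, mirroring the segment descriptor attribute bits):

/*
 * 0xf3 = P=1, DPL=3, S=1, type=0011b (read/write data, accessed)
 *        -> the vm86-compatible ring-3 segment used without
 *           unrestricted guest;
 * 0x93 = P=1, DPL=0, S=1, type=0011b (read/write data, accessed)
 * 0x9b = 0x93 | 0x08, type=1011b     (execute/read code, accessed)
 *        -> flat ring-0 data and code segments programmed when
 *           enable_unrestricted_guest is set.
 */
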
@@ -2209,6 +2299,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
if (!enable_ept)
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+ if (!enable_unrestricted_guest)
+ exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
@@ -2326,14 +2418,14 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
goto out;
}
- vmx->vcpu.arch.rmode.vm86_active = 0;
+ vmx->rmode.vm86_active = 0;
vmx->soft_vnmi_blocked = 0;
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
kvm_set_cr8(&vmx->vcpu, 0);
msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
- if (vmx->vcpu.vcpu_id == 0)
+ if (kvm_vcpu_is_bsp(&vmx->vcpu))
msr |= MSR_IA32_APICBASE_BSP;
kvm_set_apic_base(&vmx->vcpu, msr);
@@ -2344,7 +2436,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
* GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
* insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
*/
- if (vmx->vcpu.vcpu_id == 0) {
+ if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
vmcs_writel(GUEST_CS_BASE, 0x000f0000);
} else {
@@ -2373,7 +2465,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_SYSENTER_EIP, 0);
vmcs_writel(GUEST_RFLAGS, 0x02);
- if (vmx->vcpu.vcpu_id == 0)
+ if (kvm_vcpu_is_bsp(&vmx->vcpu))
kvm_rip_write(vcpu, 0xfff0);
else
kvm_rip_write(vcpu, 0);
@@ -2461,13 +2553,16 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
uint32_t intr;
int irq = vcpu->arch.interrupt.nr;
- KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
+ trace_kvm_inj_virq(irq);
++vcpu->stat.irq_injections;
- if (vcpu->arch.rmode.vm86_active) {
+ if (vmx->rmode.vm86_active) {
vmx->rmode.irq.pending = true;
vmx->rmode.irq.vector = irq;
vmx->rmode.irq.rip = kvm_rip_read(vcpu);
+ if (vcpu->arch.interrupt.soft)
+ vmx->rmode.irq.rip +=
+ vmx->vcpu.arch.event_exit_inst_len;
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
@@ -2502,7 +2597,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
}
++vcpu->stat.nmi_injections;
- if (vcpu->arch.rmode.vm86_active) {
+ if (vmx->rmode.vm86_active) {
vmx->rmode.irq.pending = true;
vmx->rmode.irq.vector = NMI_VECTOR;
vmx->rmode.irq.rip = kvm_rip_read(vcpu);
@@ -2659,14 +2754,14 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (enable_ept)
BUG();
cr2 = vmcs_readl(EXIT_QUALIFICATION);
- KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
- (u32)((u64)cr2 >> 32), handler);
+ trace_kvm_page_fault(cr2, error_code);
+
if (kvm_event_needs_reinjection(vcpu))
kvm_mmu_unprotect_page_virt(vcpu, cr2);
return kvm_mmu_page_fault(vcpu, cr2, error_code);
}
- if (vcpu->arch.rmode.vm86_active &&
+ if (vmx->rmode.vm86_active &&
handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
error_code)) {
if (vcpu->arch.halt_request) {
@@ -2707,7 +2802,6 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
++vcpu->stat.irq_exits;
- KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
return 1;
}
@@ -2755,7 +2849,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- unsigned long exit_qualification;
+ unsigned long exit_qualification, val;
int cr;
int reg;
@@ -2764,21 +2858,19 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
- KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
- (u32)kvm_register_read(vcpu, reg),
- (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
- handler);
+ val = kvm_register_read(vcpu, reg);
+ trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
- kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
+ kvm_set_cr0(vcpu, val);
skip_emulated_instruction(vcpu);
return 1;
case 3:
- kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
+ kvm_set_cr3(vcpu, val);
skip_emulated_instruction(vcpu);
return 1;
case 4:
- kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
+ kvm_set_cr4(vcpu, val);
skip_emulated_instruction(vcpu);
return 1;
case 8: {
@@ -2800,23 +2892,19 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.cr0 &= ~X86_CR0_TS;
vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
vmx_fpu_activate(vcpu);
- KVMTRACE_0D(CLTS, vcpu, handler);
skip_emulated_instruction(vcpu);
return 1;
case 1: /*mov from cr*/
switch (cr) {
case 3:
kvm_register_write(vcpu, reg, vcpu->arch.cr3);
- KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
- (u32)kvm_register_read(vcpu, reg),
- (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
- handler);
+ trace_kvm_cr_read(cr, vcpu->arch.cr3);
skip_emulated_instruction(vcpu);
return 1;
case 8:
- kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
- KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
- (u32)kvm_register_read(vcpu, reg), handler);
+ val = kvm_get_cr8(vcpu);
+ kvm_register_write(vcpu, reg, val);
+ trace_kvm_cr_read(cr, val);
skip_emulated_instruction(vcpu);
return 1;
}
@@ -2884,7 +2972,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
val = 0;
}
kvm_register_write(vcpu, reg, val);
- KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
} else {
val = vcpu->arch.regs[reg];
switch (dr) {
@@ -2917,7 +3004,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
break;
}
- KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler);
}
skip_emulated_instruction(vcpu);
return 1;
@@ -2939,8 +3025,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
- KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
- handler);
+ trace_kvm_msr_read(ecx, data);
/* FIXME: handling of bits 32:63 of rax, rdx */
vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
@@ -2955,8 +3040,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
- KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
- handler);
+ trace_kvm_msr_write(ecx, data);
if (vmx_set_msr(vcpu, ecx, data) != 0) {
kvm_inject_gp(vcpu, 0);
@@ -2983,7 +3067,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
- KVMTRACE_0D(PEND_INTR, vcpu, handler);
++vcpu->stat.irq_window_exits;
/*
@@ -3012,6 +3095,12 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -3124,14 +3213,98 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
(long unsigned int)exit_qualification);
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- kvm_run->hw.hardware_exit_reason = 0;
- return -ENOTSUPP;
+ kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
+ return 0;
}
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ trace_kvm_page_fault(gpa, exit_qualification);
return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
}
+static u64 ept_rsvd_mask(u64 spte, int level)
+{
+ int i;
+ u64 mask = 0;
+
+ for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
+ mask |= (1ULL << i);
+
+ if (level > 2)
+ /* bits 7:3 reserved */
+ mask |= 0xf8;
+ else if (level == 2) {
+ if (spte & (1ULL << 7))
+ /* 2MB ref, bits 20:12 reserved */
+ mask |= 0x1ff000;
+ else
+ /* bits 6:3 reserved */
+ mask |= 0x78;
+ }
+
+ return mask;
+}
+
+static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
+ int level)
+{
+ printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
+
+ /* 010b (write-only) */
+ WARN_ON((spte & 0x7) == 0x2);
+
+ /* 110b (write/execute) */
+ WARN_ON((spte & 0x7) == 0x6);
+
+ /* 100b (execute-only) and value not supported by logical processor */
+ if (!cpu_has_vmx_ept_execute_only())
+ WARN_ON((spte & 0x7) == 0x4);
+
+ /* not 000b */
+ if ((spte & 0x7)) {
+ u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
+
+ if (rsvd_bits != 0) {
+ printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
+ __func__, rsvd_bits);
+ WARN_ON(1);
+ }
+
+ if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
+ u64 ept_mem_type = (spte & 0x38) >> 3;
+
+ if (ept_mem_type == 2 || ept_mem_type == 3 ||
+ ept_mem_type == 7) {
+ printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
+ __func__, ept_mem_type);
+ WARN_ON(1);
+ }
+ }
+ }
+}
+
+static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ u64 sptes[4];
+ int nr_sptes, i;
+ gpa_t gpa;
+
+ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+
+ printk(KERN_ERR "EPT: Misconfiguration.\n");
+ printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
+
+ nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
+
+ for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
+ ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
+
+ kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+
+ return 0;
+}
+
static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u32 cpu_based_vm_exec_control;
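
A worked example for ept_rsvd_mask() above, assuming a CPU that reports 40 physical address bits (the width varies by processor):

/*
 * boot_cpu_data.x86_phys_bits == 40 (assumed):
 *   bits 51:41 above the address width       -> 0x000ffe0000000000
 *   level 4/3 entries also reserve bits 7:3  -> | 0xf8
 *     ept_rsvd_mask(spte, 3) == 0x000ffe00000000f8
 *   a level 2 entry with bit 7 (2MB page) set reserves bits 20:12:
 *     ept_rsvd_mask(spte, 2) == 0x000ffe00001ff000
 */
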
@@ -3198,12 +3371,22 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_VMCALL] = handle_vmcall,
+ [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
+ [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
+ [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
+ [EXIT_REASON_VMPTRST] = handle_vmx_insn,
+ [EXIT_REASON_VMREAD] = handle_vmx_insn,
+ [EXIT_REASON_VMRESUME] = handle_vmx_insn,
+ [EXIT_REASON_VMWRITE] = handle_vmx_insn,
+ [EXIT_REASON_VMOFF] = handle_vmx_insn,
+ [EXIT_REASON_VMON] = handle_vmx_insn,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_WBINVD] = handle_wbinvd,
[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
- [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
[EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
+ [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
+ [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
};
static const int kvm_vmx_max_exit_handlers =
@@ -3219,8 +3402,7 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
u32 exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
- KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
- (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
+ trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
/* If we need to emulate an MMIO from handle_invalid_guest_state
* we just return 0 */
@@ -3232,10 +3414,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 /* Accesses to CR3 don't cause a VMExit in paging mode, so we need
* to sync with guest real CR3. */
- if (enable_ept && is_paging(vcpu)) {
+ if (enable_ept && is_paging(vcpu))
vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
- ept_load_pdptrs(vcpu);
- }
if (unlikely(vmx->fail)) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -3311,10 +3491,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
/* We need to handle NMIs before interrupts are enabled */
if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
- (exit_intr_info & INTR_INFO_VALID_MASK)) {
- KVMTRACE_0D(NMI, &vmx->vcpu, handler);
+ (exit_intr_info & INTR_INFO_VALID_MASK))
asm("int $2");
- }
idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
@@ -3419,6 +3597,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (enable_ept && is_paging(vcpu)) {
+ vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
+ ept_load_pdptrs(vcpu);
+ }
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
vmx->entry_time = ktime_get();
@@ -3434,6 +3616,14 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+ /* When single-stepping over STI and MOV SS, we must clear the
+ * corresponding interruptibility bits in the guest state. Otherwise
+	 * vmentry fails as it then expects bit 14 (BS) in pending debug
+	 * exceptions to be set, but that's not correct for the guest debugging
+ * case. */
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ vmx_set_interrupt_shadow(vcpu, 0);
+
/*
* Loading guest fpu may have cleared host cr0.ts
*/
@@ -3450,11 +3640,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
"mov %%"R"sp, %c[host_rsp](%0) \n\t"
__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
"1: \n\t"
+ /* Reload cr2 if changed */
+ "mov %c[cr2](%0), %%"R"ax \n\t"
+ "mov %%cr2, %%"R"dx \n\t"
+ "cmp %%"R"ax, %%"R"dx \n\t"
+ "je 2f \n\t"
+ "mov %%"R"ax, %%cr2 \n\t"
+ "2: \n\t"
 /* Check if vmlaunch or vmresume is needed */
"cmpl $0, %c[launched](%0) \n\t"
/* Load guest registers. Don't clobber flags. */
- "mov %c[cr2](%0), %%"R"ax \n\t"
- "mov %%"R"ax, %%cr2 \n\t"
"mov %c[rax](%0), %%"R"ax \n\t"
"mov %c[rbx](%0), %%"R"bx \n\t"
"mov %c[rdx](%0), %%"R"dx \n\t"
@@ -3532,7 +3727,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
#endif
);
- vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
+ vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
+ | (1 << VCPU_EXREG_PDPTR));
vcpu->arch.regs_dirty = 0;
get_debugreg(vcpu->arch.dr6, 6);
@@ -3684,6 +3880,29 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
return ret;
}
+static const struct trace_print_flags vmx_exit_reasons_str[] = {
+ { EXIT_REASON_EXCEPTION_NMI, "exception" },
+ { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" },
+ { EXIT_REASON_TRIPLE_FAULT, "triple_fault" },
+ { EXIT_REASON_NMI_WINDOW, "nmi_window" },
+ { EXIT_REASON_IO_INSTRUCTION, "io_instruction" },
+ { EXIT_REASON_CR_ACCESS, "cr_access" },
+ { EXIT_REASON_DR_ACCESS, "dr_access" },
+ { EXIT_REASON_CPUID, "cpuid" },
+ { EXIT_REASON_MSR_READ, "rdmsr" },
+ { EXIT_REASON_MSR_WRITE, "wrmsr" },
+ { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" },
+ { EXIT_REASON_HLT, "halt" },
+ { EXIT_REASON_INVLPG, "invlpg" },
+ { EXIT_REASON_VMCALL, "hypercall" },
+ { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" },
+ { EXIT_REASON_APIC_ACCESS, "apic_access" },
+ { EXIT_REASON_WBINVD, "wbinvd" },
+ { EXIT_REASON_TASK_SWITCH, "task_switch" },
+ { EXIT_REASON_EPT_VIOLATION, "ept_violation" },
+ { -1, NULL }
+};
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -3743,6 +3962,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_tss_addr = vmx_set_tss_addr,
.get_tdp_level = get_ept_level,
.get_mt_mask = vmx_get_mt_mask,
+
+ .exit_reasons_str = vmx_exit_reasons_str,
};
static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 249540f98513..efb0b6850129 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -37,11 +37,16 @@
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
+#include <trace/events/kvm.h>
+#undef TRACE_INCLUDE_FILE
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
+#include <asm/mce.h>
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
@@ -55,6 +60,10 @@
| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
+
+#define KVM_MAX_MCE_BANKS 32
+#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
+
/* EFER defaults:
* - enable syscall per default because its emulated by KVM
* - enable LME and LMA per default on 64 bit KVM
@@ -70,12 +79,13 @@ static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
- u32 function, u32 index);
struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
+int ignore_msrs = 0;
+module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
+
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
{ "pf_guest", VCPU_STAT(pf_guest) },
@@ -176,16 +186,22 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
++vcpu->stat.pf_guest;
if (vcpu->arch.exception.pending) {
- if (vcpu->arch.exception.nr == PF_VECTOR) {
- printk(KERN_DEBUG "kvm: inject_page_fault:"
- " double fault 0x%lx\n", addr);
- vcpu->arch.exception.nr = DF_VECTOR;
- vcpu->arch.exception.error_code = 0;
- } else if (vcpu->arch.exception.nr == DF_VECTOR) {
+ switch(vcpu->arch.exception.nr) {
+ case DF_VECTOR:
/* triple fault -> shutdown */
set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+ return;
+ case PF_VECTOR:
+ vcpu->arch.exception.nr = DF_VECTOR;
+ vcpu->arch.exception.error_code = 0;
+ return;
+ default:
+ /* replace the previous exception with a new one in the hope
+ that instruction re-execution will regenerate the lost
+ exception */
+ vcpu->arch.exception.pending = false;
+ break;
}
- return;
}
vcpu->arch.cr2 = addr;
kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
@@ -232,7 +248,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
goto out;
}
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
- if (is_present_pte(pdpte[i]) &&
+ if (is_present_gpte(pdpte[i]) &&
(pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
ret = 0;
goto out;
@@ -241,6 +257,10 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
ret = 1;
memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
+ __set_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_avail);
+ __set_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_dirty);
out:
return ret;
@@ -256,6 +276,10 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
if (is_long_mode(vcpu) || !is_pae(vcpu))
return false;
+ if (!test_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_avail))
+ return true;
+
r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
if (r < 0)
goto out;
@@ -328,9 +352,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
- KVMTRACE_1D(LMSW, vcpu,
- (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
- handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
@@ -466,7 +487,7 @@ static u32 msrs_to_save[] = {
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
- MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+ MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};
@@ -644,8 +665,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
- kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
- &vcpu->hv_clock.tsc_timestamp);
+ kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
ktime_get_ts(&ts);
local_irq_restore(flags);
@@ -704,11 +724,48 @@ static bool msr_mtrr_valid(unsigned msr)
return false;
}
+static bool valid_pat_type(unsigned t)
+{
+ return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
+}
+
+static bool valid_mtrr_type(unsigned t)
+{
+ return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
+}
+
+static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+ int i;
+
+ if (!msr_mtrr_valid(msr))
+ return false;
+
+ if (msr == MSR_IA32_CR_PAT) {
+ for (i = 0; i < 8; i++)
+ if (!valid_pat_type((data >> (i * 8)) & 0xff))
+ return false;
+ return true;
+ } else if (msr == MSR_MTRRdefType) {
+ if (data & ~0xcff)
+ return false;
+ return valid_mtrr_type(data & 0xff);
+ } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
+ for (i = 0; i < 8 ; i++)
+ if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
+ return false;
+ return true;
+ }
+
+ /* variable MTRRs */
+ return valid_mtrr_type(data & 0xff);
+}
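
The two type checks above encode the architecturally valid memory-type values as bitmasks: bit t of the mask is set when type t is allowed, so 0xf3 permits 0, 1, 4, 5, 6, 7 for PAT entries and 0x73 permits 0, 1, 4, 5, 6 for MTRRs. A small standalone sketch of the same test, with hypothetical names, for checking the masks by hand:

    #include <stdio.h>

    /* Mirrors valid_pat_type()/valid_mtrr_type(): a set bit at position t
     * means memory type t is permitted. */
    static int type_allowed(unsigned t, unsigned mask)
    {
            return t < 8 && ((1u << t) & mask);
    }

    int main(void)
    {
            unsigned t;

            for (t = 0; t < 8; t++)
                    printf("type %u: PAT %s, MTRR %s\n", t,
                           type_allowed(t, 0xf3) ? "ok" : "reserved",
                           type_allowed(t, 0x73) ? "ok" : "reserved");
            return 0;
    }
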
+
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
- if (!msr_mtrr_valid(msr))
+ if (!mtrr_valid(vcpu, msr, data))
return 1;
if (msr == MSR_MTRRdefType) {
@@ -741,23 +798,53 @@ static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return 0;
}
+static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+ u64 mcg_cap = vcpu->arch.mcg_cap;
+ unsigned bank_num = mcg_cap & 0xff;
+
+ switch (msr) {
+ case MSR_IA32_MCG_STATUS:
+ vcpu->arch.mcg_status = data;
+ break;
+ case MSR_IA32_MCG_CTL:
+ if (!(mcg_cap & MCG_CTL_P))
+ return 1;
+ if (data != 0 && data != ~(u64)0)
+ return -1;
+ vcpu->arch.mcg_ctl = data;
+ break;
+ default:
+ if (msr >= MSR_IA32_MC0_CTL &&
+ msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
+ u32 offset = msr - MSR_IA32_MC0_CTL;
+ /* only 0 or all 1s can be written to IA32_MCi_CTL */
+ if ((offset & 0x3) == 0 &&
+ data != 0 && data != ~(u64)0)
+ return -1;
+ vcpu->arch.mce_banks[offset] = data;
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
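
Each machine-check bank occupies four consecutive MSRs starting at MSR_IA32_MC0_CTL (CTL, STATUS, ADDR, MISC), which is why set_msr_mce() indexes mce_banks with msr - MSR_IA32_MC0_CTL and only restricts offsets whose low two bits are zero (the CTL registers) to 0 or all 1s. A hedged sketch of that decoding, assuming the architectural base of 0x400 for MC0_CTL:

    #include <stdio.h>

    #define MC0_CTL_BASE 0x400u  /* assumption: MSR_IA32_MC0_CTL */

    static const char *bank_reg[4] = { "CTL", "STATUS", "ADDR", "MISC" };

    int main(void)
    {
            unsigned msr;

            for (msr = MC0_CTL_BASE; msr < MC0_CTL_BASE + 8; msr++) {
                    unsigned offset = msr - MC0_CTL_BASE;

                    printf("MSR 0x%x -> bank %u %s%s\n", msr, offset / 4,
                           bank_reg[offset & 3],
                           (offset & 3) == 0 ? " (only 0 or ~0 accepted)" : "");
            }
            return 0;
    }
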
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
case MSR_EFER:
set_efer(vcpu, data);
break;
- case MSR_IA32_MC0_STATUS:
- pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
- __func__, data);
- break;
- case MSR_IA32_MCG_STATUS:
- pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
- __func__, data);
+ case MSR_K7_HWCR:
+ data &= ~(u64)0x40; /* ignore flush filter disable */
+ if (data != 0) {
+ pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
+ data);
+ return 1;
+ }
break;
- case MSR_IA32_MCG_CTL:
- pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
- __func__, data);
+ case MSR_AMD64_NB_CFG:
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
@@ -780,6 +867,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case MSR_IA32_APICBASE:
kvm_set_apic_base(vcpu, data);
break;
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+ return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
@@ -813,9 +902,50 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
kvm_request_guest_time_update(vcpu);
break;
}
+ case MSR_IA32_MCG_CTL:
+ case MSR_IA32_MCG_STATUS:
+ case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
+ return set_msr_mce(vcpu, msr, data);
+
+ /* Performance counters are not protected by a CPUID bit,
+ * so we should check all of them in the generic path for the sake of
+ * cross vendor migration.
+ * Writing a zero into the event select MSRs disables them,
+ * which we perfectly emulate ;-). Any other value should at least be
+ * reported, since some guests depend on them.
+ */
+ case MSR_P6_EVNTSEL0:
+ case MSR_P6_EVNTSEL1:
+ case MSR_K7_EVNTSEL0:
+ case MSR_K7_EVNTSEL1:
+ case MSR_K7_EVNTSEL2:
+ case MSR_K7_EVNTSEL3:
+ if (data != 0)
+ pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
+ "0x%x data 0x%llx\n", msr, data);
+ break;
+ /* at least RHEL 4 unconditionally writes to the perfctr registers,
+ * so we ignore writes to make it happy.
+ */
+ case MSR_P6_PERFCTR0:
+ case MSR_P6_PERFCTR1:
+ case MSR_K7_PERFCTR0:
+ case MSR_K7_PERFCTR1:
+ case MSR_K7_PERFCTR2:
+ case MSR_K7_PERFCTR3:
+ pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
+ "0x%x data 0x%llx\n", msr, data);
+ break;
default:
- pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
- return 1;
+ if (!ignore_msrs) {
+ pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
+ msr, data);
+ return 1;
+ } else {
+ pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
+ msr, data);
+ break;
+ }
}
return 0;
}
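
The new APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff case above forwards x2APIC accesses to the local APIC code. In x2APIC mode the local APIC registers are exposed as MSRs at APIC_BASE_MSR + (xAPIC MMIO offset >> 4), with APIC_BASE_MSR being 0x800. A small sketch of that mapping for a few well-known registers:

    #include <stdio.h>

    #define APIC_BASE_MSR 0x800  /* x2APIC MSR window */

    int main(void)
    {
            /* xAPIC MMIO offsets: 0x20 = ID, 0x80 = TPR, 0x300 = ICR low. */
            unsigned offsets[] = { 0x20, 0x80, 0x300 };
            unsigned i;

            for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
                    printf("xAPIC offset 0x%03x -> MSR 0x%x\n",
                           offsets[i], APIC_BASE_MSR + (offsets[i] >> 4));
            return 0;
    }
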
@@ -868,26 +998,47 @@ static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
return 0;
}
-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data;
+ u64 mcg_cap = vcpu->arch.mcg_cap;
+ unsigned bank_num = mcg_cap & 0xff;
switch (msr) {
- case 0xc0010010: /* SYSCFG */
- case 0xc0010015: /* HWCR */
- case MSR_IA32_PLATFORM_ID:
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
- case MSR_IA32_MC0_CTL:
- case MSR_IA32_MCG_STATUS:
+ data = 0;
+ break;
case MSR_IA32_MCG_CAP:
+ data = vcpu->arch.mcg_cap;
+ break;
case MSR_IA32_MCG_CTL:
- case MSR_IA32_MC0_MISC:
- case MSR_IA32_MC0_MISC+4:
- case MSR_IA32_MC0_MISC+8:
- case MSR_IA32_MC0_MISC+12:
- case MSR_IA32_MC0_MISC+16:
- case MSR_IA32_MC0_MISC+20:
+ if (!(mcg_cap & MCG_CTL_P))
+ return 1;
+ data = vcpu->arch.mcg_ctl;
+ break;
+ case MSR_IA32_MCG_STATUS:
+ data = vcpu->arch.mcg_status;
+ break;
+ default:
+ if (msr >= MSR_IA32_MC0_CTL &&
+ msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
+ u32 offset = msr - MSR_IA32_MC0_CTL;
+ data = vcpu->arch.mce_banks[offset];
+ break;
+ }
+ return 1;
+ }
+ *pdata = data;
+ return 0;
+}
+
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+ u64 data;
+
+ switch (msr) {
+ case MSR_IA32_PLATFORM_ID:
case MSR_IA32_UCODE_REV:
case MSR_IA32_EBL_CR_POWERON:
case MSR_IA32_DEBUGCTLMSR:
@@ -895,9 +1046,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_LASTBRANCHTOIP:
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
+ case MSR_K8_SYSCFG:
+ case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
+ case MSR_K7_EVNTSEL0:
+ case MSR_K8_INT_PENDING_MSG:
+ case MSR_AMD64_NB_CFG:
data = 0;
break;
case MSR_MTRRcap:
@@ -911,6 +1067,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_APICBASE:
data = kvm_get_apic_base(vcpu);
break;
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+ return kvm_x2apic_msr_read(vcpu, msr, pdata);
+ break;
case MSR_IA32_MISC_ENABLE:
data = vcpu->arch.ia32_misc_enable_msr;
break;
@@ -929,9 +1088,22 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_KVM_SYSTEM_TIME:
data = vcpu->arch.time;
break;
+ case MSR_IA32_P5_MC_ADDR:
+ case MSR_IA32_P5_MC_TYPE:
+ case MSR_IA32_MCG_CAP:
+ case MSR_IA32_MCG_CTL:
+ case MSR_IA32_MCG_STATUS:
+ case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
+ return get_msr_mce(vcpu, msr, pdata);
default:
- pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
- return 1;
+ if (!ignore_msrs) {
+ pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
+ return 1;
+ } else {
+ pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
+ data = 0;
+ }
+ break;
}
*pdata = data;
return 0;
@@ -1030,6 +1202,8 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_REINJECT_CONTROL:
case KVM_CAP_IRQ_INJECT_STATUS:
case KVM_CAP_ASSIGN_DEV_IRQ:
+ case KVM_CAP_IRQFD:
+ case KVM_CAP_PIT2:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -1050,6 +1224,9 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_IOMMU:
r = iommu_found();
break;
+ case KVM_CAP_MCE:
+ r = KVM_MAX_MCE_BANKS;
+ break;
default:
r = 0;
break;
@@ -1110,6 +1287,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_X86_GET_MCE_CAP_SUPPORTED: {
+ u64 mce_cap;
+
+ mce_cap = KVM_MCE_CAP_SUPPORTED;
+ r = -EFAULT;
+ if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
+ goto out;
+ r = 0;
+ break;
+ }
default:
r = -EINVAL;
}
@@ -1190,6 +1377,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
vcpu->arch.cpuid_nent = cpuid->nent;
cpuid_fix_nx_cap(vcpu);
r = 0;
+ kvm_apic_set_version(vcpu);
out_free:
vfree(cpuid_entries);
@@ -1211,6 +1399,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
vcpu->arch.cpuid_nent = cpuid->nent;
+ kvm_apic_set_version(vcpu);
return 0;
out:
@@ -1286,7 +1475,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
0 /* Reserved, DCA */ | F(XMM4_1) |
- F(XMM4_2) | 0 /* x2APIC */ | F(MOVBE) | F(POPCNT) |
+ F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
0 /* Reserved, XSAVE, OSXSAVE */;
/* cpuid 0x80000001.ecx */
const u32 kvm_supported_word6_x86_features =
@@ -1398,6 +1587,10 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
do_cpuid_ent(&cpuid_entries[nent], func, 0,
&nent, cpuid->nent);
+ r = -E2BIG;
+ if (nent >= cpuid->nent)
+ goto out_free;
+
r = -EFAULT;
if (copy_to_user(entries, cpuid_entries,
nent * sizeof(struct kvm_cpuid_entry2)))
@@ -1466,6 +1659,80 @@ static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
return 0;
}
+static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
+ u64 mcg_cap)
+{
+ int r;
+ unsigned bank_num = mcg_cap & 0xff, bank;
+
+ r = -EINVAL;
+ if (!bank_num)
+ goto out;
+ if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
+ goto out;
+ r = 0;
+ vcpu->arch.mcg_cap = mcg_cap;
+ /* Init IA32_MCG_CTL to all 1s */
+ if (mcg_cap & MCG_CTL_P)
+ vcpu->arch.mcg_ctl = ~(u64)0;
+ /* Init IA32_MCi_CTL to all 1s */
+ for (bank = 0; bank < bank_num; bank++)
+ vcpu->arch.mce_banks[bank*4] = ~(u64)0;
+out:
+ return r;
+}
+
+static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
+ struct kvm_x86_mce *mce)
+{
+ u64 mcg_cap = vcpu->arch.mcg_cap;
+ unsigned bank_num = mcg_cap & 0xff;
+ u64 *banks = vcpu->arch.mce_banks;
+
+ if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
+ return -EINVAL;
+ /*
+ * if IA32_MCG_CTL is not all 1s, the uncorrected error
+ * reporting is disabled
+ */
+ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
+ vcpu->arch.mcg_ctl != ~(u64)0)
+ return 0;
+ banks += 4 * mce->bank;
+ /*
+ * if IA32_MCi_CTL is not all 1s, the uncorrected error
+ * reporting is disabled for the bank
+ */
+ if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
+ return 0;
+ if (mce->status & MCI_STATUS_UC) {
+ if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
+ !(vcpu->arch.cr4 & X86_CR4_MCE)) {
+ printk(KERN_DEBUG "kvm: set_mce: "
+ "injects mce exception while "
+ "previous one is in progress!\n");
+ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+ return 0;
+ }
+ if (banks[1] & MCI_STATUS_VAL)
+ mce->status |= MCI_STATUS_OVER;
+ banks[2] = mce->addr;
+ banks[3] = mce->misc;
+ vcpu->arch.mcg_status = mce->mcg_status;
+ banks[1] = mce->status;
+ kvm_queue_exception(vcpu, MC_VECTOR);
+ } else if (!(banks[1] & MCI_STATUS_VAL)
+ || !(banks[1] & MCI_STATUS_UC)) {
+ if (banks[1] & MCI_STATUS_VAL)
+ mce->status |= MCI_STATUS_OVER;
+ banks[2] = mce->addr;
+ banks[3] = mce->misc;
+ banks[1] = mce->status;
+ } else
+ banks[1] |= MCI_STATUS_OVER;
+ return 0;
+}
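
From userspace the flow added here is: optionally query KVM_X86_GET_MCE_CAP_SUPPORTED, program the vcpu's MCG_CAP via KVM_X86_SETUP_MCE, then inject events with KVM_X86_SET_MCE. A rough sketch, assuming headers that carry this series and a vcpu fd obtained from KVM_CREATE_VCPU; only the struct fields referenced above are used, and the MCA bit values are defined locally:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Architectural MCA bits, defined locally so the sketch is self-contained. */
    #define MCG_CTL_P        (1ULL << 8)
    #define MCG_STATUS_MCIP  (1ULL << 2)
    #define MCI_STATUS_VAL   (1ULL << 63)
    #define MCI_STATUS_UC    (1ULL << 61)
    #define MCI_STATUS_ADDRV (1ULL << 58)

    /* Sketch only: error handling omitted, vcpu_fd comes from KVM_CREATE_VCPU. */
    static int inject_uc_error(int vcpu_fd, uint64_t addr)
    {
            uint64_t mcg_cap = MCG_CTL_P | 10;   /* MCG_CTL present, 10 banks */
            struct kvm_x86_mce mce;

            if (ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap) < 0)
                    return -1;

            memset(&mce, 0, sizeof(mce));
            mce.bank = 0;
            mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_ADDRV;
            mce.addr = addr;
            mce.mcg_status = MCG_STATUS_MCIP;

            return ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
    }
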
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -1599,6 +1866,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break;
}
+ case KVM_X86_SETUP_MCE: {
+ u64 mcg_cap;
+
+ r = -EFAULT;
+ if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
+ goto out;
+ r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
+ break;
+ }
+ case KVM_X86_SET_MCE: {
+ struct kvm_x86_mce mce;
+
+ r = -EFAULT;
+ if (copy_from_user(&mce, argp, sizeof mce))
+ goto out;
+ r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
+ break;
+ }
default:
r = -EINVAL;
}
@@ -1738,19 +2023,25 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_PIC_MASTER:
+ spin_lock(&pic_irqchip(kvm)->lock);
memcpy(&pic_irqchip(kvm)->pics[0],
&chip->chip.pic,
sizeof(struct kvm_pic_state));
+ spin_unlock(&pic_irqchip(kvm)->lock);
break;
case KVM_IRQCHIP_PIC_SLAVE:
+ spin_lock(&pic_irqchip(kvm)->lock);
memcpy(&pic_irqchip(kvm)->pics[1],
&chip->chip.pic,
sizeof(struct kvm_pic_state));
+ spin_unlock(&pic_irqchip(kvm)->lock);
break;
case KVM_IRQCHIP_IOAPIC:
+ mutex_lock(&kvm->irq_lock);
memcpy(ioapic_irqchip(kvm),
&chip->chip.ioapic,
sizeof(struct kvm_ioapic_state));
+ mutex_unlock(&kvm->irq_lock);
break;
default:
r = -EINVAL;
@@ -1764,7 +2055,9 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int r = 0;
+ mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
+ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
}
@@ -1772,8 +2065,10 @@ static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int r = 0;
+ mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
kvm_pit_load_count(kvm, 0, ps->channels[0].count);
+ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
}
@@ -1782,7 +2077,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
{
if (!kvm->arch.vpit)
return -ENXIO;
+ mutex_lock(&kvm->arch.vpit->pit_state.lock);
kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
+ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return 0;
}
@@ -1833,6 +2130,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
union {
struct kvm_pit_state ps;
struct kvm_memory_alias alias;
+ struct kvm_pit_config pit_config;
} u;
switch (ioctl) {
@@ -1893,16 +2191,24 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
break;
case KVM_CREATE_PIT:
- mutex_lock(&kvm->lock);
+ u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
+ goto create_pit;
+ case KVM_CREATE_PIT2:
+ r = -EFAULT;
+ if (copy_from_user(&u.pit_config, argp,
+ sizeof(struct kvm_pit_config)))
+ goto out;
+ create_pit:
+ down_write(&kvm->slots_lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
r = -ENOMEM;
- kvm->arch.vpit = kvm_create_pit(kvm);
+ kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
- mutex_unlock(&kvm->lock);
+ up_write(&kvm->slots_lock);
break;
case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
@@ -1913,10 +2219,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;
if (irqchip_in_kernel(kvm)) {
__s32 status;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->irq_lock);
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->irq_lock);
if (ioctl == KVM_IRQ_LINE_STATUS) {
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
@@ -2038,35 +2344,23 @@ static void kvm_init_msr_list(void)
num_msrs_to_save = j;
}
-/*
- * Only apic need an MMIO device hook, so shortcut now..
- */
-static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
- gpa_t addr, int len,
- int is_write)
+static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
+ const void *v)
{
- struct kvm_io_device *dev;
+ if (vcpu->arch.apic &&
+ !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
+ return 0;
- if (vcpu->arch.apic) {
- dev = &vcpu->arch.apic->dev;
- if (dev->in_range(dev, addr, len, is_write))
- return dev;
- }
- return NULL;
+ return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
}
-
-static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
- gpa_t addr, int len,
- int is_write)
+static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
- struct kvm_io_device *dev;
+ if (vcpu->arch.apic &&
+ !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
+ return 0;
- dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
- if (dev == NULL)
- dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
- is_write);
- return dev;
+ return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
}
static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
@@ -2135,11 +2429,12 @@ static int emulator_read_emulated(unsigned long addr,
unsigned int bytes,
struct kvm_vcpu *vcpu)
{
- struct kvm_io_device *mmio_dev;
gpa_t gpa;
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
+ vcpu->mmio_phys_addr, *(u64 *)val);
vcpu->mmio_read_completed = 0;
return X86EMUL_CONTINUE;
}
@@ -2160,14 +2455,12 @@ mmio:
/*
* Is this MMIO handled locally?
*/
- mutex_lock(&vcpu->kvm->lock);
- mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
- if (mmio_dev) {
- kvm_iodevice_read(mmio_dev, gpa, bytes, val);
- mutex_unlock(&vcpu->kvm->lock);
+ if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
return X86EMUL_CONTINUE;
}
- mutex_unlock(&vcpu->kvm->lock);
+
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
vcpu->mmio_needed = 1;
vcpu->mmio_phys_addr = gpa;
@@ -2194,7 +2487,6 @@ static int emulator_write_emulated_onepage(unsigned long addr,
unsigned int bytes,
struct kvm_vcpu *vcpu)
{
- struct kvm_io_device *mmio_dev;
gpa_t gpa;
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
@@ -2212,17 +2504,12 @@ static int emulator_write_emulated_onepage(unsigned long addr,
return X86EMUL_CONTINUE;
mmio:
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
/*
* Is this MMIO handled locally?
*/
- mutex_lock(&vcpu->kvm->lock);
- mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
- if (mmio_dev) {
- kvm_iodevice_write(mmio_dev, gpa, bytes, val);
- mutex_unlock(&vcpu->kvm->lock);
+ if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
return X86EMUL_CONTINUE;
- }
- mutex_unlock(&vcpu->kvm->lock);
vcpu->mmio_needed = 1;
vcpu->mmio_phys_addr = gpa;
@@ -2311,7 +2598,6 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
int emulate_clts(struct kvm_vcpu *vcpu)
{
- KVMTRACE_0D(CLTS, vcpu, handler);
kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
return X86EMUL_CONTINUE;
}
@@ -2412,14 +2698,33 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
- /* Reject the instructions other than VMCALL/VMMCALL when
- * try to emulate invalid opcode */
+ /* Only allow emulation of specific instructions on #UD
+ * (namely VMMCALL, sysenter, sysexit, syscall) */
c = &vcpu->arch.emulate_ctxt.decode;
- if ((emulation_type & EMULTYPE_TRAP_UD) &&
- (!(c->twobyte && c->b == 0x01 &&
- (c->modrm_reg == 0 || c->modrm_reg == 3) &&
- c->modrm_mod == 3 && c->modrm_rm == 1)))
- return EMULATE_FAIL;
+ if (emulation_type & EMULTYPE_TRAP_UD) {
+ if (!c->twobyte)
+ return EMULATE_FAIL;
+ switch (c->b) {
+ case 0x01: /* VMMCALL */
+ if (c->modrm_mod != 3 || c->modrm_rm != 1)
+ return EMULATE_FAIL;
+ break;
+ case 0x34: /* sysenter */
+ case 0x35: /* sysexit */
+ if (c->modrm_mod != 0 || c->modrm_rm != 0)
+ return EMULATE_FAIL;
+ break;
+ case 0x05: /* syscall */
+ if (c->modrm_mod != 0 || c->modrm_rm != 0)
+ return EMULATE_FAIL;
+ break;
+ default:
+ return EMULATE_FAIL;
+ }
+
+ if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
+ return EMULATE_FAIL;
+ }
++vcpu->stat.insn_emulation;
if (r) {
@@ -2539,52 +2844,40 @@ int complete_pio(struct kvm_vcpu *vcpu)
return 0;
}
-static void kernel_pio(struct kvm_io_device *pio_dev,
- struct kvm_vcpu *vcpu,
- void *pd)
+static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
/* TODO: String I/O for in kernel device */
+ int r;
- mutex_lock(&vcpu->kvm->lock);
if (vcpu->arch.pio.in)
- kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
- vcpu->arch.pio.size,
- pd);
+ r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
+ vcpu->arch.pio.size, pd);
else
- kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
- vcpu->arch.pio.size,
- pd);
- mutex_unlock(&vcpu->kvm->lock);
+ r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
+ vcpu->arch.pio.size, pd);
+ return r;
}
-static void pio_string_write(struct kvm_io_device *pio_dev,
- struct kvm_vcpu *vcpu)
+static int pio_string_write(struct kvm_vcpu *vcpu)
{
struct kvm_pio_request *io = &vcpu->arch.pio;
void *pd = vcpu->arch.pio_data;
- int i;
+ int i, r = 0;
- mutex_lock(&vcpu->kvm->lock);
for (i = 0; i < io->cur_count; i++) {
- kvm_iodevice_write(pio_dev, io->port,
- io->size,
- pd);
+ if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
+ io->port, io->size, pd)) {
+ r = -EOPNOTSUPP;
+ break;
+ }
pd += io->size;
}
- mutex_unlock(&vcpu->kvm->lock);
-}
-
-static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
- gpa_t addr, int len,
- int is_write)
-{
- return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
+ return r;
}
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int size, unsigned port)
{
- struct kvm_io_device *pio_dev;
unsigned long val;
vcpu->run->exit_reason = KVM_EXIT_IO;
@@ -2598,19 +2891,13 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
vcpu->arch.pio.down = 0;
vcpu->arch.pio.rep = 0;
- if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
- KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
- handler);
- else
- KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
- handler);
+ trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+ size, 1);
val = kvm_register_read(vcpu, VCPU_REGS_RAX);
memcpy(vcpu->arch.pio_data, &val, 4);
- pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
- if (pio_dev) {
- kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
+ if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
complete_pio(vcpu);
return 1;
}
@@ -2624,7 +2911,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
{
unsigned now, in_page;
int ret = 0;
- struct kvm_io_device *pio_dev;
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -2637,12 +2923,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
vcpu->arch.pio.down = down;
vcpu->arch.pio.rep = rep;
- if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
- KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
- handler);
- else
- KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
- handler);
+ trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+ size, count);
if (!count) {
kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2672,9 +2954,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
vcpu->arch.pio.guest_gva = address;
- pio_dev = vcpu_find_pio_dev(vcpu, port,
- vcpu->arch.pio.cur_count,
- !vcpu->arch.pio.in);
if (!vcpu->arch.pio.in) {
/* string PIO write */
ret = pio_copy_data(vcpu);
@@ -2682,16 +2961,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
kvm_inject_gp(vcpu, 0);
return 1;
}
- if (ret == 0 && pio_dev) {
- pio_string_write(pio_dev, vcpu);
+ if (ret == 0 && !pio_string_write(vcpu)) {
complete_pio(vcpu);
if (vcpu->arch.pio.count == 0)
ret = 1;
}
- } else if (pio_dev)
- pr_unimpl(vcpu, "no string pio read support yet, "
- "port %x size %d count %ld\n",
- port, size, count);
+ }
+ /* no string PIO read support yet */
return ret;
}
@@ -2724,10 +3000,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
continue;
if (!kvm_request_guest_time_update(vcpu))
@@ -2820,7 +3093,6 @@ void kvm_arch_exit(void)
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
- KVMTRACE_0D(HLT, vcpu, handler);
if (irqchip_in_kernel(vcpu->kvm)) {
vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
return 1;
@@ -2851,7 +3123,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
- KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
+ trace_kvm_hypercall(nr, a0, a1, a2, a3);
if (!is_long_mode(vcpu)) {
nr &= 0xFFFFFFFF;
@@ -2951,8 +3223,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
return 0;
}
- KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
- (u32)((u64)value >> 32), handler);
return value;
}
@@ -2960,9 +3230,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
unsigned long *rflags)
{
- KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
- (u32)((u64)val >> 32), handler);
-
switch (cr) {
case 0:
kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
@@ -3072,11 +3339,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
}
kvm_x86_ops->skip_emulated_instruction(vcpu);
- KVMTRACE_5D(CPUID, vcpu, function,
- (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
- (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
- (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
- (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
+ trace_kvm_cpuid(function,
+ kvm_register_read(vcpu, VCPU_REGS_RAX),
+ kvm_register_read(vcpu, VCPU_REGS_RBX),
+ kvm_register_read(vcpu, VCPU_REGS_RCX),
+ kvm_register_read(vcpu, VCPU_REGS_RDX));
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
@@ -3157,9 +3424,6 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- kvm_x86_ops->set_interrupt_shadow(vcpu, 0);
-
/* try to reinject previous events if any */
if (vcpu->arch.nmi_injected) {
kvm_x86_ops->set_nmi(vcpu);
@@ -3275,7 +3539,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
set_debugreg(vcpu->arch.eff_db[3], 3);
}
- KVMTRACE_0D(VMENTRY, vcpu, entryexit);
+ trace_kvm_entry(vcpu->vcpu_id);
kvm_x86_ops->run(vcpu, kvm_run);
if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -4111,7 +4375,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
/* Older userspace won't unhalt the vcpu on reset. */
- if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
+ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
!(vcpu->arch.cr0 & X86_CR0_PE))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -4382,7 +4646,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm = vcpu->kvm;
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
- if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
+ if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -4404,6 +4668,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_mmu_destroy;
}
+ vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
+ GFP_KERNEL);
+ if (!vcpu->arch.mce_banks) {
+ r = -ENOMEM;
+ goto fail_mmu_destroy;
+ }
+ vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
+
return 0;
fail_mmu_destroy:
@@ -4451,20 +4723,22 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ struct kvm_vcpu *vcpu;
/*
* Unpin any mmu pages first.
*/
- for (i = 0; i < KVM_MAX_VCPUS; ++i)
- if (kvm->vcpus[i])
- kvm_unload_vcpu_mmu(kvm->vcpus[i]);
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_free(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_unload_vcpu_mmu(vcpu);
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_free(vcpu);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+ atomic_set(&kvm->online_vcpus, 0);
+ mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
@@ -4580,3 +4854,9 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
return kvm_x86_ops->interrupt_allowed(vcpu);
}
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 4c8e10af78e8..5eadea585d2a 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -31,4 +31,8 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
{
return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}
+
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+ u32 function, u32 index);
+
#endif
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index c1b6c232e02b..c6663d46f328 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -32,6 +32,8 @@
#include <linux/module.h>
#include <asm/kvm_x86_emulate.h>
+#include "mmu.h" /* for is_long_mode() */
+
/*
* Opcode effective-address decode tables.
* Note that we only emulate instructions that have at least one memory
@@ -60,6 +62,7 @@
#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
#define SrcOne (7<<4) /* Implied '1' */
#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
+#define SrcImmU (9<<4) /* Immediate operand, unsigned */
#define SrcMask (0xf<<4)
/* Generic ModRM decode. */
#define ModRM (1<<8)
@@ -195,7 +198,7 @@ static u32 opcode_table[256] = {
ByteOp | SrcImmUByte, SrcImmUByte,
/* 0xE8 - 0xEF */
SrcImm | Stack, SrcImm | ImplicitOps,
- SrcImm | Src2Imm16, SrcImmByte | ImplicitOps,
+ SrcImmU | Src2Imm16, SrcImmByte | ImplicitOps,
SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
/* 0xF0 - 0xF7 */
@@ -208,7 +211,7 @@ static u32 opcode_table[256] = {
static u32 twobyte_table[256] = {
/* 0x00 - 0x0F */
- 0, Group | GroupDual | Group7, 0, 0, 0, 0, ImplicitOps, 0,
+ 0, Group | GroupDual | Group7, 0, 0, 0, ImplicitOps, ImplicitOps, 0,
ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
/* 0x10 - 0x1F */
0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
@@ -216,7 +219,9 @@ static u32 twobyte_table[256] = {
ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0x30 - 0x3F */
- ImplicitOps, 0, ImplicitOps, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ImplicitOps, 0, ImplicitOps, 0,
+ ImplicitOps, ImplicitOps, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x40 - 0x47 */
DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
@@ -319,8 +324,11 @@ static u32 group2_table[] = {
};
/* EFLAGS bit definitions. */
+#define EFLG_VM (1<<17)
+#define EFLG_RF (1<<16)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
+#define EFLG_IF (1<<9)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
@@ -1027,6 +1035,7 @@ done_prefixes:
c->src.type = OP_MEM;
break;
case SrcImm:
+ case SrcImmU:
c->src.type = OP_IMM;
c->src.ptr = (unsigned long *)c->eip;
c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
@@ -1044,6 +1053,19 @@ done_prefixes:
c->src.val = insn_fetch(s32, 4, c->eip);
break;
}
+ if ((c->d & SrcMask) == SrcImmU) {
+ switch (c->src.bytes) {
+ case 1:
+ c->src.val &= 0xff;
+ break;
+ case 2:
+ c->src.val &= 0xffff;
+ break;
+ case 4:
+ c->src.val &= 0xffffffff;
+ break;
+ }
+ }
break;
case SrcImmByte:
case SrcImmUByte:
@@ -1361,7 +1383,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
return 0;
}
-void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
+static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
{
u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
/*
@@ -1375,6 +1397,217 @@ void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
ctxt->interruptibility = mask;
}
+static inline void
+setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
+ struct kvm_segment *cs, struct kvm_segment *ss)
+{
+ memset(cs, 0, sizeof(struct kvm_segment));
+ kvm_x86_ops->get_segment(ctxt->vcpu, cs, VCPU_SREG_CS);
+ memset(ss, 0, sizeof(struct kvm_segment));
+
+ cs->l = 0; /* will be adjusted later */
+ cs->base = 0; /* flat segment */
+ cs->g = 1; /* 4kb granularity */
+ cs->limit = 0xffffffff; /* 4GB limit */
+ cs->type = 0x0b; /* Read, Execute, Accessed */
+ cs->s = 1;
+ cs->dpl = 0; /* will be adjusted later */
+ cs->present = 1;
+ cs->db = 1;
+
+ ss->unusable = 0;
+ ss->base = 0; /* flat segment */
+ ss->limit = 0xffffffff; /* 4GB limit */
+ ss->g = 1; /* 4kb granularity */
+ ss->s = 1;
+ ss->type = 0x03; /* Read/Write, Accessed */
+ ss->db = 1; /* 32bit stack segment */
+ ss->dpl = 0;
+ ss->present = 1;
+}
+
+static int
+emulate_syscall(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+ struct kvm_segment cs, ss;
+ u64 msr_data;
+
+ /* syscall is not available in real mode */
+ if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
+ || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
+ return -1;
+
+ setup_syscalls_segments(ctxt, &cs, &ss);
+
+ kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+ msr_data >>= 32;
+ cs.selector = (u16)(msr_data & 0xfffc);
+ ss.selector = (u16)(msr_data + 8);
+
+ if (is_long_mode(ctxt->vcpu)) {
+ cs.db = 0;
+ cs.l = 1;
+ }
+ kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
+ kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+
+ c->regs[VCPU_REGS_RCX] = c->eip;
+ if (is_long_mode(ctxt->vcpu)) {
+#ifdef CONFIG_X86_64
+ c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
+
+ kvm_x86_ops->get_msr(ctxt->vcpu,
+ ctxt->mode == X86EMUL_MODE_PROT64 ?
+ MSR_LSTAR : MSR_CSTAR, &msr_data);
+ c->eip = msr_data;
+
+ kvm_x86_ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
+ ctxt->eflags &= ~(msr_data | EFLG_RF);
+#endif
+ } else {
+ /* legacy mode */
+ kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+ c->eip = (u32)msr_data;
+
+ ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+ }
+
+ return 0;
+}
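
emulate_syscall() derives the new selectors from the STAR MSR: bits 47:32 hold the SYSCALL CS selector (with the RPL bits masked off) and SS is that value plus 8, i.e. the next GDT descriptor. A standalone illustration of the arithmetic with a made-up STAR value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical STAR value with a SYSCALL CS/SS base of 0x0010
             * in bits 47:32; the other fields are irrelevant here. */
            uint64_t star = 0x0010ULL << 32;
            uint64_t msr_data = star >> 32;

            uint16_t cs = (uint16_t)(msr_data & 0xfffc);   /* clear RPL bits */
            uint16_t ss = (uint16_t)(msr_data + 8);        /* next descriptor */

            printf("cs=0x%04x ss=0x%04x\n", cs, ss);
            return 0;
    }
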
+
+static int
+emulate_sysenter(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+ struct kvm_segment cs, ss;
+ u64 msr_data;
+
+ /* inject #UD if LOCK prefix is used */
+ if (c->lock_prefix)
+ return -1;
+
+ /* inject #GP if in real mode or paging is disabled */
+ if (ctxt->mode == X86EMUL_MODE_REAL ||
+ !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return -1;
+ }
+
+ /* XXX sysenter/sysexit have not been tested in 64bit mode.
+ * Therefore, we inject an #UD.
+ */
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
+ return -1;
+
+ setup_syscalls_segments(ctxt, &cs, &ss);
+
+ kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+ switch (ctxt->mode) {
+ case X86EMUL_MODE_PROT32:
+ if ((msr_data & 0xfffc) == 0x0) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return -1;
+ }
+ break;
+ case X86EMUL_MODE_PROT64:
+ if (msr_data == 0x0) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return -1;
+ }
+ break;
+ }
+
+ ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+ cs.selector = (u16)msr_data;
+ cs.selector &= ~SELECTOR_RPL_MASK;
+ ss.selector = cs.selector + 8;
+ ss.selector &= ~SELECTOR_RPL_MASK;
+ if (ctxt->mode == X86EMUL_MODE_PROT64
+ || is_long_mode(ctxt->vcpu)) {
+ cs.db = 0;
+ cs.l = 1;
+ }
+
+ kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
+ kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+
+ kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
+ c->eip = msr_data;
+
+ kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
+ c->regs[VCPU_REGS_RSP] = msr_data;
+
+ return 0;
+}
+
+static int
+emulate_sysexit(struct x86_emulate_ctxt *ctxt)
+{
+ struct decode_cache *c = &ctxt->decode;
+ struct kvm_segment cs, ss;
+ u64 msr_data;
+ int usermode;
+
+ /* inject #UD if LOCK prefix is used */
+ if (c->lock_prefix)
+ return -1;
+
+ /* inject #GP if in real mode or paging is disabled */
+ if (ctxt->mode == X86EMUL_MODE_REAL
+ || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return -1;
+ }
+
+ /* sysexit must be called from CPL 0 */
+ if (kvm_x86_ops->get_cpl(ctxt->vcpu) != 0) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return -1;
+ }
+
+ setup_syscalls_segments(ctxt, &cs, &ss);
+
+ if ((c->rex_prefix & 0x8) != 0x0)
+ usermode = X86EMUL_MODE_PROT64;
+ else
+ usermode = X86EMUL_MODE_PROT32;
+
+ cs.dpl = 3;
+ ss.dpl = 3;
+ kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+ switch (usermode) {
+ case X86EMUL_MODE_PROT32:
+ cs.selector = (u16)(msr_data + 16);
+ if ((msr_data & 0xfffc) == 0x0) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return -1;
+ }
+ ss.selector = (u16)(msr_data + 24);
+ break;
+ case X86EMUL_MODE_PROT64:
+ cs.selector = (u16)(msr_data + 32);
+ if (msr_data == 0x0) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return -1;
+ }
+ ss.selector = cs.selector + 8;
+ cs.db = 0;
+ cs.l = 1;
+ break;
+ }
+ cs.selector |= SELECTOR_RPL_MASK;
+ ss.selector |= SELECTOR_RPL_MASK;
+
+ kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
+ kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+
+ c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
+ c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];
+
+ return 0;
+}
+
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
@@ -1970,6 +2203,12 @@ twobyte_insn:
goto cannot_emulate;
}
break;
+ case 0x05: /* syscall */
+ if (emulate_syscall(ctxt) == -1)
+ goto cannot_emulate;
+ else
+ goto writeback;
+ break;
case 0x06:
emulate_clts(ctxt->vcpu);
c->dst.type = OP_NONE;
@@ -2036,6 +2275,18 @@ twobyte_insn:
rc = X86EMUL_CONTINUE;
c->dst.type = OP_NONE;
break;
+ case 0x34: /* sysenter */
+ if (emulate_sysenter(ctxt) == -1)
+ goto cannot_emulate;
+ else
+ goto writeback;
+ break;
+ case 0x35: /* sysexit */
+ if (emulate_sysexit(ctxt) == -1)
+ goto cannot_emulate;
+ else
+ goto writeback;
+ break;
case 0x40 ... 0x4f: /* cmov */
c->dst.val = c->dst.orig_val = c->src.val;
if (!test_cc(c->b, ctxt->eflags))
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7bc65f0f62c4..0188fd37b6c0 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1079,7 +1079,7 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
return insn_len;
}
-/*G:030 Once we get to lguest_init(), we know we're a Guest. The various
+/*G:029 Once we get to lguest_init(), we know we're a Guest. The various
* pv_ops structures in the kernel provide points for (almost) every routine we
* have to override to avoid privileged instructions. */
__init void lguest_init(void)
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f9d35632666b..07c31899c9c2 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -10,6 +10,7 @@ lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
ifeq ($(CONFIG_X86_32),y)
+ obj-y += atomic64_32.o
lib-y += checksum_32.o
lib-y += strstr_32.o
lib-y += semaphore_32.o string_32.o
diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
new file mode 100644
index 000000000000..824fa0be55a3
--- /dev/null
+++ b/arch/x86/lib/atomic64_32.c
@@ -0,0 +1,230 @@
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <asm/processor.h>
+#include <asm/cmpxchg.h>
+#include <asm/atomic.h>
+
+static noinline u64 cmpxchg8b(u64 *ptr, u64 old, u64 new)
+{
+ u32 low = new;
+ u32 high = new >> 32;
+
+ asm volatile(
+ LOCK_PREFIX "cmpxchg8b %1\n"
+ : "+A" (old), "+m" (*ptr)
+ : "b" (low), "c" (high)
+ );
+ return old;
+}
+
+u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
+{
+ return cmpxchg8b(&ptr->counter, old_val, new_val);
+}
+EXPORT_SYMBOL(atomic64_cmpxchg);
+
+/**
+ * atomic64_xchg - xchg atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ * @new_val: value to assign
+ *
+ * Atomically xchgs the value of @ptr to @new_val and returns
+ * the old value.
+ */
+u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
+{
+ /*
+ * Try first with a (possibly incorrect) assumption about
+ * what we have there. We'll do two loops most likely,
+ * but we'll get an ownership MESI transaction straight away
+ * instead of a read transaction followed by a
+ * flush-for-ownership transaction:
+ */
+ u64 old_val, real_val = 0;
+
+ do {
+ old_val = real_val;
+
+ real_val = atomic64_cmpxchg(ptr, old_val, new_val);
+
+ } while (real_val != old_val);
+
+ return old_val;
+}
+EXPORT_SYMBOL(atomic64_xchg);
+
+/**
+ * atomic64_set - set atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ * @new_val: value to assign
+ *
+ * Atomically sets the value of @ptr to @new_val.
+ */
+void atomic64_set(atomic64_t *ptr, u64 new_val)
+{
+ atomic64_xchg(ptr, new_val);
+}
+EXPORT_SYMBOL(atomic64_set);
+
+EXPORT_SYMBOL(atomic64_read);
+
+/**
+ * atomic64_add_return - add and return
+ * @delta: integer value to add
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
+ */
+noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
+{
+ /*
+ * Try first with a (possibly incorrect) assumption about
+ * what we have there. We'll do two loops most likely,
+ * but we'll get an ownership MESI transaction straight away
+ * instead of a read transaction followed by a
+ * flush-for-ownership transaction:
+ */
+ u64 old_val, new_val, real_val = 0;
+
+ do {
+ old_val = real_val;
+ new_val = old_val + delta;
+
+ real_val = atomic64_cmpxchg(ptr, old_val, new_val);
+
+ } while (real_val != old_val);
+
+ return new_val;
+}
+EXPORT_SYMBOL(atomic64_add_return);
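
atomic64_xchg() and atomic64_add_return() above share one pattern: guess an old value (0) so the very first cmpxchg already takes the cache line for ownership, then loop until the returned value matches the guess. The same loop can be sketched in portable C on top of GCC's __sync builtin (an illustration only, not kernel code, and it assumes a target with 8-byte __sync support):

    #include <stdint.h>
    #include <stdio.h>

    /* cmpxchg-based read-modify-write loop, mirroring atomic64_add_return(). */
    static uint64_t add_return64(uint64_t delta, uint64_t *ptr)
    {
            uint64_t old_val, new_val, real_val = 0;

            do {
                    old_val = real_val;                    /* current guess */
                    new_val = old_val + delta;
                    real_val = __sync_val_compare_and_swap(ptr, old_val, new_val);
            } while (real_val != old_val);                 /* guess was stale */

            return new_val;
    }

    int main(void)
    {
            uint64_t v = 40;

            printf("%llu\n", (unsigned long long)add_return64(2, &v));
            return 0;
    }
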
+
+u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
+{
+ return atomic64_add_return(-delta, ptr);
+}
+EXPORT_SYMBOL(atomic64_sub_return);
+
+u64 atomic64_inc_return(atomic64_t *ptr)
+{
+ return atomic64_add_return(1, ptr);
+}
+EXPORT_SYMBOL(atomic64_inc_return);
+
+u64 atomic64_dec_return(atomic64_t *ptr)
+{
+ return atomic64_sub_return(1, ptr);
+}
+EXPORT_SYMBOL(atomic64_dec_return);
+
+/**
+ * atomic64_add - add integer to atomic64 variable
+ * @delta: integer value to add
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr.
+ */
+void atomic64_add(u64 delta, atomic64_t *ptr)
+{
+ atomic64_add_return(delta, ptr);
+}
+EXPORT_SYMBOL(atomic64_add);
+
+/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @delta: integer value to subtract
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically subtracts @delta from @ptr.
+ */
+void atomic64_sub(u64 delta, atomic64_t *ptr)
+{
+ atomic64_add(-delta, ptr);
+}
+EXPORT_SYMBOL(atomic64_sub);
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @delta: integer value to subtract
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically subtracts @delta from @ptr and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+int atomic64_sub_and_test(u64 delta, atomic64_t *ptr)
+{
+ u64 new_val = atomic64_sub_return(delta, ptr);
+
+ return new_val == 0;
+}
+EXPORT_SYMBOL(atomic64_sub_and_test);
+
+/**
+ * atomic64_inc - increment atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically increments @ptr by 1.
+ */
+void atomic64_inc(atomic64_t *ptr)
+{
+ atomic64_add(1, ptr);
+}
+EXPORT_SYMBOL(atomic64_inc);
+
+/**
+ * atomic64_dec - decrement atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically decrements @ptr by 1.
+ */
+void atomic64_dec(atomic64_t *ptr)
+{
+ atomic64_sub(1, ptr);
+}
+EXPORT_SYMBOL(atomic64_dec);
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically decrements @ptr by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+int atomic64_dec_and_test(atomic64_t *ptr)
+{
+ return atomic64_sub_and_test(1, ptr);
+}
+EXPORT_SYMBOL(atomic64_dec_and_test);
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically increments @ptr by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+int atomic64_inc_and_test(atomic64_t *ptr)
+{
+ return atomic64_sub_and_test(-1, ptr);
+}
+EXPORT_SYMBOL(atomic64_inc_and_test);
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @delta: integer value to add
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+int atomic64_add_negative(u64 delta, atomic64_t *ptr)
+{
+ s64 new_val = atomic64_add_return(delta, ptr);
+
+ return new_val < 0;
+}
+EXPORT_SYMBOL(atomic64_add_negative);
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index 9a10a78bb4a4..ebeafcce04a9 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -5,15 +5,14 @@
* Zero a page.
* rdi page
*/
- ALIGN
-clear_page_c:
+ENTRY(clear_page_c)
CFI_STARTPROC
movl $4096/8,%ecx
xorl %eax,%eax
rep stosq
ret
CFI_ENDPROC
-ENDPROC(clear_page)
+ENDPROC(clear_page_c)
ENTRY(clear_page)
CFI_STARTPROC
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index f118c110af32..6ba0f7bb85ea 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -75,6 +75,7 @@ ENTRY(copy_to_user)
jae bad_to_user
ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
CFI_ENDPROC
+ENDPROC(copy_to_user)
/* Standard copy_from_user with segment limit checking */
ENTRY(copy_from_user)
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index f4568605d7d5..ff485d361182 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -55,8 +55,10 @@ static void delay_tsc(unsigned long loops)
preempt_disable();
cpu = smp_processor_id();
+ rdtsc_barrier();
rdtscl(bclock);
for (;;) {
+ rdtsc_barrier();
rdtscl(now);
if ((now - bclock) >= loops)
break;
@@ -78,6 +80,7 @@ static void delay_tsc(unsigned long loops)
if (unlikely(cpu != smp_processor_id())) {
loops -= (now - bclock);
cpu = smp_processor_id();
+ rdtsc_barrier();
rdtscl(bclock);
}
}
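
The delay.c hunk adds rdtsc_barrier() before every rdtscl() so the timestamp read cannot be speculated ahead of the preceding work, which would make the measured interval appear shorter than it is; the kernel picks LFENCE or MFENCE per CPU inside rdtsc_barrier(). A user-space sketch of the same serialize-then-read idea, using LFENCE (assumes an SSE2-capable x86 and GCC-style inline asm):

    #include <stdint.h>
    #include <stdio.h>

    /* Fence, then read the TSC, so the read is not reordered earlier. */
    static inline uint64_t rdtsc_ordered(void)
    {
            uint32_t lo, hi;

            asm volatile("lfence; rdtsc" : "=a" (lo), "=d" (hi));
            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            uint64_t start = rdtsc_ordered();
            uint64_t end = rdtsc_ordered();

            printf("delta = %llu cycles\n", (unsigned long long)(end - start));
            return 0;
    }
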
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 78a5fff857be..9bf7e52c2869 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -285,26 +285,25 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address,
tsk->thread.screen_bitmap |= 1 << bit;
}
-static void dump_pagetable(unsigned long address)
+static bool low_pfn(unsigned long pfn)
{
- __typeof__(pte_val(__pte(0))) page;
+ return pfn < max_low_pfn;
+}
- page = read_cr3();
- page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
+static void dump_pagetable(unsigned long address)
+{
+ pgd_t *base = __va(read_cr3());
+ pgd_t *pgd = &base[pgd_index(address)];
+ pmd_t *pmd;
+ pte_t *pte;
#ifdef CONFIG_X86_PAE
- printk("*pdpt = %016Lx ", page);
- if ((page >> PAGE_SHIFT) < max_low_pfn
- && page & _PAGE_PRESENT) {
- page &= PAGE_MASK;
- page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
- & (PTRS_PER_PMD - 1)];
- printk(KERN_CONT "*pde = %016Lx ", page);
- page &= ~_PAGE_NX;
- }
-#else
- printk("*pde = %08lx ", page);
+ printk("*pdpt = %016Lx ", pgd_val(*pgd));
+ if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
+ goto out;
#endif
+ pmd = pmd_offset(pud_offset(pgd, address), address);
+ printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
/*
* We must not directly access the pte in the highpte
@@ -312,16 +311,12 @@ static void dump_pagetable(unsigned long address)
* And let's rather not kmap-atomic the pte, just in case
* it's allocated already:
*/
- if ((page >> PAGE_SHIFT) < max_low_pfn
- && (page & _PAGE_PRESENT)
- && !(page & _PAGE_PSE)) {
-
- page &= PAGE_MASK;
- page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
- & (PTRS_PER_PTE - 1)];
- printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
- }
+ if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
+ goto out;
+ pte = pte_offset_kernel(pmd, address);
+ printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
+out:
printk("\n");
}
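
The rewritten 32-bit dump_pagetable() walks pgd -> pmd -> pte with the generic accessors instead of open-coded array arithmetic. For orientation, a sketch of how a virtual address splits into those indices under 32-bit PAE (2 + 9 + 9 + 12 bits), which is what pgd_index()/pmd_offset()/pte_offset_kernel() boil down to:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t addr = 0xc0123456;           /* hypothetical address */

            unsigned pgd = addr >> 30;            /* PDPT entry (PAE "pgd") */
            unsigned pmd = (addr >> 21) & 0x1ff;  /* page-directory entry */
            unsigned pte = (addr >> 12) & 0x1ff;  /* page-table entry */
            unsigned off = addr & 0xfff;          /* offset within the page */

            printf("pgd=%u pmd=%u pte=%u offset=0x%x\n", pgd, pmd, pte, off);
            return 0;
    }
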
@@ -449,16 +444,12 @@ static int bad_address(void *p)
static void dump_pagetable(unsigned long address)
{
- pgd_t *pgd;
+ pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+ pgd_t *pgd = base + pgd_index(address);
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- pgd = (pgd_t *)read_cr3();
-
- pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
-
- pgd += pgd_index(address);
if (bad_address(pgd))
goto bad;
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 58f621e81919..0c6f43cee25d 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -24,7 +24,7 @@ void kunmap(struct page *page)
* no global lock is needed and because the kmap code must perform a global TLB
* invalidation when the kmap pool wraps.
*
- * However when holding an atomic kmap is is not legal to sleep, so atomic
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f53b57e4086f..0607119cef94 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -12,6 +12,7 @@
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
+#include <asm/proto.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -177,20 +178,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
return nr_range;
}
-#ifdef CONFIG_X86_64
-static void __init init_gbpages(void)
-{
- if (direct_gbpages && cpu_has_gbpages)
- printk(KERN_INFO "Using GB pages for direct mapping\n");
- else
- direct_gbpages = 0;
-}
-#else
-static inline void init_gbpages(void)
-{
-}
-#endif
-
/*
* Setup the direct mapping of the physical memory at PAGE_OFFSET.
* This runs before bootmem is initialized and gets pages directly from
@@ -210,9 +197,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
- if (!after_bootmem)
- init_gbpages();
-
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index c4378f4fd4a5..b177652251a4 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -598,6 +598,8 @@ void __init paging_init(void)
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
+ /* clear the default setting with node 0 */
+ nodes_clear(node_states[N_NORMAL_MEMORY]);
free_area_init_nodes(max_zone_pfns);
}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3cfe9ced8a4c..c106f7852424 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -11,6 +11,8 @@
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/pfn.h>
+#include <linux/percpu.h>
#include <asm/e820.h>
#include <asm/processor.h>
@@ -681,8 +683,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
static int cpa_process_alias(struct cpa_data *cpa)
{
struct cpa_data alias_cpa;
- int ret = 0;
- unsigned long temp_cpa_vaddr, vaddr;
+ unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
+ unsigned long vaddr, remapped;
+ int ret;
if (cpa->pfn >= max_pfn_mapped)
return 0;
@@ -706,42 +709,55 @@ static int cpa_process_alias(struct cpa_data *cpa)
PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
alias_cpa = *cpa;
- temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
- alias_cpa.vaddr = &temp_cpa_vaddr;
+ alias_cpa.vaddr = &laddr;
alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
-
ret = __change_page_attr_set_clr(&alias_cpa, 0);
+ if (ret)
+ return ret;
}
#ifdef CONFIG_X86_64
- if (ret)
- return ret;
/*
- * No need to redo, when the primary call touched the high
- * mapping already:
- */
- if (within(vaddr, (unsigned long) _text, _brk_end))
- return 0;
-
- /*
- * If the physical address is inside the kernel map, we need
+ * If the primary call didn't touch the high mapping already
+ * and the physical address is inside the kernel map, we need
* to touch the high mapped kernel as well:
*/
- if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
- return 0;
+ if (!within(vaddr, (unsigned long)_text, _brk_end) &&
+ within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+ unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
+ __START_KERNEL_map - phys_base;
+ alias_cpa = *cpa;
+ alias_cpa.vaddr = &temp_cpa_vaddr;
+ alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
- alias_cpa = *cpa;
- temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
- alias_cpa.vaddr = &temp_cpa_vaddr;
- alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+ /*
+ * The high mapping range is imprecise, so ignore the
+ * return value.
+ */
+ __change_page_attr_set_clr(&alias_cpa, 0);
+ }
+#endif
/*
- * The high mapping range is imprecise, so ignore the return value.
+ * If the PMD page was partially used for per-cpu remapping,
+ * the recycled area needs to be split and modified. Because
+ * the area is always a proper subset of a PMD page,
+ * cpa->numpages is guaranteed to be 1 for these areas, so
+ * there's no need to loop over and check for further remaps.
*/
- __change_page_attr_set_clr(&alias_cpa, 0);
-#endif
- return ret;
+ remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
+ if (remapped) {
+ WARN_ON(cpa->numpages > 1);
+ alias_cpa = *cpa;
+ alias_cpa.vaddr = &remapped;
+ alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+ ret = __change_page_attr_set_clr(&alias_cpa, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
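
cpa_process_alias() above has to keep several views of the same page frame coherent: the primary mapping, the low direct mapping returned by __va(), and (on 64-bit) the high kernel-text mapping at __START_KERNEL_map. The short sketch below only demonstrates the address arithmetic for those two aliases, assuming typical x86_64 constants (PAGE_OFFSET 0xffff880000000000, __START_KERNEL_map 0xffffffff80000000, phys_base 0); these values are assumptions for illustration, not derived from this patch.

#include <stdio.h>

/* Typical x86_64 layout constants, assumed here purely for illustration. */
#define PAGE_SHIFT        12
#define PAGE_OFFSET       0xffff880000000000ULL
#define START_KERNEL_MAP  0xffffffff80000000ULL

int main(void)
{
	unsigned long long pfn = 0x1234;	/* hypothetical pfn inside the kernel image */
	unsigned long long phys_base = 0;	/* assume a non-relocated kernel */

	/* The two aliases cpa_process_alias() may also have to update: */
	unsigned long long linear  = PAGE_OFFSET + (pfn << PAGE_SHIFT);
	unsigned long long highmap = (pfn << PAGE_SHIFT) + START_KERNEL_MAP - phys_base;

	printf("direct-map alias: %#llx\n", linear);
	printf("high-map alias:   %#llx\n", highmap);
	return 0;
}
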
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 821e97017e95..010b2e575002 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
{
if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
BUG();
- cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+ cpumask_clear_cpu(cpu,
+ mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
@@ -235,8 +236,8 @@ void flush_tlb_current_task(void)
preempt_disable();
local_flush_tlb();
- if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
preempt_enable();
}
@@ -250,8 +251,8 @@ void flush_tlb_mm(struct mm_struct *mm)
else
leave_mm(smp_processor_id());
}
- if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
preempt_enable();
}
@@ -269,8 +270,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
leave_mm(smp_processor_id());
}
- if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, va);
preempt_enable();
}
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index b07dd8d0b321..28ee490c1b80 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -31,6 +31,26 @@ static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;
+/* common functions */
+
+u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
+ struct op_counter_config *counter_config)
+{
+ u64 val = 0;
+ u16 event = (u16)counter_config->event;
+
+ val |= ARCH_PERFMON_EVENTSEL_INT;
+ val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
+ val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
+ val |= (counter_config->unit_mask & 0xFF) << 8;
+ event &= model->event_mask ? model->event_mask : 0xFF;
+ val |= event & 0xFF;
+ val |= (event & 0x0F00) << 24;
+
+ return val;
+}
+
+
static int profile_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
@@ -52,26 +72,18 @@ static int profile_exceptions_notify(struct notifier_block *self,
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
- unsigned int const nr_ctrs = model->num_counters;
- unsigned int const nr_ctrls = model->num_controls;
struct op_msr *counters = msrs->counters;
struct op_msr *controls = msrs->controls;
unsigned int i;
- for (i = 0; i < nr_ctrs; ++i) {
- if (counters[i].addr) {
- rdmsr(counters[i].addr,
- counters[i].saved.low,
- counters[i].saved.high);
- }
+ for (i = 0; i < model->num_counters; ++i) {
+ if (counters[i].addr)
+ rdmsrl(counters[i].addr, counters[i].saved);
}
- for (i = 0; i < nr_ctrls; ++i) {
- if (controls[i].addr) {
- rdmsr(controls[i].addr,
- controls[i].saved.low,
- controls[i].saved.high);
- }
+ for (i = 0; i < model->num_controls; ++i) {
+ if (controls[i].addr)
+ rdmsrl(controls[i].addr, controls[i].saved);
}
}
@@ -126,7 +138,7 @@ static void nmi_cpu_setup(void *dummy)
int cpu = smp_processor_id();
struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
spin_lock(&oprofilefs_lock);
- model->setup_ctrs(msrs);
+ model->setup_ctrs(model, msrs);
spin_unlock(&oprofilefs_lock);
per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, APIC_DM_NMI);
@@ -178,26 +190,18 @@ static int nmi_setup(void)
static void nmi_restore_registers(struct op_msrs *msrs)
{
- unsigned int const nr_ctrs = model->num_counters;
- unsigned int const nr_ctrls = model->num_controls;
struct op_msr *counters = msrs->counters;
struct op_msr *controls = msrs->controls;
unsigned int i;
- for (i = 0; i < nr_ctrls; ++i) {
- if (controls[i].addr) {
- wrmsr(controls[i].addr,
- controls[i].saved.low,
- controls[i].saved.high);
- }
+ for (i = 0; i < model->num_controls; ++i) {
+ if (controls[i].addr)
+ wrmsrl(controls[i].addr, controls[i].saved);
}
- for (i = 0; i < nr_ctrs; ++i) {
- if (counters[i].addr) {
- wrmsr(counters[i].addr,
- counters[i].saved.low,
- counters[i].saved.high);
- }
+ for (i = 0; i < model->num_counters; ++i) {
+ if (counters[i].addr)
+ wrmsrl(counters[i].addr, counters[i].saved);
}
}
@@ -402,6 +406,7 @@ module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
static int __init ppro_init(char **cpu_type)
{
__u8 cpu_model = boot_cpu_data.x86_model;
+ struct op_x86_model_spec const *spec = &op_ppro_spec; /* default */
if (force_arch_perfmon && cpu_has_arch_perfmon)
return 0;
@@ -428,7 +433,7 @@ static int __init ppro_init(char **cpu_type)
*cpu_type = "i386/core_2";
break;
case 26:
- arch_perfmon_setup_counters();
+ spec = &op_arch_perfmon_spec;
*cpu_type = "i386/core_i7";
break;
case 28:
@@ -439,17 +444,7 @@ static int __init ppro_init(char **cpu_type)
return 0;
}
- model = &op_ppro_spec;
- return 1;
-}
-
-static int __init arch_perfmon_init(char **cpu_type)
-{
- if (!cpu_has_arch_perfmon)
- return 0;
- *cpu_type = "i386/arch_perfmon";
- model = &op_arch_perfmon_spec;
- arch_perfmon_setup_counters();
+ model = spec;
return 1;
}
@@ -471,27 +466,26 @@ int __init op_nmi_init(struct oprofile_operations *ops)
/* Needs to be at least an Athlon (or hammer in 32bit mode) */
switch (family) {
- default:
- return -ENODEV;
case 6:
- model = &op_amd_spec;
cpu_type = "i386/athlon";
break;
case 0xf:
- model = &op_amd_spec;
- /* Actually it could be i386/hammer too, but give
- user space an consistent name. */
+ /*
+ * Actually it could be i386/hammer too, but
+ * give user space a consistent name.
+ */
cpu_type = "x86-64/hammer";
break;
case 0x10:
- model = &op_amd_spec;
cpu_type = "x86-64/family10";
break;
case 0x11:
- model = &op_amd_spec;
cpu_type = "x86-64/family11h";
break;
+ default:
+ return -ENODEV;
}
+ model = &op_amd_spec;
break;
case X86_VENDOR_INTEL:
@@ -510,8 +504,15 @@ int __init op_nmi_init(struct oprofile_operations *ops)
break;
}
- if (!cpu_type && !arch_perfmon_init(&cpu_type))
+ if (cpu_type)
+ break;
+
+ if (!cpu_has_arch_perfmon)
return -ENODEV;
+
+ /* use arch perfmon as fallback */
+ cpu_type = "i386/arch_perfmon";
+ model = &op_arch_perfmon_spec;
break;
default:
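
The new op_x86_get_ctrl() helper in the hunk above packs the user/kernel mode bits, the unit mask and the (possibly extended) event number into a single event-select value. The sketch below mirrors that packing in a stand-alone program; the bit positions used for the USR/OS/INT flags follow the standard x86 event-select layout and are assumptions for illustration, not definitions taken from the kernel headers.

#include <stdio.h>
#include <stdint.h>

/* Standard x86 performance event-select bit positions (assumed for illustration). */
#define EVENTSEL_USR  (1ULL << 16)
#define EVENTSEL_OS   (1ULL << 17)
#define EVENTSEL_INT  (1ULL << 20)

/* Mirrors the shape of op_x86_get_ctrl(): pack event, umask and mode bits. */
static uint64_t get_ctrl(unsigned int event, unsigned int umask, int user, int kernel)
{
	uint64_t val = EVENTSEL_INT;

	val |= user   ? EVENTSEL_USR : 0;
	val |= kernel ? EVENTSEL_OS  : 0;
	val |= (uint64_t)(umask & 0xFF) << 8;
	val |= event & 0xFF;				/* low 8 event bits */
	val |= (uint64_t)(event & 0x0F00) << 24;	/* extended event bits land at 32-35 */

	return val;
}

int main(void)
{
	printf("ctrl = %#llx\n", (unsigned long long)get_ctrl(0x076, 0x00, 1, 1));
	return 0;
}
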
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 8fdf06e4edf9..cc930467575d 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -25,43 +25,28 @@
#define NUM_COUNTERS 4
#define NUM_CONTROLS 4
+#define OP_EVENT_MASK 0x0FFF
+#define OP_CTR_OVERFLOW (1ULL<<31)
-#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
-
-#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
-#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
-#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
-#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR_LO(x) (x &= (1<<21))
-#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
-#define CTRL_SET_ENABLE(val) (val |= 1<<20)
-#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
-#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
-#define CTRL_SET_UM(val, m) (val |= (m << 8))
-#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
-#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
-#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
-#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
+#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
static unsigned long reset_value[NUM_COUNTERS];
#ifdef CONFIG_OPROFILE_IBS
/* IbsFetchCtl bits/masks */
-#define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */
-#define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */
-#define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */
+#define IBS_FETCH_RAND_EN (1ULL<<57)
+#define IBS_FETCH_VAL (1ULL<<49)
+#define IBS_FETCH_ENABLE (1ULL<<48)
+#define IBS_FETCH_CNT_MASK 0xFFFF0000ULL
/*IbsOpCtl bits */
-#define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */
-#define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */
+#define IBS_OP_CNT_CTL (1ULL<<19)
+#define IBS_OP_VAL (1ULL<<18)
+#define IBS_OP_ENABLE (1ULL<<17)
-#define IBS_FETCH_SIZE 6
-#define IBS_OP_SIZE 12
+#define IBS_FETCH_SIZE 6
+#define IBS_OP_SIZE 12
static int has_ibs; /* AMD Family10h and later */
@@ -99,49 +84,38 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
}
}
-
-static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
+static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
/* clear all counters */
for (i = 0 ; i < NUM_CONTROLS; ++i) {
- if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+ if (unlikely(!msrs->controls[i].addr))
continue;
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR_LO(low);
- CTRL_CLEAR_HI(high);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ wrmsrl(msrs->controls[i].addr, val);
}
/* avoid a false detection of ctr overflows in NMI handler */
for (i = 0; i < NUM_COUNTERS; ++i) {
- if (unlikely(!CTR_IS_RESERVED(msrs, i)))
+ if (unlikely(!msrs->counters[i].addr))
continue;
- CTR_WRITE(1, msrs, i);
+ wrmsrl(msrs->counters[i].addr, -1LL);
}
/* enable active counters */
for (i = 0; i < NUM_COUNTERS; ++i) {
- if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
+ if (counter_config[i].enabled && msrs->counters[i].addr) {
reset_value[i] = counter_config[i].count;
-
- CTR_WRITE(counter_config[i].count, msrs, i);
-
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR_LO(low);
- CTRL_CLEAR_HI(high);
- CTRL_SET_ENABLE(low);
- CTRL_SET_USR(low, counter_config[i].user);
- CTRL_SET_KERN(low, counter_config[i].kernel);
- CTRL_SET_UM(low, counter_config[i].unit_mask);
- CTRL_SET_EVENT_LOW(low, counter_config[i].event);
- CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
- CTRL_SET_HOST_ONLY(high, 0);
- CTRL_SET_GUEST_ONLY(high, 0);
-
- CTRL_WRITE(low, high, msrs, i);
+ wrmsrl(msrs->counters[i].addr,
+ -(s64)counter_config[i].count);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ val |= op_x86_get_ctrl(model, &counter_config[i]);
+ wrmsrl(msrs->controls[i].addr, val);
} else {
reset_value[i] = 0;
}
@@ -154,93 +128,116 @@ static inline int
op_amd_handle_ibs(struct pt_regs * const regs,
struct op_msrs const * const msrs)
{
- u32 low, high;
- u64 msr;
+ u64 val, ctl;
struct op_entry entry;
if (!has_ibs)
return 1;
if (ibs_config.fetch_enabled) {
- rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
- if (high & IBS_FETCH_HIGH_VALID_BIT) {
- rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
- oprofile_write_reserve(&entry, regs, msr,
+ rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
+ if (ctl & IBS_FETCH_VAL) {
+ rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
+ oprofile_write_reserve(&entry, regs, val,
IBS_FETCH_CODE, IBS_FETCH_SIZE);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- oprofile_add_data(&entry, low);
- oprofile_add_data(&entry, high);
- rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
+ oprofile_add_data64(&entry, val);
+ oprofile_add_data64(&entry, ctl);
+ rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
+ oprofile_add_data64(&entry, val);
oprofile_write_commit(&entry);
/* reenable the IRQ */
- high &= ~IBS_FETCH_HIGH_VALID_BIT;
- high |= IBS_FETCH_HIGH_ENABLE;
- low &= IBS_FETCH_LOW_MAX_CNT_MASK;
- wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+ ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
+ ctl |= IBS_FETCH_ENABLE;
+ wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
}
}
if (ibs_config.op_enabled) {
- rdmsr(MSR_AMD64_IBSOPCTL, low, high);
- if (low & IBS_OP_LOW_VALID_BIT) {
- rdmsrl(MSR_AMD64_IBSOPRIP, msr);
- oprofile_write_reserve(&entry, regs, msr,
+ rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
+ if (ctl & IBS_OP_VAL) {
+ rdmsrl(MSR_AMD64_IBSOPRIP, val);
+ oprofile_write_reserve(&entry, regs, val,
IBS_OP_CODE, IBS_OP_SIZE);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSOPDATA, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSOPDATA2, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSOPDATA3, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSDCLINAD, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSOPDATA, val);
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSOPDATA2, val);
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSOPDATA3, val);
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSDCLINAD, val);
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
+ oprofile_add_data64(&entry, val);
oprofile_write_commit(&entry);
/* reenable the IRQ */
- high = 0;
- low &= ~IBS_OP_LOW_VALID_BIT;
- low |= IBS_OP_LOW_ENABLE;
- wrmsr(MSR_AMD64_IBSOPCTL, low, high);
+ ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
+ ctl |= IBS_OP_ENABLE;
+ wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
}
}
return 1;
}
+static inline void op_amd_start_ibs(void)
+{
+ u64 val;
+ if (has_ibs && ibs_config.fetch_enabled) {
+ val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
+ val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
+ val |= IBS_FETCH_ENABLE;
+ wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
+ }
+
+ if (has_ibs && ibs_config.op_enabled) {
+ val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
+ val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
+ val |= IBS_OP_ENABLE;
+ wrmsrl(MSR_AMD64_IBSOPCTL, val);
+ }
+}
+
+static void op_amd_stop_ibs(void)
+{
+ if (has_ibs && ibs_config.fetch_enabled)
+ /* clear max count and enable */
+ wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);
+
+ if (has_ibs && ibs_config.op_enabled)
+ /* clear max count and enable */
+ wrmsrl(MSR_AMD64_IBSOPCTL, 0);
+}
+
+#else
+
+static inline int op_amd_handle_ibs(struct pt_regs * const regs,
+ struct op_msrs const * const msrs) { return 0; }
+static inline void op_amd_start_ibs(void) { }
+static inline void op_amd_stop_ibs(void) { }
+
#endif
static int op_amd_check_ctrs(struct pt_regs * const regs,
struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
for (i = 0 ; i < NUM_COUNTERS; ++i) {
if (!reset_value[i])
continue;
- CTR_READ(low, high, msrs, i);
- if (CTR_OVERFLOWED(low)) {
- oprofile_add_sample(regs, i);
- CTR_WRITE(reset_value[i], msrs, i);
- }
+ rdmsrl(msrs->counters[i].addr, val);
+ /* bit is clear if overflowed: */
+ if (val & OP_CTR_OVERFLOW)
+ continue;
+ oprofile_add_sample(regs, i);
+ wrmsrl(msrs->counters[i].addr, -(s64)reset_value[i]);
}
-#ifdef CONFIG_OPROFILE_IBS
op_amd_handle_ibs(regs, msrs);
-#endif
/* See op_model_ppro.c */
return 1;
@@ -248,38 +245,22 @@ static int op_amd_check_ctrs(struct pt_regs * const regs,
static void op_amd_start(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
for (i = 0 ; i < NUM_COUNTERS ; ++i) {
if (reset_value[i]) {
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_ACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(msrs->controls[i].addr, val);
}
}
-#ifdef CONFIG_OPROFILE_IBS
- if (has_ibs && ibs_config.fetch_enabled) {
- low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
- high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
- + IBS_FETCH_HIGH_ENABLE;
- wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
- }
-
- if (has_ibs && ibs_config.op_enabled) {
- low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
- + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
- + IBS_OP_LOW_ENABLE;
- high = 0;
- wrmsr(MSR_AMD64_IBSOPCTL, low, high);
- }
-#endif
+ op_amd_start_ibs();
}
-
static void op_amd_stop(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
/*
@@ -289,26 +270,12 @@ static void op_amd_stop(struct op_msrs const * const msrs)
for (i = 0 ; i < NUM_COUNTERS ; ++i) {
if (!reset_value[i])
continue;
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_INACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
- }
-
-#ifdef CONFIG_OPROFILE_IBS
- if (has_ibs && ibs_config.fetch_enabled) {
- /* clear max count and enable */
- low = 0;
- high = 0;
- wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(msrs->controls[i].addr, val);
}
- if (has_ibs && ibs_config.op_enabled) {
- /* clear max count and enable */
- low = 0;
- high = 0;
- wrmsr(MSR_AMD64_IBSOPCTL, low, high);
- }
-#endif
+ op_amd_stop_ibs();
}
static void op_amd_shutdown(struct op_msrs const * const msrs)
@@ -316,11 +283,11 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
int i;
for (i = 0 ; i < NUM_COUNTERS ; ++i) {
- if (CTR_IS_RESERVED(msrs, i))
+ if (msrs->counters[i].addr)
release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
}
for (i = 0 ; i < NUM_CONTROLS ; ++i) {
- if (CTRL_IS_RESERVED(msrs, i))
+ if (msrs->controls[i].addr)
release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
}
}
@@ -491,14 +458,16 @@ static void op_amd_exit(void) {}
#endif /* CONFIG_OPROFILE_IBS */
struct op_x86_model_spec const op_amd_spec = {
- .init = op_amd_init,
- .exit = op_amd_exit,
.num_counters = NUM_COUNTERS,
.num_controls = NUM_CONTROLS,
+ .reserved = MSR_AMD_EVENTSEL_RESERVED,
+ .event_mask = OP_EVENT_MASK,
+ .init = op_amd_init,
+ .exit = op_amd_exit,
.fill_in_addresses = &op_amd_fill_in_addresses,
.setup_ctrs = &op_amd_setup_ctrs,
.check_ctrs = &op_amd_check_ctrs,
.start = &op_amd_start,
.stop = &op_amd_stop,
- .shutdown = &op_amd_shutdown
+ .shutdown = &op_amd_shutdown,
};
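
The rewritten AMD model above arms each counter by writing the negative sample count and later treats a cleared bit 31 as the overflow indication ("bit is clear if overflowed"). Below is a minimal sketch of why that works, truncated to a 32-bit counter for simplicity even though the hardware counter is wider; the period value is hypothetical.

#include <stdio.h>
#include <stdint.h>

#define OP_CTR_OVERFLOW (1ULL << 31)

int main(void)
{
	uint64_t count = 100000;			/* hypothetical sample period */
	uint32_t ctr = (uint32_t)-(int64_t)count;	/* low bits of what op_amd_setup_ctrs() writes */

	printf("armed:      %#x  bit31=%d\n", ctr, !!(ctr & OP_CTR_OVERFLOW));

	ctr += count;					/* the PMU counts 'count' events and wraps */
	printf("overflowed: %#x  bit31=%d\n", ctr, !!(ctr & OP_CTR_OVERFLOW));
	return 0;
}
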
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 819b131fd752..f01e53b118fa 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -32,6 +32,8 @@
#define NUM_CCCRS_HT2 9
#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2)
+#define OP_CTR_OVERFLOW (1ULL<<31)
+
static unsigned int num_counters = NUM_COUNTERS_NON_HT;
static unsigned int num_controls = NUM_CONTROLS_NON_HT;
@@ -350,8 +352,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
-#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
-#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
#define CCCR_RESERVED_BITS 0x38030FFF
#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
@@ -361,17 +361,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
-#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
-#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
-#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
-#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
-#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
-
/* this assigns a "stagger" to the current CPU, which is used throughout
the code in this module as an extra array offset, to select the "even"
@@ -515,7 +507,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
if (ev->bindings[i].virt_counter & counter_bit) {
/* modify ESCR */
- ESCR_READ(escr, high, ev, i);
+ rdmsr(ev->bindings[i].escr_address, escr, high);
ESCR_CLEAR(escr);
if (stag == 0) {
ESCR_SET_USR_0(escr, counter_config[ctr].user);
@@ -526,10 +518,11 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
}
ESCR_SET_EVENT_SELECT(escr, ev->event_select);
ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
- ESCR_WRITE(escr, high, ev, i);
+ wrmsr(ev->bindings[i].escr_address, escr, high);
/* modify CCCR */
- CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
+ rdmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
+ cccr, high);
CCCR_CLEAR(cccr);
CCCR_SET_REQUIRED_BITS(cccr);
CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
@@ -537,7 +530,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
CCCR_SET_PMI_OVF_0(cccr);
else
CCCR_SET_PMI_OVF_1(cccr);
- CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
+ wrmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
+ cccr, high);
return;
}
}
@@ -548,7 +542,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
}
-static void p4_setup_ctrs(struct op_msrs const * const msrs)
+static void p4_setup_ctrs(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
{
unsigned int i;
unsigned int low, high;
@@ -564,7 +559,7 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
/* clear the cccrs we will use */
for (i = 0 ; i < num_counters ; i++) {
- if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+ if (unlikely(!msrs->controls[i].addr))
continue;
rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
CCCR_CLEAR(low);
@@ -574,17 +569,18 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
/* clear all escrs (including those outside our concern) */
for (i = num_counters; i < num_controls; i++) {
- if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+ if (unlikely(!msrs->controls[i].addr))
continue;
wrmsr(msrs->controls[i].addr, 0, 0);
}
/* setup all counters */
for (i = 0 ; i < num_counters ; ++i) {
- if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
+ if (counter_config[i].enabled && msrs->controls[i].addr) {
reset_value[i] = counter_config[i].count;
pmc_setup_one_p4_counter(i);
- CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
+ wrmsrl(p4_counters[VIRT_CTR(stag, i)].counter_address,
+ -(s64)counter_config[i].count);
} else {
reset_value[i] = 0;
}
@@ -624,14 +620,16 @@ static int p4_check_ctrs(struct pt_regs * const regs,
real = VIRT_CTR(stag, i);
- CCCR_READ(low, high, real);
- CTR_READ(ctr, high, real);
- if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
+ rdmsr(p4_counters[real].cccr_address, low, high);
+ rdmsr(p4_counters[real].counter_address, ctr, high);
+ if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) {
oprofile_add_sample(regs, i);
- CTR_WRITE(reset_value[i], real);
+ wrmsrl(p4_counters[real].counter_address,
+ -(s64)reset_value[i]);
CCCR_CLEAR_OVF(low);
- CCCR_WRITE(low, high, real);
- CTR_WRITE(reset_value[i], real);
+ wrmsr(p4_counters[real].cccr_address, low, high);
+ wrmsrl(p4_counters[real].counter_address,
+ -(s64)reset_value[i]);
}
}
@@ -653,9 +651,9 @@ static void p4_start(struct op_msrs const * const msrs)
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
- CCCR_READ(low, high, VIRT_CTR(stag, i));
+ rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
CCCR_SET_ENABLE(low);
- CCCR_WRITE(low, high, VIRT_CTR(stag, i));
+ wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
}
}
@@ -670,9 +668,9 @@ static void p4_stop(struct op_msrs const * const msrs)
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
- CCCR_READ(low, high, VIRT_CTR(stag, i));
+ rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
CCCR_SET_DISABLE(low);
- CCCR_WRITE(low, high, VIRT_CTR(stag, i));
+ wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
}
}
@@ -681,7 +679,7 @@ static void p4_shutdown(struct op_msrs const * const msrs)
int i;
for (i = 0 ; i < num_counters ; ++i) {
- if (CTR_IS_RESERVED(msrs, i))
+ if (msrs->counters[i].addr)
release_perfctr_nmi(msrs->counters[i].addr);
}
/*
@@ -690,7 +688,7 @@ static void p4_shutdown(struct op_msrs const * const msrs)
* This saves a few bits.
*/
for (i = num_counters ; i < num_controls ; ++i) {
- if (CTRL_IS_RESERVED(msrs, i))
+ if (msrs->controls[i].addr)
release_evntsel_nmi(msrs->controls[i].addr);
}
}
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 4da7230b3d17..cd72d5c73b49 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -10,6 +10,7 @@
* @author Philippe Elie
* @author Graydon Hoare
* @author Andi Kleen
+ * @author Robert Richter <robert.richter@amd.com>
*/
#include <linux/oprofile.h>
@@ -18,7 +19,6 @@
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>
-#include <asm/perf_counter.h>
#include "op_x86_model.h"
#include "op_counter.h"
@@ -26,20 +26,7 @@
static int num_counters = 2;
static int counter_width = 32;
-#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
-
-#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
-#define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
-#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
-#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR(x) (x &= (1<<21))
-#define CTRL_SET_ENABLE(val) (val |= 1<<20)
-#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
-#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
-#define CTRL_SET_UM(val, m) (val |= (m << 8))
-#define CTRL_SET_EVENT(val, e) (val |= e)
+#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
static u64 *reset_value;
@@ -63,9 +50,10 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
}
-static void ppro_setup_ctrs(struct op_msrs const * const msrs)
+static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
if (!reset_value) {
@@ -94,35 +82,29 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
/* clear all counters */
for (i = 0 ; i < num_counters; ++i) {
- if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+ if (unlikely(!msrs->controls[i].addr))
continue;
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ wrmsrl(msrs->controls[i].addr, val);
}
/* avoid a false detection of ctr overflows in NMI handler */
for (i = 0; i < num_counters; ++i) {
- if (unlikely(!CTR_IS_RESERVED(msrs, i)))
+ if (unlikely(!msrs->counters[i].addr))
continue;
wrmsrl(msrs->counters[i].addr, -1LL);
}
/* enable active counters */
for (i = 0; i < num_counters; ++i) {
- if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
+ if (counter_config[i].enabled && msrs->counters[i].addr) {
reset_value[i] = counter_config[i].count;
-
wrmsrl(msrs->counters[i].addr, -reset_value[i]);
-
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low);
- CTRL_SET_ENABLE(low);
- CTRL_SET_USR(low, counter_config[i].user);
- CTRL_SET_KERN(low, counter_config[i].kernel);
- CTRL_SET_UM(low, counter_config[i].unit_mask);
- CTRL_SET_EVENT(low, counter_config[i].event);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ val |= op_x86_get_ctrl(model, &counter_config[i]);
+ wrmsrl(msrs->controls[i].addr, val);
} else {
reset_value[i] = 0;
}
@@ -147,10 +129,10 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
if (!reset_value[i])
continue;
rdmsrl(msrs->counters[i].addr, val);
- if (CTR_OVERFLOWED(val)) {
- oprofile_add_sample(regs, i);
- wrmsrl(msrs->counters[i].addr, -reset_value[i]);
- }
+ if (val & (1ULL << (counter_width - 1)))
+ continue;
+ oprofile_add_sample(regs, i);
+ wrmsrl(msrs->counters[i].addr, -reset_value[i]);
}
out:
@@ -171,16 +153,16 @@ out:
static void ppro_start(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
if (!reset_value)
return;
for (i = 0; i < num_counters; ++i) {
if (reset_value[i]) {
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_ACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(msrs->controls[i].addr, val);
}
}
}
@@ -188,7 +170,7 @@ static void ppro_start(struct op_msrs const * const msrs)
static void ppro_stop(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
if (!reset_value)
@@ -196,9 +178,9 @@ static void ppro_stop(struct op_msrs const * const msrs)
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_INACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(msrs->controls[i].addr, val);
}
}
@@ -207,11 +189,11 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
int i;
for (i = 0 ; i < num_counters ; ++i) {
- if (CTR_IS_RESERVED(msrs, i))
+ if (msrs->counters[i].addr)
release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
}
for (i = 0 ; i < num_counters ; ++i) {
- if (CTRL_IS_RESERVED(msrs, i))
+ if (msrs->controls[i].addr)
release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
}
if (reset_value) {
@@ -221,9 +203,10 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
}
-struct op_x86_model_spec op_ppro_spec = {
- .num_counters = 2, /* can be overriden */
- .num_controls = 2, /* dito */
+struct op_x86_model_spec const op_ppro_spec = {
+ .num_counters = 2,
+ .num_controls = 2,
+ .reserved = MSR_PPRO_EVENTSEL_RESERVED,
.fill_in_addresses = &ppro_fill_in_addresses,
.setup_ctrs = &ppro_setup_ctrs,
.check_ctrs = &ppro_check_ctrs,
@@ -241,7 +224,7 @@ struct op_x86_model_spec op_ppro_spec = {
* the specific CPU.
*/
-void arch_perfmon_setup_counters(void)
+static void arch_perfmon_setup_counters(void)
{
union cpuid10_eax eax;
@@ -259,11 +242,17 @@ void arch_perfmon_setup_counters(void)
op_arch_perfmon_spec.num_counters = num_counters;
op_arch_perfmon_spec.num_controls = num_counters;
- op_ppro_spec.num_counters = num_counters;
- op_ppro_spec.num_controls = num_counters;
+}
+
+static int arch_perfmon_init(struct oprofile_operations *ignore)
+{
+ arch_perfmon_setup_counters();
+ return 0;
}
struct op_x86_model_spec op_arch_perfmon_spec = {
+ .reserved = MSR_PPRO_EVENTSEL_RESERVED,
+ .init = &arch_perfmon_init,
/* num_counters/num_controls filled in at runtime */
.fill_in_addresses = &ppro_fill_in_addresses,
/* user space does the cpuid check for available events */
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 825e79064d64..505489873b9d 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -6,19 +6,18 @@
* @remark Read the file COPYING
*
* @author Graydon Hoare
+ * @author Robert Richter <robert.richter@amd.com>
*/
#ifndef OP_X86_MODEL_H
#define OP_X86_MODEL_H
-struct op_saved_msr {
- unsigned int high;
- unsigned int low;
-};
+#include <asm/types.h>
+#include <asm/perf_counter.h>
struct op_msr {
- unsigned long addr;
- struct op_saved_msr saved;
+ unsigned long addr;
+ u64 saved;
};
struct op_msrs {
@@ -28,29 +27,37 @@ struct op_msrs {
struct pt_regs;
+struct oprofile_operations;
+
/* The model vtable abstracts the differences between
* various x86 CPU models' perfctr support.
*/
struct op_x86_model_spec {
- int (*init)(struct oprofile_operations *ops);
- void (*exit)(void);
- unsigned int num_counters;
- unsigned int num_controls;
- void (*fill_in_addresses)(struct op_msrs * const msrs);
- void (*setup_ctrs)(struct op_msrs const * const msrs);
- int (*check_ctrs)(struct pt_regs * const regs,
- struct op_msrs const * const msrs);
- void (*start)(struct op_msrs const * const msrs);
- void (*stop)(struct op_msrs const * const msrs);
- void (*shutdown)(struct op_msrs const * const msrs);
+ unsigned int num_counters;
+ unsigned int num_controls;
+ u64 reserved;
+ u16 event_mask;
+ int (*init)(struct oprofile_operations *ops);
+ void (*exit)(void);
+ void (*fill_in_addresses)(struct op_msrs * const msrs);
+ void (*setup_ctrs)(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs);
+ int (*check_ctrs)(struct pt_regs * const regs,
+ struct op_msrs const * const msrs);
+ void (*start)(struct op_msrs const * const msrs);
+ void (*stop)(struct op_msrs const * const msrs);
+ void (*shutdown)(struct op_msrs const * const msrs);
};
-extern struct op_x86_model_spec op_ppro_spec;
+struct op_counter_config;
+
+extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
+ struct op_counter_config *counter_config);
+
+extern struct op_x86_model_spec const op_ppro_spec;
extern struct op_x86_model_spec const op_p4_spec;
extern struct op_x86_model_spec const op_p4_ht2_spec;
extern struct op_x86_model_spec const op_amd_spec;
extern struct op_x86_model_spec op_arch_perfmon_spec;
-extern void arch_perfmon_setup_counters(void);
-
#endif /* OP_X86_MODEL_H */
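
op_x86_model_spec is effectively a small vtable: per-CPU-model data (counter counts, reserved bits, event mask) plus function pointers, and with this patch setup_ctrs() receives the model pointer explicitly so shared helpers such as op_x86_get_ctrl() can consult per-model fields. A toy sketch of the same pattern, with invented names and values purely for illustration:

#include <stdio.h>

/* A toy version of the model-vtable pattern used by op_x86_model_spec
 * (names and fields are illustrative, not the kernel's). */
struct toy_model_spec {
	unsigned int num_counters;
	unsigned long long reserved;
	void (*setup_ctrs)(const struct toy_model_spec *model);
};

static void toy_setup(const struct toy_model_spec *model)
{
	/* The model pointer is passed explicitly, as in the patched
	 * setup_ctrs(model, msrs) signature, so shared code can read
	 * per-model data such as the reserved-bit mask. */
	printf("setting up %u counters, reserved mask %#llx\n",
	       model->num_counters, model->reserved);
}

static const struct toy_model_spec toy_spec = {
	.num_counters = 2,
	.reserved     = (0xFFFFFFFFULL << 32) | (1ULL << 21),
	.setup_ctrs   = toy_setup,
};

int main(void)
{
	toy_spec.setup_ctrs(&toy_spec);
	return 0;
}
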
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index b26626dc517c..1014eb4bfc37 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -68,6 +68,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
unsigned long flags;
struct resource *root;
int max_root_bus_resources = PCI_BUS_NUM_RESOURCES;
+ u64 start, end;
+
+ if (bus_has_transparent_bridge(info->bus))
+ max_root_bus_resources -= 3;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
@@ -84,25 +88,24 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
} else
return AE_OK;
- res = &info->res[info->res_num];
- res->name = info->name;
- res->flags = flags;
- res->start = addr.minimum + addr.translation_offset;
- res->end = res->start + addr.address_length - 1;
- res->child = NULL;
-
- if (bus_has_transparent_bridge(info->bus))
- max_root_bus_resources -= 3;
+ start = addr.minimum + addr.translation_offset;
+ end = start + addr.address_length - 1;
if (info->res_num >= max_root_bus_resources) {
printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx "
"from %s for %s due to _CRS returning more than "
- "%d resource descriptors\n", (unsigned long) res->start,
- (unsigned long) res->end, root->name, info->name,
+ "%d resource descriptors\n", (unsigned long) start,
+ (unsigned long) end, root->name, info->name,
max_root_bus_resources);
- info->res_num++;
return AE_OK;
}
+ res = &info->res[info->res_num];
+ res->name = info->name;
+ res->flags = flags;
+ res->start = start;
+ res->end = end;
+ res->child = NULL;
+
if (insert_resource(root, res)) {
printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx "
"from %s for %s\n", (unsigned long) res->start,
@@ -115,23 +118,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
}
static void
-adjust_transparent_bridge_resources(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- int i;
- u16 class = dev->class >> 8;
-
- if (class == PCI_CLASS_BRIDGE_PCI && dev->transparent) {
- for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
- dev->subordinate->resource[i] =
- dev->bus->resource[i - 3];
- }
- }
-}
-
-static void
get_current_resources(struct acpi_device *device, int busnum,
int domain, struct pci_bus *bus)
{
@@ -158,8 +144,6 @@ get_current_resources(struct acpi_device *device, int busnum,
info.res_num = 0;
acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
&info);
- if (info.res_num)
- adjust_transparent_bridge_resources(bus);
return;
@@ -222,8 +206,15 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
*/
memcpy(bus->sysdata, sd, sizeof(*sd));
kfree(sd);
- } else
- bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
+ } else {
+ bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
+ if (bus) {
+ if (pci_probe & PCI_USE__CRS)
+ get_current_resources(device, busnum, domain,
+ bus);
+ bus->subordinate = pci_scan_child_bus(bus);
+ }
+ }
if (!bus)
kfree(sd);
@@ -238,8 +229,6 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
#endif
}
- if (bus && (pci_probe & PCI_USE__CRS))
- get_current_resources(device, busnum, domain, bus);
return bus;
}
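
The reordered setup_resource() above computes the window first and only claims a struct resource slot once it is sure the descriptor will be used. The window arithmetic itself is just the _CRS minimum plus the translation offset, with an inclusive end; a tiny worked example with hypothetical values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical _CRS values, for illustration only. */
	unsigned long long minimum            = 0xd0000000ULL;
	unsigned long long translation_offset = 0;
	unsigned long long length             = 0x10000000ULL;

	/* Same arithmetic as the reworked setup_resource(). */
	unsigned long long start = minimum + translation_offset;
	unsigned long long end   = start + length - 1;

	printf("window %#llx-%#llx\n", start, end);
	return 0;
}
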
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index f893d6a6e803..3ffa10df20b9 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -100,8 +100,9 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
int j;
struct pci_root_info *info;
- /* don't go for it if _CRS is used */
- if (pci_probe & PCI_USE__CRS)
+ /* don't go for it if _CRS is used already */
+ if (b->resource[0] != &ioport_resource ||
+ b->resource[1] != &iomem_resource)
return;
/* if only one root bus, don't need to anything */
@@ -116,6 +117,9 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
if (i == pci_root_num)
return;
+ printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
+ b->number);
+
info = &pci_root_info[i];
for (j = 0; j < info->res_num; j++) {
struct resource *res;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index d277ef1eea51..b3d20b9cac63 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -244,7 +244,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
do_fpu_end();
mtrr_ap_init();
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_OLD_MCE
mcheck_init(&boot_cpu_data);
#endif
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 4ceb28581652..6fa64cb1b01e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
/* Get the "official" set of cpus referring to our pagetable. */
if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+ if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
&& per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
}
return;
}
- cpumask_copy(mask, &mm->cpu_vm_mask);
+ cpumask_copy(mask, mm_cpumask(mm));
/* It's possible that a vcpu may have a stale reference to our
cr3, because its in lazy mode, and it hasn't yet flushed
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 41c159cd872f..b1e24638acd7 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -287,6 +287,7 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
}
.xt.lit : { *(.xt.lit) }
diff --git a/block/Makefile b/block/Makefile
index e9fa4dd690f2..6c54ed0ff755 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
+ ioctl.o genhd.o scsi_ioctl.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 7a12cf6ee1d3..ce8ba57c6557 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -146,7 +146,7 @@ enum arq_state {
#define RQ_STATE(rq) ((enum arq_state)(rq)->elevator_private2)
#define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state)
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);
@@ -161,7 +161,7 @@ static void as_antic_stop(struct as_data *ad);
static void free_as_io_context(struct as_io_context *aic)
{
kfree(aic);
- elv_ioc_count_dec(ioc_count);
+ elv_ioc_count_dec(as_ioc_count);
if (ioc_gone) {
/*
* AS scheduler is exiting, grab exit lock and check
@@ -169,7 +169,7 @@ static void free_as_io_context(struct as_io_context *aic)
* complete ioc_gone and set it back to NULL.
*/
spin_lock(&ioc_gone_lock);
- if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+ if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
complete(ioc_gone);
ioc_gone = NULL;
}
@@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void)
ret->seek_total = 0;
ret->seek_samples = 0;
ret->seek_mean = 0;
- elv_ioc_count_inc(ioc_count);
+ elv_ioc_count_inc(as_ioc_count);
}
return ret;
@@ -1507,7 +1507,7 @@ static void __exit as_exit(void)
ioc_gone = &all_gone;
/* ioc_gone's update must be visible before reading ioc_count */
smp_wmb();
- if (elv_ioc_count_read(ioc_count))
+ if (elv_ioc_count_read(as_ioc_count))
wait_for_completion(&all_gone);
synchronize_rcu();
}
diff --git a/block/blk-core.c b/block/blk-core.c
index b06cf5c2a829..0214837befa6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -501,6 +501,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+ q->backing_dev_info.name = "block";
err = bdi_init(&q->backing_dev_info);
if (err) {
@@ -595,8 +596,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
q->sg_reserved_size = INT_MAX;
- blk_set_cmd_filter_defaults(&q->cmd_filter);
-
/*
* all done
*/
@@ -1120,17 +1119,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_type = REQ_TYPE_FS;
/*
- * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+ * Inherit FAILFAST from bio (for read-ahead, and explicit
+ * FAILFAST). FAILFAST flags are identical for req and bio.
*/
if (bio_rw_ahead(bio))
- req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER);
- if (bio_failfast_dev(bio))
- req->cmd_flags |= REQ_FAILFAST_DEV;
- if (bio_failfast_transport(bio))
- req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
- if (bio_failfast_driver(bio))
- req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ req->cmd_flags |= REQ_FAILFAST_MASK;
+ else
+ req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
if (unlikely(bio_discard(bio))) {
req->cmd_flags |= REQ_DISCARD;
@@ -1170,8 +1165,14 @@ static int __make_request(struct request_queue *q, struct bio *bio)
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
const int unplug = bio_unplug(bio);
+ const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
int rw_flags;
+ if (bio_barrier(bio) && bio_has_data(bio) &&
+ (q->next_ordered == QUEUE_ORDERED_NONE)) {
+ bio_endio(bio, -EOPNOTSUPP);
+ return 0;
+ }
/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
@@ -1194,6 +1195,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
trace_block_bio_backmerge(q, bio);
+ if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ blk_rq_set_mixed_merge(req);
+
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bytes;
@@ -1213,6 +1217,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
trace_block_bio_frontmerge(q, bio);
+ if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
+ blk_rq_set_mixed_merge(req);
+ req->cmd_flags &= ~REQ_FAILFAST_MASK;
+ req->cmd_flags |= ff;
+ }
+
bio->bi_next = req->bio;
req->bio = bio;
@@ -1472,11 +1482,6 @@ static inline void __generic_make_request(struct bio *bio)
err = -EOPNOTSUPP;
goto end_io;
}
- if (bio_barrier(bio) && bio_has_data(bio) &&
- (q->next_ordered == QUEUE_ORDERED_NONE)) {
- err = -EOPNOTSUPP;
- goto end_io;
- }
ret = q->make_request_fn(q, bio);
} while (ret);
@@ -1662,6 +1667,50 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
+/**
+ * blk_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ * A request could be merge of IOs which require different failure
+ * handling. This function determines the number of bytes which
+ * can be failed from the beginning of the request without
+ * crossing into area which need to be retried further.
+ *
+ * Return:
+ * The number of bytes to fail.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+unsigned int blk_rq_err_bytes(const struct request *rq)
+{
+ unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ unsigned int bytes = 0;
+ struct bio *bio;
+
+ if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+ return blk_rq_bytes(rq);
+
+ /*
+ * Currently the only 'mixing' which can happen is between
+ * different failfast types. We can safely fail portions
+ * which have all the failfast bits that the first one has -
+ * the ones which are at least as eager to fail as the first
+ * one.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ if ((bio->bi_rw & ff) != ff)
+ break;
+ bytes += bio->bi_size;
+ }
+
+ /* this could lead to infinite loop */
+ BUG_ON(blk_rq_bytes(rq) && !bytes);
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
+
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (blk_do_io_stat(req)) {
@@ -2008,6 +2057,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (blk_fs_request(req) || blk_discard_rq(req))
req->__sector += total_bytes >> 9;
+ /* mixed attributes always follow the first bio */
+ if (req->cmd_flags & REQ_MIXED_MERGE) {
+ req->cmd_flags &= ~REQ_FAILFAST_MASK;
+ req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+ }
+
/*
* If total number of sectors is less than the first segment
* size, something has gone terribly wrong.
@@ -2187,6 +2242,25 @@ bool blk_end_request_cur(struct request *rq, int error)
EXPORT_SYMBOL_GPL(blk_end_request_cur);
/**
+ * blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ * Complete @rq till the next failure boundary.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool blk_end_request_err(struct request *rq, int error)
+{
+ WARN_ON(error >= 0);
+ return blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_end_request_err);
+
+/**
* __blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
* @error: %0 for success, < %0 for error
@@ -2245,12 +2319,31 @@ bool __blk_end_request_cur(struct request *rq, int error)
}
EXPORT_SYMBOL_GPL(__blk_end_request_cur);
+/**
+ * __blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ * Complete @rq till the next failure boundary. Must be called
+ * with queue lock held.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool __blk_end_request_err(struct request *rq, int error)
+{
+ WARN_ON(error >= 0);
+ return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_err);
+
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
- we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
- rq->cmd_flags |= (bio->bi_rw & 3);
+ /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
+ rq->cmd_flags |= bio->bi_rw & REQ_RW;
if (bio_has_data(bio)) {
rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -2365,7 +2458,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
__bio_clone(bio, bio_src);
if (bio_integrity(bio_src) &&
- bio_integrity_clone(bio, bio_src, gfp_mask))
+ bio_integrity_clone(bio, bio_src, gfp_mask, bs))
goto free_and_out;
if (bio_ctr && bio_ctr(bio, bio_src, data))
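
blk_rq_err_bytes() added above walks the request's bio chain and stops at the first bio whose failfast bits are weaker than those of the head bio, so only the prefix that is at least as eager to fail is reported as failable. A self-contained sketch of that walk, using mock flag values and a mock bio list (none of these stand-ins are the real block-layer definitions):

#include <stdio.h>

/* Simplified stand-ins for the real flags and bio list (illustration only). */
#define REQ_FAILFAST_DEV	(1u << 0)
#define REQ_FAILFAST_TRANSPORT	(1u << 1)
#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)

struct mock_bio {
	unsigned int rw;
	unsigned int size;
	struct mock_bio *next;
};

/* Mirrors the walk in blk_rq_err_bytes(): stop at the first bio that is
 * less eager to fail than the head of the request. */
static unsigned int err_bytes(struct mock_bio *head, unsigned int ff)
{
	unsigned int bytes = 0;
	struct mock_bio *bio;

	for (bio = head; bio; bio = bio->next) {
		if ((bio->rw & ff) != ff)
			break;
		bytes += bio->size;
	}
	return bytes;
}

int main(void)
{
	struct mock_bio c = { 0,                 4096, NULL };	/* retryable tail */
	struct mock_bio b = { REQ_FAILFAST_MASK, 4096, &c };
	struct mock_bio a = { REQ_FAILFAST_MASK, 4096, &b };

	printf("failable bytes: %u\n", err_bytes(&a, a.rw & REQ_FAILFAST_MASK));
	return 0;
}
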
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 39ce64432ba6..7c9ca01baa45 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -311,6 +311,36 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
return 1;
}
+/**
+ * blk_rq_set_mixed_merge - mark a request as mixed merge
+ * @rq: request to mark as mixed merge
+ *
+ * Description:
+ * @rq is about to be mixed merged. Make sure the attributes
+ * which can be mixed are set in each bio and mark @rq as mixed
+ * merged.
+ */
+void blk_rq_set_mixed_merge(struct request *rq)
+{
+ unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ struct bio *bio;
+
+ if (rq->cmd_flags & REQ_MIXED_MERGE)
+ return;
+
+ /*
+ * @rq will no longer represent mixable attributes for all the
+ * contained bios. It will just track those of the first one.
+ * Distribute the attributes to each bio.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
+ (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
+ bio->bi_rw |= ff;
+ }
+ rq->cmd_flags |= REQ_MIXED_MERGE;
+}
+
static void blk_account_io_merge(struct request *req)
{
if (blk_do_io_stat(req)) {
@@ -350,6 +380,12 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (blk_integrity_rq(req) != blk_integrity_rq(next))
return 0;
+ /* don't merge requests of different failfast settings */
+ if (blk_failfast_dev(req) != blk_failfast_dev(next) ||
+ blk_failfast_transport(req) != blk_failfast_transport(next) ||
+ blk_failfast_driver(req) != blk_failfast_driver(next))
+ return 0;
+
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -360,6 +396,19 @@ static int attempt_merge(struct request_queue *q, struct request *req,
return 0;
/*
+ * If failfast settings disagree or either of the two is already
+ * a mixed merge, mark both as mixed before proceeding. This
+ * makes sure that all involved bios have mixable attributes
+ * set properly.
+ */
+ if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+ (req->cmd_flags & REQ_FAILFAST_MASK) !=
+ (next->cmd_flags & REQ_FAILFAST_MASK)) {
+ blk_rq_set_mixed_merge(req);
+ blk_rq_set_mixed_merge(next);
+ }
+
+ /*
* At this point we have either done a back merge
* or front merge. We need the smaller start_time of
* the merged requests to be the current request
diff --git a/block/blk.h b/block/blk.h
index 3fae6add5430..5ee3d7e72feb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -104,6 +104,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
+void blk_rq_set_mixed_merge(struct request *rq);
void blk_queue_congestion_threshold(struct request_queue *q);
diff --git a/block/bsg.c b/block/bsg.c
index e7d475254248..5f184bb3ff9e 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -186,7 +186,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
- if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm))
+ if (blk_verify_command(rq->cmd, has_write_perm))
return -EPERM;
} else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 833ec18eaa63..5e174604da17 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);
@@ -71,6 +71,51 @@ struct cfq_rb_root {
#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, }
/*
+ * Per process-grouping structure
+ */
+struct cfq_queue {
+ /* reference count */
+ atomic_t ref;
+ /* various state flags, see below */
+ unsigned int flags;
+ /* parent cfq_data */
+ struct cfq_data *cfqd;
+ /* service_tree member */
+ struct rb_node rb_node;
+ /* service_tree key */
+ unsigned long rb_key;
+ /* prio tree member */
+ struct rb_node p_node;
+ /* prio tree root we belong to, if any */
+ struct rb_root *p_root;
+ /* sorted list of pending requests */
+ struct rb_root sort_list;
+ /* if fifo isn't expired, next request to serve */
+ struct request *next_rq;
+ /* requests queued in sort_list */
+ int queued[2];
+ /* currently allocated requests */
+ int allocated[2];
+ /* fifo list of requests in sort_list */
+ struct list_head fifo;
+
+ unsigned long slice_end;
+ long slice_resid;
+ unsigned int slice_dispatch;
+
+ /* pending metadata requests */
+ int meta_pending;
+ /* number of requests that are on the dispatch list or inside driver */
+ int dispatched;
+
+ /* io prio of this group */
+ unsigned short ioprio, org_ioprio;
+ unsigned short ioprio_class, org_ioprio_class;
+
+ pid_t pid;
+};
+
+/*
* Per block device queue structure
*/
struct cfq_data {
@@ -95,7 +140,7 @@ struct cfq_data {
*/
unsigned int busy_rt_queues;
- int rq_in_driver;
+ int rq_in_driver[2];
int sync_flight;
/*
@@ -135,51 +180,11 @@ struct cfq_data {
unsigned int cfq_slice_idle;
struct list_head cic_list;
-};
-
-/*
- * Per process-grouping structure
- */
-struct cfq_queue {
- /* reference count */
- atomic_t ref;
- /* various state flags, see below */
- unsigned int flags;
- /* parent cfq_data */
- struct cfq_data *cfqd;
- /* service_tree member */
- struct rb_node rb_node;
- /* service_tree key */
- unsigned long rb_key;
- /* prio tree member */
- struct rb_node p_node;
- /* prio tree root we belong to, if any */
- struct rb_root *p_root;
- /* sorted list of pending requests */
- struct rb_root sort_list;
- /* if fifo isn't expired, next request to serve */
- struct request *next_rq;
- /* requests queued in sort_list */
- int queued[2];
- /* currently allocated requests */
- int allocated[2];
- /* fifo list of requests in sort_list */
- struct list_head fifo;
-
- unsigned long slice_end;
- long slice_resid;
- unsigned int slice_dispatch;
-
- /* pending metadata requests */
- int meta_pending;
- /* number of requests that are on the dispatch list or inside driver */
- int dispatched;
-
- /* io prio of this group */
- unsigned short ioprio, org_ioprio;
- unsigned short ioprio_class, org_ioprio_class;
- pid_t pid;
+ /*
+ * Fallback dummy cfqq for extreme OOM conditions
+ */
+ struct cfq_queue oom_cfqq;
};
enum cfqq_state_flags {
@@ -234,6 +239,11 @@ static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
struct io_context *);
+static inline int rq_in_driver(struct cfq_data *cfqd)
+{
+ return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
+}
+
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
int is_sync)
{
@@ -755,9 +765,9 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- cfqd->rq_in_driver++;
+ cfqd->rq_in_driver[rq_is_sync(rq)]++;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
- cfqd->rq_in_driver);
+ rq_in_driver(cfqd));
cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}
@@ -765,11 +775,12 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
- WARN_ON(!cfqd->rq_in_driver);
- cfqd->rq_in_driver--;
+ WARN_ON(!cfqd->rq_in_driver[sync]);
+ cfqd->rq_in_driver[sync]--;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
- cfqd->rq_in_driver);
+ rq_in_driver(cfqd));
}
static void cfq_remove_request(struct request *rq)
@@ -1075,7 +1086,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
/*
* still requests with the driver, don't idle
*/
- if (cfqd->rq_in_driver)
+ if (rq_in_driver(cfqd))
return;
/*
@@ -1307,6 +1318,12 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
return 0;
/*
+ * Drain async requests before we start sync IO
+ */
+ if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+ return 0;
+
+ /*
* If this is an async queue and we have sync IO in flight, let it wait
*/
if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
@@ -1422,7 +1439,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
cic = container_of(head, struct cfq_io_context, rcu_head);
kmem_cache_free(cfq_ioc_pool, cic);
- elv_ioc_count_dec(ioc_count);
+ elv_ioc_count_dec(cfq_ioc_count);
if (ioc_gone) {
/*
@@ -1431,7 +1448,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
* complete ioc_gone and set it back to NULL
*/
spin_lock(&ioc_gone_lock);
- if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+ if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
complete(ioc_gone);
ioc_gone = NULL;
}
@@ -1557,7 +1574,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
INIT_HLIST_NODE(&cic->cic_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
- elv_ioc_count_inc(ioc_count);
+ elv_ioc_count_inc(cfq_ioc_count);
}
return cic;
@@ -1641,6 +1658,26 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
ioc->ioprio_changed = 0;
}
+static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ pid_t pid, int is_sync)
+{
+ RB_CLEAR_NODE(&cfqq->rb_node);
+ RB_CLEAR_NODE(&cfqq->p_node);
+ INIT_LIST_HEAD(&cfqq->fifo);
+
+ atomic_set(&cfqq->ref, 0);
+ cfqq->cfqd = cfqd;
+
+ cfq_mark_cfqq_prio_changed(cfqq);
+
+ if (is_sync) {
+ if (!cfq_class_idle(cfqq))
+ cfq_mark_cfqq_idle_window(cfqq);
+ cfq_mark_cfqq_sync(cfqq);
+ }
+ cfqq->pid = pid;
+}
+
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
struct io_context *ioc, gfp_t gfp_mask)
@@ -1653,56 +1690,40 @@ retry:
/* cic always exists here */
cfqq = cic_to_cfqq(cic, is_sync);
- if (!cfqq) {
+ /*
+ * Always try a new alloc if we fell back to the OOM cfqq
+ * originally, since it should just be a temporary situation.
+ */
+ if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+ cfqq = NULL;
if (new_cfqq) {
cfqq = new_cfqq;
new_cfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
- /*
- * Inform the allocator of the fact that we will
- * just repeat this allocation if it fails, to allow
- * the allocator to do whatever it needs to attempt to
- * free memory.
- */
spin_unlock_irq(cfqd->queue->queue_lock);
new_cfqq = kmem_cache_alloc_node(cfq_pool,
- gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+ gfp_mask | __GFP_ZERO,
cfqd->queue->node);
spin_lock_irq(cfqd->queue->queue_lock);
- goto retry;
+ if (new_cfqq)
+ goto retry;
} else {
cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
cfqd->queue->node);
- if (!cfqq)
- goto out;
}
- RB_CLEAR_NODE(&cfqq->rb_node);
- RB_CLEAR_NODE(&cfqq->p_node);
- INIT_LIST_HEAD(&cfqq->fifo);
-
- atomic_set(&cfqq->ref, 0);
- cfqq->cfqd = cfqd;
-
- cfq_mark_cfqq_prio_changed(cfqq);
-
- cfq_init_prio_data(cfqq, ioc);
-
- if (is_sync) {
- if (!cfq_class_idle(cfqq))
- cfq_mark_cfqq_idle_window(cfqq);
- cfq_mark_cfqq_sync(cfqq);
- }
- cfqq->pid = current->pid;
- cfq_log_cfqq(cfqd, cfqq, "alloced");
+ if (cfqq) {
+ cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+ cfq_init_prio_data(cfqq, ioc);
+ cfq_log_cfqq(cfqd, cfqq, "alloced");
+ } else
+ cfqq = &cfqd->oom_cfqq;
}
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
-out:
- WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
return cfqq;
}
@@ -1735,11 +1756,8 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
cfqq = *async_cfqq;
}
- if (!cfqq) {
+ if (!cfqq)
cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
- if (!cfqq)
- return NULL;
- }
/*
* pin the queue now that it's allocated, scheduler exit will prune it
@@ -2124,11 +2142,11 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
*/
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
- if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
- cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
+ if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
+ cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
- cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+ rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
return;
if (cfqd->hw_tag_samples++ < 50)
@@ -2155,9 +2173,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_update_hw_tag(cfqd);
- WARN_ON(!cfqd->rq_in_driver);
+ WARN_ON(!cfqd->rq_in_driver[sync]);
WARN_ON(!cfqq->dispatched);
- cfqd->rq_in_driver--;
+ cfqd->rq_in_driver[sync]--;
cfqq->dispatched--;
if (cfq_cfqq_sync(cfqq))
@@ -2191,7 +2209,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_arm_slice_timer(cfqd);
}
- if (!cfqd->rq_in_driver)
+ if (!rq_in_driver(cfqd))
cfq_schedule_dispatch(cfqd);
}
@@ -2307,10 +2325,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq) {
cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
-
- if (!cfqq)
- goto queue_fail;
-
cic_set_cfqq(cic, cfqq, is_sync);
}
@@ -2465,6 +2479,14 @@ static void *cfq_init_queue(struct request_queue *q)
for (i = 0; i < CFQ_PRIO_LISTS; i++)
cfqd->prio_trees[i] = RB_ROOT;
+ /*
+ * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+ * Grab a permanent reference to it, so that the normal code flow
+ * will not attempt to free it.
+ */
+ cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
+ atomic_inc(&cfqd->oom_cfqq.ref);
+
INIT_LIST_HEAD(&cfqd->cic_list);
cfqd->queue = q;
@@ -2658,7 +2680,7 @@ static void __exit cfq_exit(void)
* this also protects us from entering cfq_slab_kill() with
* pending RCU callbacks
*/
- if (elv_ioc_count_read(ioc_count))
+ if (elv_ioc_count_read(cfq_ioc_count))
wait_for_completion(&all_gone);
cfq_slab_kill();
}
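The cfq-iosched changes above split the single rq_in_driver counter into per-direction counters, so the scheduler can ask specifically how many async requests are still inside the driver before it dispatches sync IO. A self-contained sketch of that accounting pattern (illustrative only; the names below are hypothetical, not the kernel's):

/* Hypothetical sketch of the split in-driver accounting used above. */
enum { RW_ASYNC = 0, RW_SYNC = 1 };

struct in_driver_counters {
	int count[2];			/* indexed by RW_ASYNC / RW_SYNC */
};

static inline int total_in_driver(const struct in_driver_counters *c)
{
	return c->count[RW_ASYNC] + c->count[RW_SYNC];
}

static inline int async_in_driver(const struct in_driver_counters *c)
{
	return c->count[RW_ASYNC];	/* what cfq_dispatch_requests() now checks */
}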
diff --git a/block/cmd-filter.c b/block/cmd-filter.c
deleted file mode 100644
index 572bbc2f900d..000000000000
--- a/block/cmd-filter.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright 2004 Peter M. Jones <pjones@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
- */
-
-#include <linux/list.h>
-#include <linux/genhd.h>
-#include <linux/spinlock.h>
-#include <linux/capability.h>
-#include <linux/bitops.h>
-#include <linux/blkdev.h>
-
-#include <scsi/scsi.h>
-#include <linux/cdrom.h>
-
-int blk_verify_command(struct blk_cmd_filter *filter,
- unsigned char *cmd, fmode_t has_write_perm)
-{
- /* root can do any command. */
- if (capable(CAP_SYS_RAWIO))
- return 0;
-
- /* if there's no filter set, assume we're filtering everything out */
- if (!filter)
- return -EPERM;
-
- /* Anybody who can open the device can do a read-safe command */
- if (test_bit(cmd[0], filter->read_ok))
- return 0;
-
- /* Write-safe commands require a writable open */
- if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
- return 0;
-
- return -EPERM;
-}
-EXPORT_SYMBOL(blk_verify_command);
-
-#if 0
-/* and now, the sysfs stuff */
-static ssize_t rcf_cmds_show(struct blk_cmd_filter *filter, char *page,
- int rw)
-{
- char *npage = page;
- unsigned long *okbits;
- int i;
-
- if (rw == READ)
- okbits = filter->read_ok;
- else
- okbits = filter->write_ok;
-
- for (i = 0; i < BLK_SCSI_MAX_CMDS; i++) {
- if (test_bit(i, okbits)) {
- npage += sprintf(npage, "0x%02x", i);
- if (i < BLK_SCSI_MAX_CMDS - 1)
- sprintf(npage++, " ");
- }
- }
-
- if (npage != page)
- npage += sprintf(npage, "\n");
-
- return npage - page;
-}
-
-static ssize_t rcf_readcmds_show(struct blk_cmd_filter *filter, char *page)
-{
- return rcf_cmds_show(filter, page, READ);
-}
-
-static ssize_t rcf_writecmds_show(struct blk_cmd_filter *filter,
- char *page)
-{
- return rcf_cmds_show(filter, page, WRITE);
-}
-
-static ssize_t rcf_cmds_store(struct blk_cmd_filter *filter,
- const char *page, size_t count, int rw)
-{
- unsigned long okbits[BLK_SCSI_CMD_PER_LONG], *target_okbits;
- int cmd, set;
- char *p, *status;
-
- if (rw == READ) {
- memcpy(&okbits, filter->read_ok, sizeof(okbits));
- target_okbits = filter->read_ok;
- } else {
- memcpy(&okbits, filter->write_ok, sizeof(okbits));
- target_okbits = filter->write_ok;
- }
-
- while ((p = strsep((char **)&page, " ")) != NULL) {
- set = 1;
-
- if (p[0] == '+') {
- p++;
- } else if (p[0] == '-') {
- set = 0;
- p++;
- }
-
- cmd = simple_strtol(p, &status, 16);
-
- /* either of these cases means invalid input, so do nothing. */
- if ((status == p) || cmd >= BLK_SCSI_MAX_CMDS)
- return -EINVAL;
-
- if (set)
- __set_bit(cmd, okbits);
- else
- __clear_bit(cmd, okbits);
- }
-
- memcpy(target_okbits, okbits, sizeof(okbits));
- return count;
-}
-
-static ssize_t rcf_readcmds_store(struct blk_cmd_filter *filter,
- const char *page, size_t count)
-{
- return rcf_cmds_store(filter, page, count, READ);
-}
-
-static ssize_t rcf_writecmds_store(struct blk_cmd_filter *filter,
- const char *page, size_t count)
-{
- return rcf_cmds_store(filter, page, count, WRITE);
-}
-
-struct rcf_sysfs_entry {
- struct attribute attr;
- ssize_t (*show)(struct blk_cmd_filter *, char *);
- ssize_t (*store)(struct blk_cmd_filter *, const char *, size_t);
-};
-
-static struct rcf_sysfs_entry rcf_readcmds_entry = {
- .attr = { .name = "read_table", .mode = S_IRUGO | S_IWUSR },
- .show = rcf_readcmds_show,
- .store = rcf_readcmds_store,
-};
-
-static struct rcf_sysfs_entry rcf_writecmds_entry = {
- .attr = {.name = "write_table", .mode = S_IRUGO | S_IWUSR },
- .show = rcf_writecmds_show,
- .store = rcf_writecmds_store,
-};
-
-static struct attribute *default_attrs[] = {
- &rcf_readcmds_entry.attr,
- &rcf_writecmds_entry.attr,
- NULL,
-};
-
-#define to_rcf(atr) container_of((atr), struct rcf_sysfs_entry, attr)
-
-static ssize_t
-rcf_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
- struct rcf_sysfs_entry *entry = to_rcf(attr);
- struct blk_cmd_filter *filter;
-
- filter = container_of(kobj, struct blk_cmd_filter, kobj);
- if (entry->show)
- return entry->show(filter, page);
-
- return 0;
-}
-
-static ssize_t
-rcf_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *page, size_t length)
-{
- struct rcf_sysfs_entry *entry = to_rcf(attr);
- struct blk_cmd_filter *filter;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- if (!entry->store)
- return -EINVAL;
-
- filter = container_of(kobj, struct blk_cmd_filter, kobj);
- return entry->store(filter, page, length);
-}
-
-static struct sysfs_ops rcf_sysfs_ops = {
- .show = rcf_attr_show,
- .store = rcf_attr_store,
-};
-
-static struct kobj_type rcf_ktype = {
- .sysfs_ops = &rcf_sysfs_ops,
- .default_attrs = default_attrs,
-};
-
-int blk_register_filter(struct gendisk *disk)
-{
- int ret;
- struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
-
- ret = kobject_init_and_add(&filter->kobj, &rcf_ktype,
- &disk_to_dev(disk)->kobj,
- "%s", "cmd_filter");
- if (ret < 0)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL(blk_register_filter);
-
-void blk_unregister_filter(struct gendisk *disk)
-{
- struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
-
- kobject_put(&filter->kobj);
-}
-EXPORT_SYMBOL(blk_unregister_filter);
-#endif
diff --git a/block/elevator.c b/block/elevator.c
index ca861927ba41..6f2375339a99 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -100,6 +100,14 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
if (bio_integrity(bio) != blk_integrity_rq(rq))
return 0;
+ /*
+ * Don't merge if failfast settings don't match
+ */
+ if (bio_failfast_dev(bio) != blk_failfast_dev(rq) ||
+ bio_failfast_transport(bio) != blk_failfast_transport(rq) ||
+ bio_failfast_driver(bio) != blk_failfast_driver(rq))
+ return 0;
+
if (!elv_iosched_allow_merge(rq, bio))
return 0;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 5f8e798ede4e..f0e0ce0a607d 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -32,6 +32,11 @@
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
+struct blk_cmd_filter {
+ unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
+ unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
+} blk_default_cmd_filter;
+
/* Command group 3 is reserved and should never be used. */
const unsigned char scsi_command_size_tbl[8] =
{
@@ -105,7 +110,7 @@ static int sg_emulated_host(struct request_queue *q, int __user *p)
return put_user(1, p);
}
-void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
{
/* Basic read-only commands */
__set_bit(TEST_UNIT_READY, filter->read_ok);
@@ -187,14 +192,37 @@ void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
}
-EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults);
+
+int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
+{
+ struct blk_cmd_filter *filter = &blk_default_cmd_filter;
+
+ /* root can do any command. */
+ if (capable(CAP_SYS_RAWIO))
+ return 0;
+
+ /* if there's no filter set, assume we're filtering everything out */
+ if (!filter)
+ return -EPERM;
+
+ /* Anybody who can open the device can do a read-safe command */
+ if (test_bit(cmd[0], filter->read_ok))
+ return 0;
+
+ /* Write-safe commands require a writable open */
+ if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
+ return 0;
+
+ return -EPERM;
+}
+EXPORT_SYMBOL(blk_verify_command);
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, fmode_t mode)
{
if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE))
+ if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
return -EPERM;
/*
@@ -427,7 +455,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
- err = blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE);
+ err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
if (err)
goto error;
@@ -645,5 +673,10 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
blk_put_queue(q);
return err;
}
-
EXPORT_SYMBOL(scsi_cmd_ioctl);
+
+int __init blk_scsi_ioctl_init(void)
+{
+ blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
+ return 0;
+}
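With block/cmd-filter.c gone, the command filter lives in scsi_ioctl.c as a single static default table, and blk_verify_command() takes just the CDB and a write-permission flag. A hypothetical caller sketch, assuming only the new two-argument signature shown above:

/* Illustrative only: how an ioctl path consults the default filter now. */
static int check_user_cdb(struct request *rq, fmode_t mode)
{
	/* Returns -EPERM unless the opcode is in read_ok, or in write_ok
	 * with a writable open, or the caller has CAP_SYS_RAWIO. */
	return blk_verify_command(rq->cmd, mode & FMODE_WRITE);
}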
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 4dfdd03e708f..f2002d8e5f67 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,6 +23,7 @@ comment "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
+ select CRYPTO_ANSI_CPRNG
help
This options enables the fips boot option which is
required if you want to system to operate in a FIPS 200
@@ -782,7 +783,6 @@ config CRYPTO_ANSI_CPRNG
tristate "Pseudo Random Number Generation for Cryptographic modules"
select CRYPTO_AES
select CRYPTO_RNG
- select CRYPTO_FIPS
help
This option enables the generic pseudo random number generator
for cryptographic modules. Uses the Algorithm specified in
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e11ce37c7104..03fb5facf0b4 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -14,6 +14,7 @@
*/
#include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -25,6 +26,8 @@
#include "internal.h"
+static const char *skcipher_default_geniv __read_mostly;
+
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
@@ -180,7 +183,8 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type);
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
- return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv";
+ return alg->cra_flags & CRYPTO_ALG_ASYNC ?
+ "eseqiv" : skcipher_default_geniv;
}
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -201,8 +205,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
int err;
larval = crypto_larval_lookup(alg->cra_driver_name,
+ (type & ~CRYPTO_ALG_TYPE_MASK) |
CRYPTO_ALG_TYPE_GIVCIPHER,
- CRYPTO_ALG_TYPE_MASK);
+ mask | CRYPTO_ALG_TYPE_MASK);
err = PTR_ERR(larval);
if (IS_ERR(larval))
goto out;
@@ -360,3 +365,17 @@ err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
+
+static int __init skcipher_module_init(void)
+{
+ skcipher_default_geniv = num_possible_cpus() > 1 ?
+ "eseqiv" : "chainiv";
+ return 0;
+}
+
+static void skcipher_module_exit(void)
+{
+}
+
+module_init(skcipher_module_init);
+module_exit(skcipher_module_exit);
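The ablkcipher change above picks the default IV generator once at module init: synchronous ciphers get "chainiv" on uniprocessor systems and "eseqiv" when more than one CPU is possible, while async ciphers always use "eseqiv". A small sketch of that decision, reusing the generator names from the patch:

/* Illustrative restatement of the geniv selection logic above. */
static const char *pick_default_geniv(unsigned int nr_possible_cpus, int is_async)
{
	if (is_async)
		return "eseqiv";
	return nr_possible_cpus > 1 ? "eseqiv" : "chainiv";
}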
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index d80ed4c1e009..5357ba7d821a 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -187,7 +187,6 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
/* Our exported functions */
static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
{
- unsigned long flags;
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
@@ -196,7 +195,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
if (nbytes < 0)
return -EINVAL;
- spin_lock_irqsave(&ctx->prng_lock, flags);
+ spin_lock_bh(&ctx->prng_lock);
err = -EINVAL;
if (ctx->flags & PRNG_NEED_RESET)
@@ -268,7 +267,7 @@ empty_rbuf:
goto remainder;
done:
- spin_unlock_irqrestore(&ctx->prng_lock, flags);
+ spin_unlock_bh(&ctx->prng_lock);
dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
err, ctx);
return err;
@@ -284,10 +283,9 @@ static int reset_prng_context(struct prng_context *ctx,
unsigned char *V, unsigned char *DT)
{
int ret;
- int rc = -EINVAL;
unsigned char *prng_key;
- spin_lock(&ctx->prng_lock);
+ spin_lock_bh(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
@@ -308,34 +306,20 @@ static int reset_prng_context(struct prng_context *ctx,
memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
- if (ctx->tfm)
- crypto_free_cipher(ctx->tfm);
-
- ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tfm)) {
- dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
- ctx);
- ctx->tfm = NULL;
- goto out;
- }
-
ctx->rand_data_valid = DEFAULT_BLK_SZ;
ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
if (ret) {
dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
crypto_cipher_get_flags(ctx->tfm));
- crypto_free_cipher(ctx->tfm);
goto out;
}
- rc = 0;
+ ret = 0;
ctx->flags &= ~PRNG_NEED_RESET;
out:
- spin_unlock(&ctx->prng_lock);
-
- return rc;
-
+ spin_unlock_bh(&ctx->prng_lock);
+ return ret;
}
static int cprng_init(struct crypto_tfm *tfm)
@@ -343,6 +327,12 @@ static int cprng_init(struct crypto_tfm *tfm)
struct prng_context *ctx = crypto_tfm_ctx(tfm);
spin_lock_init(&ctx->prng_lock);
+ ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(ctx->tfm)) {
+ dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
+ ctx);
+ return PTR_ERR(ctx->tfm);
+ }
if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
return -EINVAL;
diff --git a/crypto/internal.h b/crypto/internal.h
index 113579a82dff..95baaea21fbc 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -25,12 +25,7 @@
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
-
-#ifdef CONFIG_CRYPTO_FIPS
-extern int fips_enabled;
-#else
-#define fips_enabled 0
-#endif
+#include <linux/fips.h>
/* Crypto notification events. */
enum {
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d59ba5079d14..a890a6792c7b 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -45,6 +45,8 @@
*/
static unsigned int sec;
+static char *alg = NULL;
+static u32 type;
static int mode;
static char *tvmem[TVMEMSIZE];
@@ -885,6 +887,11 @@ static int do_test(int m)
return ret;
}
+static int do_alg_test(const char *alg, u32 type)
+{
+ return crypto_has_alg(alg, type, CRYPTO_ALG_TYPE_MASK) ? 0 : -ENOENT;
+}
+
static int __init tcrypt_mod_init(void)
{
int err = -ENOMEM;
@@ -896,7 +903,11 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv;
}
- err = do_test(mode);
+ if (alg)
+ err = do_alg_test(alg, type);
+ else
+ err = do_test(mode);
+
if (err) {
printk(KERN_ERR "tcrypt: one or more tests failed!\n");
goto err_free_tv;
@@ -928,6 +939,8 @@ static void __exit tcrypt_mod_fini(void) { }
module_init(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
+module_param(alg, charp, 0);
+module_param(type, uint, 0);
module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
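The new alg= and type= module parameters let tcrypt act as a simple presence probe: when alg is set, module init calls do_alg_test() instead of the numbered test modes, and that reduces to a crypto_has_alg() lookup. A sketch of the path, mirroring the hunk above:

/* Illustrative only: the alg= path is just an algorithm availability check. */
static int probe_alg(const char *name, u32 type)
{
	return crypto_has_alg(name, type, CRYPTO_ALG_TYPE_MASK) ? 0 : -ENOENT;
}

So loading the module with an alg argument (for example alg="sha256") is expected to succeed only if that algorithm can be instantiated; otherwise module load fails, just as it does for a failing numbered test.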
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e9e9d84293b9..29b228d9b1a2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -190,10 +190,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
hash_buff = xbuf[0];
- ret = -EINVAL;
- if (WARN_ON(template[i].psize > PAGE_SIZE))
- goto out;
-
memcpy(hash_buff, template[i].plaintext, template[i].psize);
sg_init_one(&sg[0], hash_buff, template[i].psize);
@@ -2348,6 +2344,7 @@ static int alg_find_test(const char *alg)
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
int i;
+ int j;
int rc;
if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
@@ -2369,14 +2366,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
}
i = alg_find_test(alg);
- if (i < 0)
+ j = alg_find_test(driver);
+ if (i < 0 && j < 0)
goto notest;
- if (fips_enabled && !alg_test_descs[i].fips_allowed)
+ if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) ||
+ (j >= 0 && !alg_test_descs[j].fips_allowed)))
goto non_fips_alg;
- rc = alg_test_descs[i].test(alg_test_descs + i, driver,
- type, mask);
+ rc = 0;
+ if (i >= 0)
+ rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
+ type, mask);
+ if (j >= 0)
+ rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
+ type, mask);
+
test_done:
if (fips_enabled && rc)
panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7ec7d88c5999..41adbb3d2cbe 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -60,7 +60,11 @@ config ACPI_PROCFS
/proc/acpi/fadt (/sys/firmware/acpi/tables/FACP)
/proc/acpi/debug_layer (/sys/module/acpi/parameters/debug_layer)
/proc/acpi/debug_level (/sys/module/acpi/parameters/debug_level)
-
+ /proc/acpi/processor/*/power (/sys/devices/system/cpu/*/cpuidle/*)
+ /proc/acpi/processor/*/performance (/sys/devices/system/cpu/*/
+ cpufreq/*)
+ /proc/acpi/processor/*/throttling (/sys/class/thermal/
+ cooling_device*/*)
This option has no effect on /proc/acpi/ files
and functions which do not yet exist in /sys.
@@ -196,6 +200,17 @@ config ACPI_HOTPLUG_CPU
select ACPI_CONTAINER
default y
+config ACPI_PROCESSOR_AGGREGATOR
+ tristate "Processor Aggregator"
+ depends on ACPI_PROCESSOR
+ depends on EXPERIMENTAL
+ help
+ ACPI 4.0 defines the Processor Aggregator device, which enables the OS
+ to perform specific processor configuration and control that applies
+ to all processors in the platform. Currently only logical processor
+ idling is defined, which is used to reduce power consumption. This
+ driver supports the new device.
+
config ACPI_THERMAL
tristate "Thermal Zone"
depends on ACPI_PROCESSOR
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 03a985be3fe3..4ea5be388181 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,3 +61,5 @@ obj-$(CONFIG_ACPI_SBS) += sbs.o
processor-y := processor_core.o processor_throttling.o
processor-y += processor_idle.o processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
+
+obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += processor_aggregator.o
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 7a0f4aa4fa1e..a8d9d8fac5d4 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -50,7 +50,6 @@ MODULE_LICENSE("GPL");
static int acpi_memory_device_add(struct acpi_device *device);
static int acpi_memory_device_remove(struct acpi_device *device, int type);
-static int acpi_memory_device_start(struct acpi_device *device);
static const struct acpi_device_id memory_device_ids[] = {
{ACPI_MEMORY_DEVICE_HID, 0},
@@ -65,7 +64,6 @@ static struct acpi_driver acpi_memory_device_driver = {
.ops = {
.add = acpi_memory_device_add,
.remove = acpi_memory_device_remove,
- .start = acpi_memory_device_start,
},
};
@@ -415,28 +413,6 @@ static int acpi_memory_device_add(struct acpi_device *device)
printk(KERN_DEBUG "%s \n", acpi_device_name(device));
- return result;
-}
-
-static int acpi_memory_device_remove(struct acpi_device *device, int type)
-{
- struct acpi_memory_device *mem_device = NULL;
-
-
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
- mem_device = acpi_driver_data(device);
- kfree(mem_device);
-
- return 0;
-}
-
-static int acpi_memory_device_start (struct acpi_device *device)
-{
- struct acpi_memory_device *mem_device;
- int result = 0;
-
/*
* Early boot code has recognized memory area by EFI/E820.
* If DSDT shows these memory devices on boot, hotplug is not necessary
@@ -446,8 +422,6 @@ static int acpi_memory_device_start (struct acpi_device *device)
if (!acpi_hotmem_initialized)
return 0;
- mem_device = acpi_driver_data(device);
-
if (!acpi_memory_check_device(mem_device)) {
/* call add_memory func */
result = acpi_memory_enable_device(mem_device);
@@ -458,6 +432,20 @@ static int acpi_memory_device_start (struct acpi_device *device)
return result;
}
+static int acpi_memory_device_remove(struct acpi_device *device, int type)
+{
+ struct acpi_memory_device *mem_device = NULL;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ mem_device = acpi_driver_data(device);
+ kfree(mem_device);
+
+ return 0;
+}
+
/*
* Helper function to check for memory device
*/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index cd55c774e882..5df6af7897bf 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -203,10 +203,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
/* Disable the fixed event */
if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
- status =
- acpi_write_bit_register(acpi_gbl_fixed_event_info
- [i].enable_register_id,
- ACPI_DISABLE_EVENT);
+ status = acpi_disable_event(i, 0);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -288,18 +285,14 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
ACPI_FUNCTION_ENTRY();
/* Clear the status bit */
-
- (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
- status_register_id, ACPI_CLEAR_STATUS);
+ acpi_clear_event(event);
/*
* Make sure we've got a handler. If not, report an error. The event is
* disabled to prevent further interrupts.
*/
if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
- (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
- enable_register_id,
- ACPI_DISABLE_EVENT);
+ acpi_disable_event(event, 0);
ACPI_ERROR((AE_INFO,
"No installed handler for fixed event [%08X]",
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index db307a356f08..283e872f87a3 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -534,6 +534,12 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
ACPI_EXCEPTION((AE_INFO, status, "During Method _BFS"));
}
}
+
+ /* Clear any pending events before enabling interrupts */
+
+ acpi_hw_disable_all_gpes();
+ acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+
return_ACPI_STATUS(status);
}
@@ -578,15 +584,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
/*
* GPEs must be enabled before _WAK is called as GPEs
* might get fired there
- *
- * Restore the GPEs:
- * 1) Disable/Clear all GPEs
- * 2) Enable all runtime GPEs
*/
- status = acpi_hw_disable_all_gpes();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
status = acpi_hw_enable_all_runtime_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -610,15 +608,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
/* Enable power button */
- (void)
- acpi_write_bit_register(acpi_gbl_fixed_event_info
- [ACPI_EVENT_POWER_BUTTON].
- enable_register_id, ACPI_ENABLE_EVENT);
-
- (void)
- acpi_write_bit_register(acpi_gbl_fixed_event_info
- [ACPI_EVENT_POWER_BUTTON].
- status_register_id, ACPI_CLEAR_STATUS);
+ acpi_enable_event(ACPI_EVENT_POWER_BUTTON, 0);
arg.integer.value = ACPI_SST_WORKING;
status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 391f331674c7..d6bf0578737b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -788,6 +788,42 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
return AE_CTRL_TERMINATE;
}
+static int ec_install_handlers(struct acpi_ec *ec)
+{
+ acpi_status status;
+ if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
+ return 0;
+ status = acpi_install_gpe_handler(NULL, ec->gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler, ec);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
+ acpi_enable_gpe(NULL, ec->gpe);
+ status = acpi_install_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler,
+ NULL, ec);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ /*
+ * Maybe OS fails in evaluating the _REG object.
+ * The AE_NOT_FOUND error will be ignored and OS
+ * continue to initialize EC.
+ */
+ printk(KERN_ERR "Fail in evaluating the _REG object"
+ " of EC device. Broken bios is suspected.\n");
+ } else {
+ acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler);
+ return -ENODEV;
+ }
+ }
+
+ set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
+ return 0;
+}
+
static void ec_remove_handlers(struct acpi_ec *ec)
{
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
@@ -802,9 +838,8 @@ static void ec_remove_handlers(struct acpi_ec *ec)
static int acpi_ec_add(struct acpi_device *device)
{
struct acpi_ec *ec = NULL;
+ int ret;
- if (!device)
- return -EINVAL;
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
@@ -839,7 +874,12 @@ static int acpi_ec_add(struct acpi_device *device)
ec->gpe, ec->command_addr, ec->data_addr);
pr_info(PREFIX "driver started in %s mode\n",
(test_bit(EC_FLAGS_GPE_MODE, &ec->flags))?"interrupt":"poll");
- return 0;
+
+ ret = ec_install_handlers(ec);
+
+ /* EC is fully operational, allow queries */
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+ return ret;
}
static int acpi_ec_remove(struct acpi_device *device, int type)
@@ -851,6 +891,7 @@ static int acpi_ec_remove(struct acpi_device *device, int type)
return -EINVAL;
ec = acpi_driver_data(device);
+ ec_remove_handlers(ec);
mutex_lock(&ec->lock);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
list_del(&handler->node);
@@ -888,75 +929,6 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
return AE_OK;
}
-static int ec_install_handlers(struct acpi_ec *ec)
-{
- acpi_status status;
- if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
- return 0;
- status = acpi_install_gpe_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler, ec);
- if (ACPI_FAILURE(status))
- return -ENODEV;
- acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
- acpi_enable_gpe(NULL, ec->gpe);
- status = acpi_install_address_space_handler(ec->handle,
- ACPI_ADR_SPACE_EC,
- &acpi_ec_space_handler,
- NULL, ec);
- if (ACPI_FAILURE(status)) {
- if (status == AE_NOT_FOUND) {
- /*
- * Maybe OS fails in evaluating the _REG object.
- * The AE_NOT_FOUND error will be ignored and OS
- * continue to initialize EC.
- */
- printk(KERN_ERR "Fail in evaluating the _REG object"
- " of EC device. Broken bios is suspected.\n");
- } else {
- acpi_remove_gpe_handler(NULL, ec->gpe,
- &acpi_ec_gpe_handler);
- return -ENODEV;
- }
- }
-
- set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
- return 0;
-}
-
-static int acpi_ec_start(struct acpi_device *device)
-{
- struct acpi_ec *ec;
- int ret = 0;
-
- if (!device)
- return -EINVAL;
-
- ec = acpi_driver_data(device);
-
- if (!ec)
- return -EINVAL;
-
- ret = ec_install_handlers(ec);
-
- /* EC is fully operational, allow queries */
- clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
- return ret;
-}
-
-static int acpi_ec_stop(struct acpi_device *device, int type)
-{
- struct acpi_ec *ec;
- if (!device)
- return -EINVAL;
- ec = acpi_driver_data(device);
- if (!ec)
- return -EINVAL;
- ec_remove_handlers(ec);
-
- return 0;
-}
-
int __init acpi_boot_ec_enable(void)
{
if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
@@ -1077,8 +1049,6 @@ static struct acpi_driver acpi_ec_driver = {
.ops = {
.add = acpi_ec_add,
.remove = acpi_ec_remove,
- .start = acpi_ec_start,
- .stop = acpi_ec_stop,
.suspend = acpi_ec_suspend,
.resume = acpi_ec_resume,
},
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 8a5bf3b356fa..55b5b90c2a44 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -395,7 +395,7 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
fn = adr & 0xffff;
pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
- if (hnd == handle)
+ if (!pdev || hnd == handle)
break;
pbus = pdev->subordinate;
diff --git a/drivers/acpi/processor_aggregator.c b/drivers/acpi/processor_aggregator.c
new file mode 100644
index 000000000000..d73ae57f7a6d
--- /dev/null
+++ b/drivers/acpi/processor_aggregator.c
@@ -0,0 +1,389 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/clockchips.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
+#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
+#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
+static DEFINE_MUTEX(isolated_cpus_lock);
+
+#define MWAIT_SUBSTATE_MASK (0xf)
+#define MWAIT_CSTATE_MASK (0xf)
+#define MWAIT_SUBSTATE_SIZE (4)
+#define CPUID_MWAIT_LEAF (5)
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
+#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
+static unsigned long power_saving_mwait_eax;
+static void power_saving_mwait_init(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int highest_cstate = 0;
+ unsigned int highest_subcstate = 0;
+ int i;
+
+ if (!boot_cpu_has(X86_FEATURE_MWAIT))
+ return;
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ return;
+
+ edx >>= MWAIT_SUBSTATE_SIZE;
+ for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+ if (edx & MWAIT_SUBSTATE_MASK) {
+ highest_cstate = i;
+ highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+ }
+ }
+ power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+ (highest_subcstate - 1);
+
+ for_each_online_cpu(i)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ case X86_VENDOR_INTEL:
+ /*
+ * AMD Fam10h TSC will tick in all
+ * C/P/S0/S1 states when this bit is set.
+ */
+ if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ return;
+
+ /*FALL THROUGH*/
+ default:
+ /* TSC could halt in idle, so notify users */
+ mark_tsc_unstable("TSC halts in idle");
+ }
+#endif
+}
+
+static int power_saving_thread(void *data)
+{
+ struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1};
+ int do_sleep;
+
+ /*
+ * We just create an RT task to do the power saving. The scheduler
+ * will migrate the task to any CPU.
+ */
+ sched_setscheduler(current, SCHED_RR, &param);
+
+ while (!kthread_should_stop()) {
+ int cpu;
+ u64 expire_time;
+
+ try_to_freeze();
+
+ do_sleep = 0;
+
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we test
+ * NEED_RESCHED:
+ */
+ smp_mb();
+
+ expire_time = jiffies + HZ * 95 /100;
+
+ while (!need_resched()) {
+ local_irq_disable();
+ cpu = smp_processor_id();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+ stop_critical_timings();
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __mwait(power_saving_mwait_eax, 1);
+
+ start_critical_timings();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+ local_irq_enable();
+
+ if (jiffies > expire_time) {
+ do_sleep = 1;
+ break;
+ }
+ }
+
+ current_thread_info()->status |= TS_POLLING;
+
+ /*
+ * The current sched_rt code has a threshold for RT task running time.
+ * When an RT task uses 95% of the CPU time, it is scheduled out for
+ * 5% of the CPU time so that other tasks are not starved. But the
+ * mechanism only works when every CPU has an RT task running; if one
+ * CPU has no RT task, RT tasks from other CPUs will borrow CPU time
+ * from it and end up using more than 95% CPU time. To keep the
+ * starvation avoidance working, take a nap here.
+ */
+ if (do_sleep)
+ schedule_timeout_killable(HZ * 5 /100);
+ }
+ return 0;
+}
+
+static struct task_struct *ps_tsks[NR_CPUS];
+static unsigned int ps_tsk_num;
+static int create_power_saving_task(void)
+{
+ ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread, NULL,
+ "power_saving/%d", ps_tsk_num);
+ if (ps_tsks[ps_tsk_num]) {
+ ps_tsk_num++;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void destroy_power_saving_task(void)
+{
+ if (ps_tsk_num > 0) {
+ ps_tsk_num--;
+ kthread_stop(ps_tsks[ps_tsk_num]);
+ }
+}
+
+static void set_power_saving_task_num(unsigned int num)
+{
+ if (num > ps_tsk_num) {
+ while (ps_tsk_num < num) {
+ if (create_power_saving_task())
+ return;
+ }
+ } else if (num < ps_tsk_num) {
+ while (ps_tsk_num > num)
+ destroy_power_saving_task();
+ }
+}
+
+static int acpi_processor_aggregator_idle_cpus(unsigned int num_cpus)
+{
+ get_online_cpus();
+
+ num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
+ set_power_saving_task_num(num_cpus);
+
+ put_online_cpus();
+ return 0;
+}
+
+static uint32_t acpi_processor_aggregator_idle_cpus_num(void)
+{
+ return ps_tsk_num;
+}
+
+static ssize_t acpi_processor_aggregator_idlecpus_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long num;
+ if (strict_strtoul(buf, 0, &num))
+ return -EINVAL;
+ mutex_lock(&isolated_cpus_lock);
+ acpi_processor_aggregator_idle_cpus(num);
+ mutex_unlock(&isolated_cpus_lock);
+ return count;
+}
+
+static ssize_t acpi_processor_aggregator_idlecpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d",
+ acpi_processor_aggregator_idle_cpus_num());
+}
+static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
+ acpi_processor_aggregator_idlecpus_show,
+ acpi_processor_aggregator_idlecpus_store);
+
+static int acpi_processor_aggregator_add_sysfs(struct acpi_device *device)
+{
+ int result;
+
+ result = device_create_file(&device->dev, &dev_attr_idlecpus);
+ if (result)
+ return -ENODEV;
+ return 0;
+}
+
+static void acpi_processor_aggregator_remove_sysfs(struct acpi_device *device)
+{
+ device_remove_file(&device->dev, &dev_attr_idlecpus);
+}
+
+/* Query firmware how many CPUs should be idle */
+static int acpi_processor_aggregator_pur(acpi_handle handle, int *num_cpus)
+{
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ union acpi_object *package;
+ int rev, num, ret = -EINVAL;
+
+ status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ package = buffer.pointer;
+ if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
+ goto out;
+ rev = package->package.elements[0].integer.value;
+ num = package->package.elements[1].integer.value;
+ if (rev != 1)
+ goto out;
+ *num_cpus = num;
+ ret = 0;
+out:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+/* Notify firmware how many CPUs are idle */
+static void acpi_processor_aggregator_ost(acpi_handle handle, int stat,
+ uint32_t idle_cpus)
+{
+ union acpi_object params[3] = {
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_BUFFER,},
+ };
+ struct acpi_object_list arg_list = {3, params};
+
+ params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
+ params[1].integer.value = stat;
+ params[2].buffer.length = 4;
+ params[2].buffer.pointer = (void *)&idle_cpus;
+ acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
+}
+
+static void acpi_processor_aggregator_handle_notify(acpi_handle handle)
+{
+ int num_cpus, ret;
+ uint32_t idle_cpus;
+
+ mutex_lock(&isolated_cpus_lock);
+ if (acpi_processor_aggregator_pur(handle, &num_cpus)) {
+ mutex_unlock(&isolated_cpus_lock);
+ return;
+ }
+ ret = acpi_processor_aggregator_idle_cpus(num_cpus);
+ idle_cpus = acpi_processor_aggregator_idle_cpus_num();
+ if (!ret)
+ acpi_processor_aggregator_ost(handle, 0, idle_cpus);
+ else
+ acpi_processor_aggregator_ost(handle, 1, 0);
+ mutex_unlock(&isolated_cpus_lock);
+}
+
+static void acpi_processor_aggregator_notify(acpi_handle handle, u32 event,
+ void *data)
+{
+ struct acpi_device *device = data;
+
+ switch (event) {
+ case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
+ acpi_processor_aggregator_handle_notify(handle);
+ acpi_bus_generate_proc_event(device, event, 0);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+ default:
+ printk(KERN_WARNING"Unsupported event [0x%x]\n", event);
+ break;
+ }
+}
+
+static int acpi_processor_aggregator_add(struct acpi_device *device)
+{
+ acpi_status status;
+
+ strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);
+
+ if (acpi_processor_aggregator_add_sysfs(device))
+ return -ENODEV;
+
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY, acpi_processor_aggregator_notify, device);
+ if (ACPI_FAILURE(status)) {
+ acpi_processor_aggregator_remove_sysfs(device);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int acpi_processor_aggregator_remove(struct acpi_device *device, int type)
+{
+ mutex_lock(&isolated_cpus_lock);
+ acpi_processor_aggregator_idle_cpus(0);
+ mutex_unlock(&isolated_cpus_lock);
+
+ acpi_remove_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY, acpi_processor_aggregator_notify);
+ acpi_processor_aggregator_remove_sysfs(device);
+ return 0;
+}
+
+static const struct acpi_device_id processor_aggregator_device_ids[] = {
+ {"ACPI000C", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, processor_aggregator_device_ids);
+
+static struct acpi_driver acpi_processor_aggregator_driver = {
+ .name = "processor_aggregator",
+ .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
+ .ids = processor_aggregator_device_ids,
+ .ops = {
+ .add = acpi_processor_aggregator_add,
+ .remove = acpi_processor_aggregator_remove,
+ },
+};
+
+static int __init acpi_processor_aggregator_init(void)
+{
+ power_saving_mwait_init();
+ if (power_saving_mwait_eax == 0)
+ return -EINVAL;
+
+ return acpi_bus_register_driver(&acpi_processor_aggregator_driver);
+}
+
+static void __exit acpi_processor_aggregator_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_processor_aggregator_driver);
+}
+
+module_init(acpi_processor_aggregator_init);
+module_exit(acpi_processor_aggregator_exit);
+MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>");
+MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
+MODULE_LICENSE("GPL");
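The new processor_aggregator driver above ties three pieces together on each ACPI notify: _PUR tells the OS how many logical processors firmware wants idled, the driver adjusts the number of forced-idle RT threads to match, and _OST reports the result back to firmware. A condensed sketch of that flow using the functions defined in the file above (illustrative only, error handling trimmed):

/* Illustrative summary of acpi_processor_aggregator_handle_notify() above. */
static void aggregator_notify_sketch(acpi_handle handle)
{
	int wanted;

	if (acpi_processor_aggregator_pur(handle, &wanted))
		return;					/* _PUR missing or malformed */
	acpi_processor_aggregator_idle_cpus(wanted);	/* adjust power_saving/%d threads */
	acpi_processor_aggregator_ost(handle, 0,	/* report success and count via _OST */
			acpi_processor_aggregator_idle_cpus_num());
}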
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 84e0f3c07442..3bcef820540d 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -79,9 +79,10 @@ MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
-static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
+#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
+#endif
static void acpi_processor_notify(struct acpi_device *device, u32 event);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
@@ -101,7 +102,6 @@ static struct acpi_driver acpi_processor_driver = {
.ops = {
.add = acpi_processor_add,
.remove = acpi_processor_remove,
- .start = acpi_processor_start,
.suspend = acpi_processor_suspend,
.resume = acpi_processor_resume,
.notify = acpi_processor_notify,
@@ -110,7 +110,7 @@ static struct acpi_driver acpi_processor_driver = {
#define INSTALL_NOTIFY_HANDLER 1
#define UNINSTALL_NOTIFY_HANDLER 2
-
+#ifdef CONFIG_ACPI_PROCFS
static const struct file_operations acpi_processor_info_fops = {
.owner = THIS_MODULE,
.open = acpi_processor_info_open_fs,
@@ -118,6 +118,7 @@ static const struct file_operations acpi_processor_info_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
DEFINE_PER_CPU(struct acpi_processor *, processors);
struct acpi_processor_errata errata __read_mostly;
@@ -316,6 +317,7 @@ static int acpi_processor_set_pdc(struct acpi_processor *pr)
FS Interface (/proc)
-------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_PROCFS
static struct proc_dir_entry *acpi_processor_dir = NULL;
static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
@@ -388,7 +390,6 @@ static int acpi_processor_add_fs(struct acpi_device *device)
return -EIO;
return 0;
}
-
static int acpi_processor_remove_fs(struct acpi_device *device)
{
@@ -405,6 +406,16 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
return 0;
}
+#else
+static inline int acpi_processor_add_fs(struct acpi_device *device)
+{
+ return 0;
+}
+static inline int acpi_processor_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+#endif
/* Use the acpiid in MADT to map cpus in case of SMP */
@@ -698,92 +709,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
static DEFINE_PER_CPU(void *, processor_device_array);
-static int __cpuinit acpi_processor_start(struct acpi_device *device)
-{
- int result = 0;
- struct acpi_processor *pr;
- struct sys_device *sysdev;
-
- pr = acpi_driver_data(device);
-
- result = acpi_processor_get_info(device);
- if (result) {
- /* Processor is physically not present */
- return 0;
- }
-
- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
-
- /*
- * Buggy BIOS check
- * ACPI id of processors can be reported wrongly by the BIOS.
- * Don't trust it blindly
- */
- if (per_cpu(processor_device_array, pr->id) != NULL &&
- per_cpu(processor_device_array, pr->id) != device) {
- printk(KERN_WARNING "BIOS reported wrong ACPI id "
- "for the processor\n");
- return -ENODEV;
- }
- per_cpu(processor_device_array, pr->id) = device;
-
- per_cpu(processors, pr->id) = pr;
-
- result = acpi_processor_add_fs(device);
- if (result)
- goto end;
-
- sysdev = get_cpu_sysdev(pr->id);
- if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
- return -EFAULT;
-
- /* _PDC call should be done before doing anything else (if reqd.). */
- arch_acpi_processor_init_pdc(pr);
- acpi_processor_set_pdc(pr);
- arch_acpi_processor_cleanup_pdc(pr);
-
-#ifdef CONFIG_CPU_FREQ
- acpi_processor_ppc_has_changed(pr);
-#endif
- acpi_processor_get_throttling_info(pr);
- acpi_processor_get_limit_info(pr);
-
-
- acpi_processor_power_init(pr, device);
-
- pr->cdev = thermal_cooling_device_register("Processor", device,
- &processor_cooling_ops);
- if (IS_ERR(pr->cdev)) {
- result = PTR_ERR(pr->cdev);
- goto end;
- }
-
- dev_info(&device->dev, "registered as cooling_device%d\n",
- pr->cdev->id);
-
- result = sysfs_create_link(&device->dev.kobj,
- &pr->cdev->device.kobj,
- "thermal_cooling");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
- result = sysfs_create_link(&pr->cdev->device.kobj,
- &device->dev.kobj,
- "device");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
-
- if (pr->flags.throttling) {
- printk(KERN_INFO PREFIX "%s [%s] (supports",
- acpi_device_name(device), acpi_device_bid(device));
- printk(" %d throttling states", pr->throttling.state_count);
- printk(")\n");
- }
-
- end:
-
- return result;
-}
-
static void acpi_processor_notify(struct acpi_device *device, u32 event)
{
struct acpi_processor *pr = acpi_driver_data(device);
@@ -846,10 +771,8 @@ static struct notifier_block acpi_cpu_notifier =
static int acpi_processor_add(struct acpi_device *device)
{
struct acpi_processor *pr = NULL;
-
-
- if (!device)
- return -EINVAL;
+ int result = 0;
+ struct sys_device *sysdev;
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
@@ -865,7 +788,100 @@ static int acpi_processor_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
device->driver_data = pr;
+ result = acpi_processor_get_info(device);
+ if (result) {
+ /* Processor is physically not present */
+ return 0;
+ }
+
+ BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
+
+ /*
+ * Buggy BIOS check
+ * ACPI id of processors can be reported wrongly by the BIOS.
+ * Don't trust it blindly
+ */
+ if (per_cpu(processor_device_array, pr->id) != NULL &&
+ per_cpu(processor_device_array, pr->id) != device) {
+ printk(KERN_WARNING "BIOS reported wrong ACPI id "
+ "for the processor\n");
+ result = -ENODEV;
+ goto err_free_cpumask;
+ }
+ per_cpu(processor_device_array, pr->id) = device;
+
+ per_cpu(processors, pr->id) = pr;
+
+ result = acpi_processor_add_fs(device);
+ if (result)
+ goto err_free_cpumask;
+
+ sysdev = get_cpu_sysdev(pr->id);
+ if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
+ result = -EFAULT;
+ goto err_remove_fs;
+ }
+
+ /* _PDC call should be done before doing anything else (if reqd.). */
+ arch_acpi_processor_init_pdc(pr);
+ acpi_processor_set_pdc(pr);
+ arch_acpi_processor_cleanup_pdc(pr);
+
+#ifdef CONFIG_CPU_FREQ
+ acpi_processor_ppc_has_changed(pr);
+#endif
+ acpi_processor_get_throttling_info(pr);
+ acpi_processor_get_limit_info(pr);
+
+
+ acpi_processor_power_init(pr, device);
+
+ pr->cdev = thermal_cooling_device_register("Processor", device,
+ &processor_cooling_ops);
+ if (IS_ERR(pr->cdev)) {
+ result = PTR_ERR(pr->cdev);
+ goto err_power_exit;
+ }
+
+ dev_info(&device->dev, "registered as cooling_device%d\n",
+ pr->cdev->id);
+
+ result = sysfs_create_link(&device->dev.kobj,
+ &pr->cdev->device.kobj,
+ "thermal_cooling");
+ if (result) {
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ goto err_thermal_unregister;
+ }
+ result = sysfs_create_link(&pr->cdev->device.kobj,
+ &device->dev.kobj,
+ "device");
+ if (result) {
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ goto err_remove_sysfs;
+ }
+
+ if (pr->flags.throttling) {
+ printk(KERN_INFO PREFIX "%s [%s] (supports",
+ acpi_device_name(device), acpi_device_bid(device));
+ printk(" %d throttling states", pr->throttling.state_count);
+ printk(")\n");
+ }
+
return 0;
+
+err_remove_sysfs:
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+err_thermal_unregister:
+ thermal_cooling_device_unregister(pr->cdev);
+err_power_exit:
+ acpi_processor_power_exit(pr, device);
+err_remove_fs:
+ acpi_processor_remove_fs(device);
+err_free_cpumask:
+ free_cpumask_var(pr->throttling.shared_cpu_map);
+
+ return result;
}
static int acpi_processor_remove(struct acpi_device *device, int type)
@@ -942,7 +958,6 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
acpi_handle phandle;
struct acpi_device *pdev;
- struct acpi_processor *pr;
if (acpi_get_parent(handle, &phandle)) {
@@ -957,15 +972,6 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
return -ENODEV;
}
- acpi_bus_start(*device);
-
- pr = acpi_driver_data(*device);
- if (!pr)
- return -ENODEV;
-
- if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
- kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
- }
return 0;
}
@@ -995,25 +1001,6 @@ static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
"Unable to add the device\n");
break;
}
-
- pr = acpi_driver_data(device);
- if (!pr) {
- printk(KERN_ERR PREFIX "Driver data is NULL\n");
- break;
- }
-
- if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
- kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- break;
- }
-
- result = acpi_processor_start(device);
- if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
- kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
- } else {
- printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
- acpi_device_bid(device));
- }
break;
case ACPI_NOTIFY_EJECT_REQUEST:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -1030,9 +1017,6 @@ static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
"Driver data is NULL, dropping EJECT\n");
return;
}
-
- if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
- kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -1158,11 +1142,11 @@ static int __init acpi_processor_init(void)
(struct acpi_table_header **)&madt)))
madt = NULL;
#endif
-
+#ifdef CONFIG_ACPI_PROCFS
acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
if (!acpi_processor_dir)
return -ENOMEM;
-
+#endif
/*
	 * Check whether the system is listed in the DMI table. If so, OSPM
	 * should not use mwait for CPU states.
@@ -1190,7 +1174,9 @@ out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
out_proc:
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
return result;
}
@@ -1207,7 +1193,9 @@ static void __exit acpi_processor_exit(void)
cpuidle_unregister_driver(&acpi_idle_driver);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
return;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0efa59e7e3af..035d05881984 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -678,6 +678,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
return 0;
}
+#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_processor *pr = seq->private;
@@ -757,7 +758,7 @@ static const struct file_operations acpi_processor_power_fops = {
.llseek = seq_lseek,
.release = single_release,
};
-
+#endif
/**
* acpi_idle_bm_check - checks if bus master activity was detected
@@ -1158,7 +1159,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
{
acpi_status status = 0;
static int first_run;
+#ifdef CONFIG_ACPI_PROCFS
struct proc_dir_entry *entry = NULL;
+#endif
unsigned int i;
if (boot_option_idle_override)
@@ -1215,7 +1218,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
pr->power.states[i].type);
printk(")\n");
}
-
+#ifdef CONFIG_ACPI_PROCFS
/* 'power' [R] */
entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
S_IRUGO, acpi_device_dir(device),
@@ -1223,6 +1226,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
acpi_driver_data(device));
if (!entry)
return -EIO;
+#endif
return 0;
}
@@ -1235,9 +1239,11 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
cpuidle_unregister_device(&pr->power.dev);
pr->flags.power_setup_done = 0;
+#ifdef CONFIG_ACPI_PROCFS
if (acpi_device_dir(device))
remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
acpi_device_dir(device));
+#endif
return 0;
}
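
These hunks guard every /proc registration and its matching removal with the
same CONFIG_ACPI_PROCFS symbol, so the interface compiles out as a pair. A
minimal sketch of that pattern around a proc_create_data()/remove_proc_entry()
pair (the "power" entry name and example_fops are hypothetical placeholders):

	#include <linux/proc_fs.h>
	#include <linux/stat.h>

	static const struct file_operations example_fops;	/* placeholder */

	static int example_add_proc(struct proc_dir_entry *dir)
	{
	#ifdef CONFIG_ACPI_PROCFS
		if (!proc_create_data("power", S_IRUGO, dir,
				      &example_fops, NULL))
			return -EIO;
	#endif
		return 0;
	}

	static void example_remove_proc(struct proc_dir_entry *dir)
	{
	#ifdef CONFIG_ACPI_PROCFS
		if (dir)
			remove_proc_entry("power", dir);
	#endif
	}

Guarding both sides behind one symbol keeps the add and remove paths from
drifting apart when the procfs interface is configured out.
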
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 60e543d3234e..ac7f6b306460 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -509,7 +509,7 @@ int acpi_processor_preregister_performance(
struct acpi_processor *match_pr;
struct acpi_psd_package *match_pdomain;
- if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&performance_mutex);
@@ -556,7 +556,6 @@ int acpi_processor_preregister_performance(
* Now that we have _PSD data from all CPUs, lets setup P-state
* domain info.
*/
- cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 39838c666032..07e26140e977 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -438,7 +438,7 @@ struct thermal_cooling_device_ops processor_cooling_ops = {
};
/* /proc interface */
-
+#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_processor *pr = (struct acpi_processor *)seq->private;
@@ -517,3 +517,4 @@ const struct file_operations acpi_processor_limit_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 227543789ba9..aa1dd6a75a73 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -74,7 +74,7 @@ static int acpi_processor_update_tsd_coord(void)
struct acpi_tsd_package *pdomain, *match_pdomain;
struct acpi_processor_throttling *pthrottling, *match_pthrottling;
- if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
/*
@@ -102,7 +102,6 @@ static int acpi_processor_update_tsd_coord(void)
if (retval)
goto err_ret;
- cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
@@ -852,10 +851,21 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
return 0;
}
+struct get_throttling {
+ struct acpi_processor *pr;
+ int ret;
+};
+
+static void get_throttling(void *_gt)
+{
+ struct get_throttling *gt = _gt;
+
+ gt->ret = gt->pr->throttling.acpi_processor_get_throttling(gt->pr);
+}
+
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
- cpumask_var_t saved_mask;
- int ret;
+ struct get_throttling gt;
if (!pr)
return -EINVAL;
@@ -863,21 +873,9 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
if (!pr->flags.throttling)
return -ENODEV;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
- /*
- * Migrate task to the cpu pointed by pr.
- */
- cpumask_copy(saved_mask, &current->cpus_allowed);
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, cpumask_of(pr->id));
- ret = pr->throttling.acpi_processor_get_throttling(pr);
- /* restore the previous state */
- set_cpus_allowed_ptr(current, saved_mask);
- free_cpumask_var(saved_mask);
-
- return ret;
+ gt.pr = pr;
+ smp_call_function_single(pr->id, get_throttling, &gt, 1);
+ return gt.ret;
}
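
The replacement above reads the throttling state on the target CPU via
smp_call_function_single() instead of temporarily rebinding the current task
with set_cpus_allowed_ptr(). A minimal sketch of that pattern, carrying the
result back in a small context struct (all names hypothetical):

	#include <linux/smp.h>

	struct remote_call {
		int arg;
		int ret;
	};

	static void do_remote(void *_rc)
	{
		struct remote_call *rc = _rc;

		/* runs on the target CPU in IPI context; must not sleep */
		rc->ret = rc->arg * 2;	/* stands in for the real per-CPU work */
	}

	static int run_on_cpu(int cpu, int arg)
	{
		struct remote_call rc = { .arg = arg };

		/* final argument 1 = wait for the remote call to complete */
		smp_call_function_single(cpu, do_remote, &rc, 1);
		return rc.ret;
	}

Because the callback runs with interrupts disabled on the remote CPU, whatever
it does there must not sleep; passing wait=1 also keeps the stack-allocated
context valid until the remote function has finished.
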
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1018,9 +1016,23 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
return 0;
}
+struct set_throttling_info {
+ struct acpi_processor *pr;
+ struct acpi_processor_throttling *p_throttling;
+ int target_state;
+ int ret;
+};
+
+static void set_throttling(void *_sti)
+{
+ struct set_throttling_info *s = _sti;
+
+ s->ret = s->p_throttling->acpi_processor_set_throttling(s->pr,
+ s->target_state);
+}
+
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
- cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
@@ -1037,15 +1049,9 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
- if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
- free_cpumask_var(saved_mask);
+ if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL))
return -ENOMEM;
- }
- cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
cpumask_and(online_throttling_cpus, cpu_online_mask,
@@ -1067,10 +1073,10 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, cpumask_of(pr->id));
- ret = p_throttling->acpi_processor_set_throttling(pr,
- t_state.target_state);
+ struct set_throttling_info sti
+ = { pr, p_throttling, t_state.target_state };
+ smp_call_function_single(pr->id, set_throttling, &sti, 1);
+ ret = sti.ret;
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
@@ -1078,6 +1084,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* cpus.
*/
for_each_cpu(i, online_throttling_cpus) {
+ struct set_throttling_info sti;
+
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1099,11 +1107,11 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
continue;
}
t_state.cpu = i;
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, cpumask_of(i));
- ret = match_pr->throttling.
- acpi_processor_set_throttling(
- match_pr, t_state.target_state);
+ sti.pr = match_pr;
+ sti.p_throttling = &match_pr->throttling;
+ sti.target_state = t_state.target_state;
+ smp_call_function_single(i, set_throttling, &sti, 1);
+ ret = sti.ret;
}
}
/*
@@ -1117,11 +1125,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
- /* restore the previous state */
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, saved_mask);
- free_cpumask_var(online_throttling_cpus);
- free_cpumask_var(saved_mask);
return ret;
}
@@ -1214,7 +1217,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
}
/* proc interface */
-
+#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_throttling_seq_show(struct seq_file *seq,
void *offset)
{
@@ -1322,3 +1325,4 @@ const struct file_operations acpi_processor_throttling_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 781435d7e369..4a89f081160f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -426,9 +426,6 @@ static int acpi_device_probe(struct device * dev)
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
if (ret) {
- if (acpi_drv->ops.stop)
- acpi_drv->ops.stop(acpi_dev,
- acpi_dev->removal_type);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev,
acpi_dev->removal_type);
@@ -452,8 +449,6 @@ static int acpi_device_remove(struct device * dev)
if (acpi_drv) {
if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev);
- if (acpi_drv->ops.stop)
- acpi_drv->ops.stop(acpi_dev, acpi_dev->removal_type);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 01574a066534..bf2e2e167105 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -257,20 +257,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
- /* ACPI 3.0 specs (P62) says that it's the responsibility
- * of the OSPM to clear the status bit [ implying that the
- * POWER_BUTTON event should not reach userspace ]
- */
- if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
- acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
-
- /*
- * Disable and clear GPE status before interrupt is enabled. Some GPEs
- * (like wakeup GPE) haven't handler, this can avoid such GPE misfire.
- * acpi_leave_sleep_state will reenable specific GPEs later
- */
- acpi_disable_all_gpes();
-
local_irq_restore(flags);
printk(KERN_DEBUG "Back to C!\n");
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8851315ce858..4f22c075c31e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -603,6 +603,7 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
unsigned long long *level)
{
acpi_status status = AE_OK;
+ int i;
if (device->cap._BQC || device->cap._BCQ) {
char *buf = device->cap._BQC ? "_BQC" : "_BCQ";
@@ -618,8 +619,15 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
}
*level += bqc_offset_aml_bug_workaround;
- device->brightness->curr = *level;
- return 0;
+ for (i = 2; i < device->brightness->count; i++)
+ if (device->brightness->levels[i] == *level) {
+ device->brightness->curr = *level;
+ return 0;
+ }
+ /* BQC returned an invalid level. Stop using it. */
+ ACPI_WARNING((AE_INFO, "%s returned an invalid level",
+ buf));
+ device->cap._BQC = device->cap._BCQ = 0;
} else {
/* Fixme:
* should we return an error or ignore this failure?
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 3d763fdf99b7..246650673010 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -207,6 +207,16 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
void __iomem *tmp;
int i, ret;
+ device_initialize(&dev->dev);
+
+ /*
+ * Copy from device_add
+ */
+ if (dev->dev.init_name) {
+ dev_set_name(&dev->dev, "%s", dev->dev.init_name);
+ dev->dev.init_name = NULL;
+ }
+
dev->dev.release = amba_device_release;
dev->dev.bus = &amba_bustype;
dev->dev.dma_mask = &dev->dma_mask;
@@ -240,7 +250,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
goto err_release;
}
- ret = device_register(&dev->dev);
+ ret = device_add(&dev->dev);
if (ret)
goto err_release;
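
The amba change above replaces device_register() with an early
device_initialize() plus a later device_add(), so the device name can be
settled in between. A minimal sketch of that two-step registration (the
mydev_* names are hypothetical):

	#include <linux/device.h>

	static int mydev_register(struct device *dev, const char *name, int id)
	{
		int ret;

		device_initialize(dev);		/* dev is now refcounted */
		dev_set_name(dev, "%s%d", name, id);

		ret = device_add(dev);		/* makes the device visible */
		if (ret)
			put_device(dev);	/* undo device_initialize() */
		return ret;
	}

After device_initialize() the struct device is reference-counted, so error
handling uses put_device() rather than freeing the structure directly.
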
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index fa22f94ca415..a3df9fe566c2 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -999,7 +999,9 @@ static void __ata_port_freeze(struct ata_port *ap)
* ata_port_freeze - abort & freeze port
* @ap: ATA port to freeze
*
- * Abort and freeze @ap.
+ * Abort and freeze @ap. The freeze operation must be called
+ * first, because some hardware requires special operations
+ * before the taskfile registers are accessible.
*
* LOCKING:
* spin_lock_irqsave(host lock)
@@ -1013,8 +1015,8 @@ int ata_port_freeze(struct ata_port *ap)
WARN_ON(!ap->ops->error_handler);
- nr_aborted = ata_port_abort(ap);
__ata_port_freeze(ap);
+ nr_aborted = ata_port_abort(ap);
return nr_aborted;
}
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 030ec079b184..d41dcebb746a 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -565,6 +565,19 @@ static void sil_freeze(struct ata_port *ap)
tmp |= SIL_MASK_IDE0_INT << ap->port_no;
writel(tmp, mmio_base + SIL_SYSCFG);
readl(mmio_base + SIL_SYSCFG); /* flush */
+
+ /* Ensure DMA_ENABLE is off.
+ *
+ * This is because the controller will not give us access to the
+ * taskfile registers while a DMA is in progress
+ */
+ iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
+ ap->ioaddr.bmdma_addr);
+
+ /* According to ata_bmdma_stop, an HDMA transition requires
+	 * one PIO cycle. But we can't read a taskfile register.
+ */
+ ioread8(ap->ioaddr.bmdma_addr);
}
static void sil_thaw(struct ata_port *ap)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 81cb01bfc356..455e55971d0e 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -628,30 +628,6 @@ static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
return ret;
}
-static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg)
-{
- struct platform_driver *pdrv = to_platform_driver(dev->driver);
- struct platform_device *pdev = to_platform_device(dev);
- int ret = 0;
-
- if (dev->driver && pdrv->suspend_late)
- ret = pdrv->suspend_late(pdev, mesg);
-
- return ret;
-}
-
-static int platform_legacy_resume_early(struct device *dev)
-{
- struct platform_driver *pdrv = to_platform_driver(dev->driver);
- struct platform_device *pdev = to_platform_device(dev);
- int ret = 0;
-
- if (dev->driver && pdrv->resume_early)
- ret = pdrv->resume_early(pdev);
-
- return ret;
-}
-
static int platform_legacy_resume(struct device *dev)
{
struct platform_driver *pdrv = to_platform_driver(dev->driver);
@@ -714,8 +690,6 @@ static int platform_pm_suspend_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->suspend_noirq)
ret = drv->pm->suspend_noirq(dev);
- } else {
- ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND);
}
return ret;
@@ -750,8 +724,6 @@ static int platform_pm_resume_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->resume_noirq)
ret = drv->pm->resume_noirq(dev);
- } else {
- ret = platform_legacy_resume_early(dev);
}
return ret;
@@ -797,8 +769,6 @@ static int platform_pm_freeze_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->freeze_noirq)
ret = drv->pm->freeze_noirq(dev);
- } else {
- ret = platform_legacy_suspend_late(dev, PMSG_FREEZE);
}
return ret;
@@ -833,8 +803,6 @@ static int platform_pm_thaw_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->thaw_noirq)
ret = drv->pm->thaw_noirq(dev);
- } else {
- ret = platform_legacy_resume_early(dev);
}
return ret;
@@ -869,8 +837,6 @@ static int platform_pm_poweroff_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->poweroff_noirq)
ret = drv->pm->poweroff_noirq(dev);
- } else {
- ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE);
}
return ret;
@@ -905,8 +871,6 @@ static int platform_pm_restore_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->restore_noirq)
ret = drv->pm->restore_noirq(dev);
- } else {
- ret = platform_legacy_resume_early(dev);
}
return ret;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index fae725458981..58a3e572f2c9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -762,6 +762,7 @@ static int dpm_prepare(pm_message_t state)
dev->power.status = DPM_ON;
if (error == -EAGAIN) {
put_device(dev);
+ error = 0;
continue;
}
printk(KERN_ERR "PM: Failed to prepare device %s "
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 668dc234b8e2..dc81bbf81459 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7209,7 +7209,7 @@ static struct pci_driver DAC960_pci_driver = {
.remove = DAC960_Remove,
};
-static int DAC960_init_module(void)
+static int __init DAC960_init_module(void)
{
int ret;
@@ -7221,7 +7221,7 @@ static int DAC960_init_module(void)
return ret;
}
-static void DAC960_cleanup_module(void)
+static void __exit DAC960_cleanup_module(void)
{
int i;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index bb72ada9f074..1d886e079c58 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -298,6 +298,22 @@ config BLK_DEV_NBD
If unsure, say N.
+config BLK_DEV_OSD
+ tristate "OSD object-as-blkdev support"
+ depends on SCSI_OSD_ULD
+ ---help---
+ Saying Y or M here will allow the exporting of a single SCSI
+ OSD (object-based storage) object as a Linux block device.
+
+ For example, if you create a 2G object on an OSD device,
+ you can then use this module to present that 2G object as
+ a Linux block device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called osdblk.
+
+ If unsure, say N.
+
config BLK_DEV_SX8
tristate "Promise SATA SX8 support"
depends on PCI
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 7755a5e2a85e..cdaa3f8fddf0 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_MG_DISK) += mg_disk.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
+obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2307a271bdc9..0efb8fccb619 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -265,6 +265,7 @@ aoeblk_gdalloc(void *vp)
}
blk_queue_make_request(&d->blkq, aoeblk_make_request);
+ d->blkq.backing_dev_info.name = "aoe";
if (bdi_init(&d->blkq.backing_dev_info))
goto err_mempool;
spin_lock_irqsave(&d->lock, flags);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c7a527c08a09..65a0655e7fc8 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -226,8 +226,18 @@ static inline void addQ(struct hlist_head *list, CommandList_struct *c)
static inline void removeQ(CommandList_struct *c)
{
- if (WARN_ON(hlist_unhashed(&c->list)))
+ /*
+ * After kexec/dump some commands might still
+ * be in flight, which the firmware will try
+ * to complete. Resetting the firmware doesn't work
+ * with old fw revisions, so we have to mark
+ * them off as 'stale' to prevent the driver from
+ * falling over.
+ */
+ if (WARN_ON(hlist_unhashed(&c->list))) {
+ c->cmd_type = CMD_MSG_STALE;
return;
+ }
hlist_del_init(&c->list);
}
@@ -4246,7 +4256,8 @@ static void fail_all_cmds(unsigned long ctlr)
while (!hlist_empty(&h->cmpQ)) {
c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
removeQ(c);
- c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ if (c->cmd_type != CMD_MSG_STALE)
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
if (c->cmd_type == CMD_RWREQ) {
complete_command(h, c, 0);
} else if (c->cmd_type == CMD_IOCTL_PEND)
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index cd665b00c7c5..dbaed1ea0da3 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -274,6 +274,7 @@ typedef struct _ErrorInfo_struct {
#define CMD_SCSI 0x03
#define CMD_MSG_DONE 0x04
#define CMD_MSG_TIMEOUT 0x05
+#define CMD_MSG_STALE 0xff
/* This structure needs to be divisible by 8 for new
* indexing method.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 862b40c90181..91b753013780 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&open_lock);
- LOCK_FDC(drive, 1);
+ if (lock_fdc(drive, 1)) {
+ mutex_unlock(&open_lock);
+ return -EINTR;
+ }
floppy_type[type] = *g;
floppy_type[type].name = "user format";
for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
new file mode 100644
index 000000000000..13c1aee6aa3f
--- /dev/null
+++ b/drivers/block/osdblk.c
@@ -0,0 +1,701 @@
+
+/*
+ osdblk.c -- Export a single SCSI OSD object as a Linux block device
+
+
+ Copyright 2009 Red Hat, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+ Instructions for use
+ --------------------
+
+ 1) Map a Linux block device to an existing OSD object.
+
+ In this example, we will use partition id 1234, object id 5678,
+ OSD device /dev/osd1.
+
+ $ echo "1234 5678 /dev/osd1" > /sys/class/osdblk/add
+
+
+ 2) List all active blkdev<->object mappings.
+
+ In this example, we have performed step #1 twice, creating two blkdevs,
+ mapped to two separate OSD objects.
+
+ $ cat /sys/class/osdblk/list
+ 0 174 1234 5678 /dev/osd1
+ 1 179 1994 897123 /dev/osd0
+
+ The columns, in order, are:
+ - blkdev unique id
+ - blkdev assigned major
+ - OSD object partition id
+ - OSD object id
+ - OSD device
+
+
+ 3) Remove an active blkdev<->object mapping.
+
+ In this example, we remove the mapping with blkdev unique id 1.
+
+ $ echo 1 > /sys/class/osdblk/remove
+
+
+   NOTE: The actual creation and deletion of OSD objects are outside the scope
+ of this driver.
+
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <scsi/osd_initiator.h>
+#include <scsi/osd_attributes.h>
+#include <scsi/osd_sec.h>
+#include <scsi/scsi_device.h>
+
+#define DRV_NAME "osdblk"
+#define PFX DRV_NAME ": "
+
+/* #define _OSDBLK_DEBUG */
+#ifdef _OSDBLK_DEBUG
+#define OSDBLK_DEBUG(fmt, a...) \
+ printk(KERN_NOTICE "osdblk @%s:%d: " fmt, __func__, __LINE__, ##a)
+#else
+#define OSDBLK_DEBUG(fmt, a...) \
+ do { if (0) printk(fmt, ##a); } while (0)
+#endif
+
+MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
+MODULE_DESCRIPTION("block device inside an OSD object osdblk.ko");
+MODULE_LICENSE("GPL");
+
+struct osdblk_device;
+
+enum {
+ OSDBLK_MINORS_PER_MAJOR = 256, /* max minors per blkdev */
+ OSDBLK_MAX_REQ = 32, /* max parallel requests */
+ OSDBLK_OP_TIMEOUT = 4 * 60, /* sync OSD req timeout */
+};
+
+struct osdblk_request {
+ struct request *rq; /* blk layer request */
+ struct bio *bio; /* cloned bio */
+ struct osdblk_device *osdev; /* associated blkdev */
+};
+
+struct osdblk_device {
+ int id; /* blkdev unique id */
+
+ int major; /* blkdev assigned major */
+ struct gendisk *disk; /* blkdev's gendisk and rq */
+ struct request_queue *q;
+
+ struct osd_dev *osd; /* associated OSD */
+
+ char name[32]; /* blkdev name, e.g. osdblk34 */
+
+ spinlock_t lock; /* queue lock */
+
+ struct osd_obj_id obj; /* OSD partition, obj id */
+ uint8_t obj_cred[OSD_CAP_LEN]; /* OSD cred */
+
+ struct osdblk_request req[OSDBLK_MAX_REQ]; /* request table */
+
+ struct list_head node;
+
+ char osd_path[0]; /* OSD device path */
+};
+
+static struct class *class_osdblk; /* /sys/class/osdblk */
+static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
+static LIST_HEAD(osdblkdev_list);
+
+static struct block_device_operations osdblk_bd_ops = {
+ .owner = THIS_MODULE,
+};
+
+static const struct osd_attr g_attr_logical_length = ATTR_DEF(
+ OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
+
+static void osdblk_make_credential(u8 cred_a[OSD_CAP_LEN],
+ const struct osd_obj_id *obj)
+{
+ osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
+}
+
+/*
+ * Perform a synchronous OSD operation. Copied from exofs; move to libosd?
+ */
+static int osd_sync_op(struct osd_request *or, int timeout, uint8_t *credential)
+{
+ int ret;
+
+ or->timeout = timeout;
+ ret = osd_finalize_request(or, 0, credential, NULL);
+ if (ret)
+ return ret;
+
+ ret = osd_execute_request(or);
+
+ /* osd_req_decode_sense(or, ret); */
+ return ret;
+}
+
+/*
+ * Perform an asynchronous OSD operation. copied from exofs; move to libosd?
+ */
+static int osd_async_op(struct osd_request *or, osd_req_done_fn *async_done,
+ void *caller_context, u8 *cred)
+{
+ int ret;
+
+ ret = osd_finalize_request(or, 0, cred, NULL);
+ if (ret)
+ return ret;
+
+ ret = osd_execute_request_async(or, async_done, caller_context);
+
+ return ret;
+}
+
+/* copied from exofs; move to libosd? */
+static int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr)
+{
+ struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
+ void *iter = NULL;
+ int nelem;
+
+ do {
+ nelem = 1;
+ osd_req_decode_get_attr_list(or, &cur_attr, &nelem, &iter);
+ if ((cur_attr.attr_page == attr->attr_page) &&
+ (cur_attr.attr_id == attr->attr_id)) {
+ attr->len = cur_attr.len;
+ attr->val_ptr = cur_attr.val_ptr;
+ return 0;
+ }
+ } while (iter);
+
+ return -EIO;
+}
+
+static int osdblk_get_obj_size(struct osdblk_device *osdev, u64 *size_out)
+{
+ struct osd_request *or;
+ struct osd_attr attr;
+ int ret;
+
+ /* start request */
+ or = osd_start_request(osdev->osd, GFP_KERNEL);
+ if (!or)
+ return -ENOMEM;
+
+ /* create a get-attributes(length) request */
+ osd_req_get_attributes(or, &osdev->obj);
+
+ osd_req_add_get_attr_list(or, &g_attr_logical_length, 1);
+
+ /* execute op synchronously */
+ ret = osd_sync_op(or, OSDBLK_OP_TIMEOUT, osdev->obj_cred);
+ if (ret)
+ goto out;
+
+ /* extract length from returned attribute info */
+ attr = g_attr_logical_length;
+ ret = extract_attr_from_req(or, &attr);
+ if (ret)
+ goto out;
+
+ *size_out = get_unaligned_be64(attr.val_ptr);
+
+out:
+ osd_end_request(or);
+ return ret;
+
+}
+
+static void osdblk_osd_complete(struct osd_request *or, void *private)
+{
+ struct osdblk_request *orq = private;
+ struct osd_sense_info osi;
+ int ret = osd_req_decode_sense(or, &osi);
+
+ if (ret) {
+ ret = -EIO;
+ OSDBLK_DEBUG("osdblk_osd_complete with err=%d\n", ret);
+ }
+
+ /* complete OSD request */
+ osd_end_request(or);
+
+ /* complete request passed to osdblk by block layer */
+ __blk_end_request_all(orq->rq, ret);
+}
+
+static void bio_chain_put(struct bio *chain)
+{
+ struct bio *tmp;
+
+ while (chain) {
+ tmp = chain;
+ chain = chain->bi_next;
+
+ bio_put(tmp);
+ }
+}
+
+static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask)
+{
+ struct bio *tmp, *new_chain = NULL, *tail = NULL;
+
+ while (old_chain) {
+ tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
+ if (!tmp)
+ goto err_out;
+
+ __bio_clone(tmp, old_chain);
+ tmp->bi_bdev = NULL;
+ gfpmask &= ~__GFP_WAIT;
+ tmp->bi_next = NULL;
+
+ if (!new_chain)
+ new_chain = tail = tmp;
+ else {
+ tail->bi_next = tmp;
+ tail = tmp;
+ }
+
+ old_chain = old_chain->bi_next;
+ }
+
+ return new_chain;
+
+err_out:
+ OSDBLK_DEBUG("bio_chain_clone with err\n");
+ bio_chain_put(new_chain);
+ return NULL;
+}
+
+static void osdblk_rq_fn(struct request_queue *q)
+{
+ struct osdblk_device *osdev = q->queuedata;
+
+ while (1) {
+ struct request *rq;
+ struct osdblk_request *orq;
+ struct osd_request *or;
+ struct bio *bio;
+ bool do_write, do_flush;
+
+ /* peek at request from block layer */
+ rq = blk_fetch_request(q);
+ if (!rq)
+ break;
+
+ /* filter out block requests we don't understand */
+ if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) {
+ blk_end_request_all(rq, 0);
+ continue;
+ }
+
+ /* deduce our operation (read, write, flush) */
+ /* I wish the block layer simplified cmd_type/cmd_flags/cmd[]
+ * into a clearly defined set of RPC commands:
+ * read, write, flush, scsi command, power mgmt req,
+ * driver-specific, etc.
+ */
+
+ do_flush = (rq->special == (void *) 0xdeadbeefUL);
+ do_write = (rq_data_dir(rq) == WRITE);
+
+ if (!do_flush) { /* osd_flush does not use a bio */
+ /* a bio clone to be passed down to OSD request */
+ bio = bio_chain_clone(rq->bio, GFP_ATOMIC);
+ if (!bio)
+ break;
+ } else
+ bio = NULL;
+
+ /* alloc internal OSD request, for OSD command execution */
+ or = osd_start_request(osdev->osd, GFP_ATOMIC);
+ if (!or) {
+ bio_chain_put(bio);
+ OSDBLK_DEBUG("osd_start_request with err\n");
+ break;
+ }
+
+ orq = &osdev->req[rq->tag];
+ orq->rq = rq;
+ orq->bio = bio;
+ orq->osdev = osdev;
+
+ /* init OSD command: flush, write or read */
+ if (do_flush)
+ osd_req_flush_object(or, &osdev->obj,
+ OSD_CDB_FLUSH_ALL, 0, 0);
+ else if (do_write)
+ osd_req_write(or, &osdev->obj, blk_rq_pos(rq) * 512ULL,
+ bio, blk_rq_bytes(rq));
+ else
+ osd_req_read(or, &osdev->obj, blk_rq_pos(rq) * 512ULL,
+ bio, blk_rq_bytes(rq));
+
+ OSDBLK_DEBUG("%s 0x%x bytes at 0x%llx\n",
+ do_flush ? "flush" : do_write ?
+ "write" : "read", blk_rq_bytes(rq),
+ blk_rq_pos(rq) * 512ULL);
+
+ /* begin OSD command execution */
+ if (osd_async_op(or, osdblk_osd_complete, orq,
+ osdev->obj_cred)) {
+ osd_end_request(or);
+ blk_requeue_request(q, rq);
+ bio_chain_put(bio);
+ OSDBLK_DEBUG("osd_execute_request_async with err\n");
+ break;
+ }
+
+ /* remove the special 'flush' marker, now that the command
+ * is executing
+ */
+ rq->special = NULL;
+ }
+}
+
+static void osdblk_prepare_flush(struct request_queue *q, struct request *rq)
+{
+ /* add driver-specific marker, to indicate that this request
+ * is a flush command
+ */
+ rq->special = (void *) 0xdeadbeefUL;
+}
+
+static void osdblk_free_disk(struct osdblk_device *osdev)
+{
+ struct gendisk *disk = osdev->disk;
+
+ if (!disk)
+ return;
+
+ if (disk->flags & GENHD_FL_UP)
+ del_gendisk(disk);
+ if (disk->queue)
+ blk_cleanup_queue(disk->queue);
+ put_disk(disk);
+}
+
+static int osdblk_init_disk(struct osdblk_device *osdev)
+{
+ struct gendisk *disk;
+ struct request_queue *q;
+ int rc;
+ u64 obj_size = 0;
+
+ /* contact OSD, request size info about the object being mapped */
+ rc = osdblk_get_obj_size(osdev, &obj_size);
+ if (rc)
+ return rc;
+
+ /* create gendisk info */
+ disk = alloc_disk(OSDBLK_MINORS_PER_MAJOR);
+ if (!disk)
+ return -ENOMEM;
+
+ sprintf(disk->disk_name, DRV_NAME "%d", osdev->id);
+ disk->major = osdev->major;
+ disk->first_minor = 0;
+ disk->fops = &osdblk_bd_ops;
+ disk->private_data = osdev;
+
+ /* init rq */
+ q = blk_init_queue(osdblk_rq_fn, &osdev->lock);
+ if (!q) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+
+ /* switch queue to TCQ mode; allocate tag map */
+ rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL);
+ if (rc) {
+ blk_cleanup_queue(q);
+ put_disk(disk);
+ return rc;
+ }
+
+ /* Set our limits to the lower device limits, because osdblk cannot
+ * sleep when allocating a lower-request and therefore cannot be
+ * bouncing.
+ */
+ blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
+
+ blk_queue_prep_rq(q, blk_queue_start_tag);
+ blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, osdblk_prepare_flush);
+
+ disk->queue = q;
+
+ q->queuedata = osdev;
+
+ osdev->disk = disk;
+ osdev->q = q;
+
+ /* finally, announce the disk to the world */
+ set_capacity(disk, obj_size / 512ULL);
+ add_disk(disk);
+
+ printk(KERN_INFO "%s: Added of size 0x%llx\n",
+ disk->disk_name, (unsigned long long)obj_size);
+
+ return 0;
+}
+
+/********************************************************************
+ * /sys/class/osdblk/
+ * add map OSD object to blkdev
+ * remove unmap OSD object
+ * list show mappings
+ *******************************************************************/
+
+static void class_osdblk_release(struct class *cls)
+{
+ kfree(cls);
+}
+
+static ssize_t class_osdblk_list(struct class *c, char *data)
+{
+ int n = 0;
+ struct list_head *tmp;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ list_for_each(tmp, &osdblkdev_list) {
+ struct osdblk_device *osdev;
+
+ osdev = list_entry(tmp, struct osdblk_device, node);
+
+ n += sprintf(data+n, "%d %d %llu %llu %s\n",
+ osdev->id,
+ osdev->major,
+ osdev->obj.partition,
+ osdev->obj.id,
+ osdev->osd_path);
+ }
+
+ mutex_unlock(&ctl_mutex);
+ return n;
+}
+
+static ssize_t class_osdblk_add(struct class *c, const char *buf, size_t count)
+{
+ struct osdblk_device *osdev;
+ ssize_t rc;
+ int irc, new_id = 0;
+ struct list_head *tmp;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ /* new osdblk_device object */
+ osdev = kzalloc(sizeof(*osdev) + strlen(buf) + 1, GFP_KERNEL);
+ if (!osdev) {
+ rc = -ENOMEM;
+ goto err_out_mod;
+ }
+
+ /* static osdblk_device initialization */
+ spin_lock_init(&osdev->lock);
+ INIT_LIST_HEAD(&osdev->node);
+
+ /* generate unique id: find highest unique id, add one */
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ list_for_each(tmp, &osdblkdev_list) {
+ struct osdblk_device *osdev;
+
+ osdev = list_entry(tmp, struct osdblk_device, node);
+ if (osdev->id > new_id)
+ new_id = osdev->id + 1;
+ }
+
+ osdev->id = new_id;
+
+ /* add to global list */
+ list_add_tail(&osdev->node, &osdblkdev_list);
+
+ mutex_unlock(&ctl_mutex);
+
+ /* parse add command */
+ if (sscanf(buf, "%llu %llu %s", &osdev->obj.partition, &osdev->obj.id,
+ osdev->osd_path) != 3) {
+ rc = -EINVAL;
+ goto err_out_slot;
+ }
+
+ /* initialize rest of new object */
+ sprintf(osdev->name, DRV_NAME "%d", osdev->id);
+
+ /* contact requested OSD */
+ osdev->osd = osduld_path_lookup(osdev->osd_path);
+ if (IS_ERR(osdev->osd)) {
+ rc = PTR_ERR(osdev->osd);
+ goto err_out_slot;
+ }
+
+ /* build OSD credential */
+ osdblk_make_credential(osdev->obj_cred, &osdev->obj);
+
+ /* register our block device */
+ irc = register_blkdev(0, osdev->name);
+ if (irc < 0) {
+ rc = irc;
+ goto err_out_osd;
+ }
+
+ osdev->major = irc;
+
+ /* set up and announce blkdev mapping */
+ rc = osdblk_init_disk(osdev);
+ if (rc)
+ goto err_out_blkdev;
+
+ return count;
+
+err_out_blkdev:
+ unregister_blkdev(osdev->major, osdev->name);
+err_out_osd:
+ osduld_put_device(osdev->osd);
+err_out_slot:
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ list_del_init(&osdev->node);
+ mutex_unlock(&ctl_mutex);
+
+ kfree(osdev);
+err_out_mod:
+ OSDBLK_DEBUG("Error adding device %s\n", buf);
+ module_put(THIS_MODULE);
+ return rc;
+}
+
+static ssize_t class_osdblk_remove(struct class *c, const char *buf,
+ size_t count)
+{
+ struct osdblk_device *osdev = NULL;
+ int target_id, rc;
+ unsigned long ul;
+ struct list_head *tmp;
+
+ rc = strict_strtoul(buf, 10, &ul);
+ if (rc)
+ return rc;
+
+ /* convert to int; abort if we lost anything in the conversion */
+ target_id = (int) ul;
+ if (target_id != ul)
+ return -EINVAL;
+
+ /* remove object from list immediately */
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ list_for_each(tmp, &osdblkdev_list) {
+ osdev = list_entry(tmp, struct osdblk_device, node);
+ if (osdev->id == target_id) {
+ list_del_init(&osdev->node);
+ break;
+ }
+ osdev = NULL;
+ }
+
+ mutex_unlock(&ctl_mutex);
+
+ if (!osdev)
+ return -ENOENT;
+
+ /* clean up and free blkdev and associated OSD connection */
+ osdblk_free_disk(osdev);
+ unregister_blkdev(osdev->major, osdev->name);
+ osduld_put_device(osdev->osd);
+ kfree(osdev);
+
+ /* release module ref */
+ module_put(THIS_MODULE);
+
+ return count;
+}
+
+static struct class_attribute class_osdblk_attrs[] = {
+ __ATTR(add, 0200, NULL, class_osdblk_add),
+ __ATTR(remove, 0200, NULL, class_osdblk_remove),
+ __ATTR(list, 0444, class_osdblk_list, NULL),
+ __ATTR_NULL
+};
+
+static int osdblk_sysfs_init(void)
+{
+ int ret = 0;
+
+ /*
+ * create control files in sysfs
+ * /sys/class/osdblk/...
+ */
+ class_osdblk = kzalloc(sizeof(*class_osdblk), GFP_KERNEL);
+ if (!class_osdblk)
+ return -ENOMEM;
+
+ class_osdblk->name = DRV_NAME;
+ class_osdblk->owner = THIS_MODULE;
+ class_osdblk->class_release = class_osdblk_release;
+ class_osdblk->class_attrs = class_osdblk_attrs;
+
+ ret = class_register(class_osdblk);
+ if (ret) {
+ kfree(class_osdblk);
+ class_osdblk = NULL;
+ printk(PFX "failed to create class osdblk\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void osdblk_sysfs_cleanup(void)
+{
+ if (class_osdblk)
+ class_destroy(class_osdblk);
+ class_osdblk = NULL;
+}
+
+static int __init osdblk_init(void)
+{
+ int rc;
+
+ rc = osdblk_sysfs_init();
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static void __exit osdblk_exit(void)
+{
+ osdblk_sysfs_cleanup();
+}
+
+module_init(osdblk_init);
+module_exit(osdblk_exit);
+
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 43db3ea15b54..fbeefb68a31f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -213,7 +213,7 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
* Only allow the generic SCSI ioctls if the host can support it.
*/
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
- return -ENOIOCTLCMD;
+ return -ENOTTY;
return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
}
@@ -360,6 +360,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+ /* No need to bounce any requests */
+ blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
+
/* No real sector limit. */
blk_queue_max_sectors(vblk->disk->queue, -1U);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e70c57ee4221..124db8cd144c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -301,7 +301,7 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
struct urb *urb;
unsigned char *buf;
unsigned int pipe;
- int err, size;
+ int err, size = HCI_MAX_FRAME_SIZE;
BT_DBG("%s", hdev->name);
@@ -312,8 +312,6 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb)
return -ENOMEM;
- size = le16_to_cpu(data->bulk_rx_ep->wMaxPacketSize);
-
buf = kmalloc(size, mem_flags);
if (!buf) {
usb_free_urb(urb);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0bd01f49cfd8..6a06913b01d3 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1029,10 +1029,6 @@ config CS5535_GPIO
If compiled as a module, it will be called cs5535_gpio.
-config GPIO_VR41XX
- tristate "NEC VR4100 series General-purpose I/O Unit support"
- depends on CPU_VR41XX
-
config RAW_DRIVER
tristate "RAW driver (/dev/raw/rawN)"
depends on BLOCK
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 189efcff08ce..66f779ad4f4c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -95,7 +95,6 @@ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
-obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 140ea10ecb88..c02db01f736e 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -27,6 +27,7 @@
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
+#include <asm/pgtable.h>
#include <asm/io.h>
/*
@@ -75,12 +76,13 @@ static struct class *bsr_class;
static int bsr_major;
enum {
- BSR_8 = 0,
- BSR_16 = 1,
- BSR_64 = 2,
- BSR_128 = 3,
- BSR_UNKNOWN = 4,
- BSR_MAX = 5,
+ BSR_8 = 0,
+ BSR_16 = 1,
+ BSR_64 = 2,
+ BSR_128 = 3,
+ BSR_4096 = 4,
+ BSR_UNKNOWN = 5,
+ BSR_MAX = 6,
};
static unsigned bsr_types[BSR_MAX];
@@ -117,15 +119,22 @@ static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long size = vma->vm_end - vma->vm_start;
struct bsr_dev *dev = filp->private_data;
+ int ret;
- if (size > dev->bsr_len || (size & (PAGE_SIZE-1)))
- return -EINVAL;
-
- vma->vm_flags |= (VM_IO | VM_DONTEXPAND);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT,
- size, vma->vm_page_prot))
+	/* check for the case of a small BSR device and map one 4k page for it */
+ if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
+ ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
+ vma->vm_page_prot);
+ else if (size <= dev->bsr_len)
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ dev->bsr_addr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ else
+ return -EINVAL;
+
+ if (ret)
return -EAGAIN;
return 0;
@@ -205,6 +214,11 @@ static int bsr_add_node(struct device_node *bn)
cur->bsr_stride = bsr_stride[i];
cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
+	/* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */
+ /* we can only map 4k of it, so only advertise the 4k in sysfs */
+ if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
+ cur->bsr_len = 4096;
+
switch(cur->bsr_bytes) {
case 8:
cur->bsr_type = BSR_8;
@@ -218,9 +232,11 @@ static int bsr_add_node(struct device_node *bn)
case 128:
cur->bsr_type = BSR_128;
break;
+ case 4096:
+ cur->bsr_type = BSR_4096;
+ break;
default:
cur->bsr_type = BSR_UNKNOWN;
- printk(KERN_INFO "unknown BSR size %d\n",cur->bsr_bytes);
}
cur->bsr_num = bsr_types[cur->bsr_type];
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index f3366d3f06cf..dbfa8b50a5e8 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -11,7 +11,7 @@
* Initially written by Randolph Bentson <bentson@grieg.seaslug.org>.
* Modified and maintained by Marcio Saito <marcio@cyclades.com>.
*
- * Copyright (C) 2007 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (C) 2007-2009 Jiri Slaby <jirislaby@gmail.com>
*
* Much of the design and some of the code came from serial.c
* which was copyright (C) 1991, 1992 Linus Torvalds. It was
@@ -19,577 +19,9 @@
* and then fixed as suggested by Michael K. Johnson 12/12/92.
* Converted to pci probing and cleaned up by Jiri Slaby.
*
- * This version supports shared IRQ's (only for PCI boards).
- *
- * Prevent users from opening non-existing Z ports.
- *
- * Revision 2.3.2.8 2000/07/06 18:14:16 ivan
- * Fixed the PCI detection function to work properly on Alpha systems.
- * Implemented support for TIOCSERGETLSR ioctl.
- * Implemented full support for non-standard baud rates.
- *
- * Revision 2.3.2.7 2000/06/01 18:26:34 ivan
- * Request PLX I/O region, although driver doesn't use it, to avoid
- * problems with other drivers accessing it.
- * Removed count for on-board buffer characters in cy_chars_in_buffer
- * (Cyclades-Z only).
- *
- * Revision 2.3.2.6 2000/05/05 13:56:05 ivan
- * Driver now reports physical instead of virtual memory addresses.
- * Masks were added to some Cyclades-Z read accesses.
- * Implemented workaround for PLX9050 bug that would cause a system lockup
- * in certain systems, depending on the MMIO addresses allocated to the
- * board.
- * Changed the Tx interrupt programming in the CD1400 chips to boost up
- * performance (Cyclom-Y only).
- * Code is now compliant with the new module interface (module_[init|exit]).
- * Make use of the PCI helper functions to access PCI resources.
- * Did some code "housekeeping".
- *
- * Revision 2.3.2.5 2000/01/19 14:35:33 ivan
- * Fixed bug in cy_set_termios on CRTSCTS flag turnoff.
- *
- * Revision 2.3.2.4 2000/01/17 09:19:40 ivan
- * Fixed SMP locking in Cyclom-Y interrupt handler.
- *
- * Revision 2.3.2.3 1999/12/28 12:11:39 ivan
- * Added a new cyclades_card field called nports to allow the driver to
- * know the exact number of ports found by the Z firmware after its load;
- * RX buffer contention prevention logic on interrupt op mode revisited
- * (Cyclades-Z only);
- * Revisited printk's for Z debug;
- * Driver now makes sure that the constant SERIAL_XMIT_SIZE is defined;
- *
- * Revision 2.3.2.2 1999/10/01 11:27:43 ivan
- * Fixed bug in cyz_poll that would make all ports but port 0
- * unable to transmit/receive data (Cyclades-Z only);
- * Implemented logic to prevent the RX buffer from being stuck with data
- * due to a driver / firmware race condition in interrupt op mode
- * (Cyclades-Z only);
- * Fixed bug in block_til_ready logic that would lead to a system crash;
- * Revisited cy_close spinlock usage;
- *
- * Revision 2.3.2.1 1999/09/28 11:01:22 ivan
- * Revisited CONFIG_PCI conditional compilation for PCI board support;
- * Implemented TIOCGICOUNT and TIOCMIWAIT ioctl support;
- * _Major_ cleanup on the Cyclades-Z interrupt support code / logic;
- * Removed CTS handling from the driver -- this is now completely handled
- * by the firmware (Cyclades-Z only);
- * Flush RX on-board buffers on a port open (Cyclades-Z only);
- * Fixed handling of ASYNC_SPD_* TTY flags;
- * Module unload now unmaps all memory area allocated by ioremap;
- *
- * Revision 2.3.1.1 1999/07/15 16:45:53 ivan
- * Removed CY_PROC conditional compilation;
- * Implemented SMP-awareness for the driver;
- * Implemented a new ISA IRQ autoprobe that uses the irq_probe_[on|off]
- * functions;
- * The driver now accepts memory addresses (maddr=0xMMMMM) and IRQs
- * (irq=NN) as parameters (only for ISA boards);
- * Fixed bug in set_line_char that would prevent the Cyclades-Z
- * ports from being configured at speeds above 115.2Kbps;
- * Fixed bug in cy_set_termios that would prevent XON/XOFF flow control
- * switching from working properly;
- * The driver now only prints IRQ info for the Cyclades-Z if it's
- * configured to work in interrupt mode;
- *
- * Revision 2.2.2.3 1999/06/28 11:13:29 ivan
- * Added support for interrupt mode operation for the Z cards;
- * Removed the driver inactivity control for the Z;
- * Added a missing MOD_DEC_USE_COUNT in the cy_open function for when
- * the Z firmware is not loaded yet;
- * Replaced the "manual" Z Tx flush buffer by a call to a FW command of
- * same functionality;
- * Implemented workaround for IRQ setting loss on the PCI configuration
- * registers after a PCI bridge EEPROM reload (affects PLX9060 only);
- *
- * Revision 2.2.2.2 1999/05/14 17:18:15 ivan
- * /proc entry location changed to /proc/tty/driver/cyclades;
- * Added support to shared IRQ's (only for PCI boards);
- * Added support for Cobalt Qube2 systems;
- * IRQ [de]allocation scheme revisited;
- * BREAK implementation changed in order to make use of the 'break_ctl'
- * TTY facility;
- * Fixed typo in TTY structure field 'driver_name';
- * Included a PCI bridge reset and EEPROM reload in the board
- * initialization code (for both Y and Z series).
- *
- * Revision 2.2.2.1 1999/04/08 16:17:43 ivan
- * Fixed a bug in cy_wait_until_sent that was preventing the port to be
- * closed properly after a SIGINT;
- * Module usage counter scheme revisited;
- * Added support to the upcoming Y PCI boards (i.e., support to additional
- * PCI Device ID's).
- *
- * Revision 2.2.1.10 1999/01/20 16:14:29 ivan
- * Removed all unnecessary page-alignement operations in ioremap calls
- * (ioremap is currently safe for these operations).
- *
- * Revision 2.2.1.9 1998/12/30 18:18:30 ivan
- * Changed access to PLX PCI bridge registers from I/O to MMIO, in
- * order to make PLX9050-based boards work with certain motherboards.
- *
- * Revision 2.2.1.8 1998/11/13 12:46:20 ivan
- * cy_close function now resets (correctly) the tty->closing flag;
- * JIFFIES_DIFF macro fixed.
- *
- * Revision 2.2.1.7 1998/09/03 12:07:28 ivan
- * Fixed bug in cy_close function, which was not informing HW of
- * which port should have the reception disabled before doing so;
- * fixed Cyclom-8YoP hardware detection bug.
- *
- * Revision 2.2.1.6 1998/08/20 17:15:39 ivan
- * Fixed bug in cy_close function, which causes malfunction
- * of one of the first 4 ports when a higher port is closed
- * (Cyclom-Y only).
- *
- * Revision 2.2.1.5 1998/08/10 18:10:28 ivan
- * Fixed Cyclom-4Yo hardware detection bug.
- *
- * Revision 2.2.1.4 1998/08/04 11:02:50 ivan
- * /proc/cyclades implementation with great collaboration of
- * Marc Lewis <marc@blarg.net>;
- * cyy_interrupt was changed to avoid occurrence of kernel oopses
- * during PPP operation.
- *
- * Revision 2.2.1.3 1998/06/01 12:09:10 ivan
- * General code review in order to comply with 2.1 kernel standards;
- * data loss prevention for slow devices revisited (cy_wait_until_sent
- * was created);
- * removed conditional compilation for new/old PCI structure support
- * (now the driver only supports the new PCI structure).
- *
- * Revision 2.2.1.1 1998/03/19 16:43:12 ivan
- * added conditional compilation for new/old PCI structure support;
- * removed kernel series (2.0.x / 2.1.x) conditional compilation.
- *
- * Revision 2.1.1.3 1998/03/16 18:01:12 ivan
- * cleaned up the data loss fix;
- * fixed XON/XOFF handling once more (Cyclades-Z);
- * general review of the driver routines;
- * introduction of a mechanism to prevent data loss with slow
- * printers, by forcing a delay before closing the port.
- *
- * Revision 2.1.1.2 1998/02/17 16:50:00 ivan
- * fixed detection/handling of new CD1400 in Ye boards;
- * fixed XON/XOFF handling (Cyclades-Z);
- * fixed data loss caused by a premature port close;
- * introduction of a flag that holds the CD1400 version ID per port
- * (used by the CYGETCD1400VER new ioctl).
- *
- * Revision 2.1.1.1 1997/12/03 17:31:19 ivan
- * Code review for the module cleanup routine;
- * fixed RTS and DTR status report for new CD1400's in get_modem_info;
- * includes anonymous changes regarding signal_pending.
- *
- * Revision 2.1 1997/11/01 17:42:41 ivan
- * Changes in the driver to support Alpha systems (except 8Zo V_1);
- * BREAK fix for the Cyclades-Z boards;
- * driver inactivity control by FW implemented;
- * introduction of flag that allows driver to take advantage of
- * a special CD1400 feature related to HW flow control;
- * added support for the CD1400 rev. J (Cyclom-Y boards);
- * introduction of ioctls to:
- * - control the rtsdtr_inv flag (Cyclom-Y);
- * - control the rflow flag (Cyclom-Y);
- * - adjust the polling interval (Cyclades-Z);
- *
- * Revision 1.36.4.33 1997/06/27 19:00:00 ivan
- * Fixes related to kernel version conditional
- * compilation.
- *
- * Revision 1.36.4.32 1997/06/14 19:30:00 ivan
- * Compatibility issues between kernels 2.0.x and
- * 2.1.x (mainly related to clear_bit function).
- *
- * Revision 1.36.4.31 1997/06/03 15:30:00 ivan
- * Changes to define the memory window according to the
- * board type.
- *
- * Revision 1.36.4.30 1997/05/16 15:30:00 daniel
- * Changes to support new cycladesZ boards.
- *
- * Revision 1.36.4.29 1997/05/12 11:30:00 daniel
- * Merge of Bentson's and Daniel's version 1.36.4.28.
- * Corrects bug in cy_detect_pci: check if there are more
- * ports than the number of static structs allocated.
- * Warning message during initialization if this driver is
- * used with the new generation of cycladesZ boards. Those
- * will be supported only in next release of the driver.
- * Corrects bug in cy_detect_pci and cy_detect_isa that
- * returned wrong number of VALID boards, when a cyclomY
- * was found with no serial modules connected.
- * Changes to use current (2.1.x) kernel subroutine names
- * and created macros for compilation with 2.0.x kernel,
- * instead of the other way around.
- *
- * Revision 1.36.4.28 1997/05/?? ??:00:00 bentson
- * Change queue_task_irq_off to queue_task_irq.
- * The inline function queue_task_irq_off (tqueue.h)
- * was removed from latest releases of 2.1.x kernel.
- * Use of macro __init to mark the initialization
- * routines, so memory can be reused.
- * Also incorporate implementation of critical region
- * in function cleanup_module() created by anonymous
- * linuxer.
- *
- * Revision 1.36.4.28 1997/04/25 16:00:00 daniel
- * Change to support new firmware that solves DCD problem:
- * application could fail to receive SIGHUP signal when DCD
- * varying too fast.
- *
- * Revision 1.36.4.27 1997/03/26 10:30:00 daniel
- * Changed for support linux versions 2.1.X.
- * Backward compatible with linux versions 2.0.X.
- * Corrected illegal use of filler field in
- * CH_CTRL struct.
- * Deleted some debug messages.
- *
- * Revision 1.36.4.26 1997/02/27 12:00:00 daniel
- * Included check for NULL tty pointer in cyz_poll.
- *
- * Revision 1.36.4.25 1997/02/26 16:28:30 bentson
- * Bill Foster at Blarg! Online services noticed that
- * some of the switch elements of -Z modem control
- * lacked a closing "break;"
- *
- * Revision 1.36.4.24 1997/02/24 11:00:00 daniel
- * Changed low water threshold for buffer xmit_buf
- *
- * Revision 1.36.4.23 1996/12/02 21:50:16 bentson
- * Marcio provided fix to modem status fetch for -Z
- *
- * Revision 1.36.4.22 1996/10/28 22:41:17 bentson
- * improve mapping of -Z control page (thanks to Steve
- * Price <stevep@fa.tdktca.com> for help on this)
- *
- * Revision 1.36.4.21 1996/09/10 17:00:10 bentson
- * shift from CPU-bound to memcopy in cyz_polling operation
- *
- * Revision 1.36.4.20 1996/09/09 18:30:32 Bentson
- * Added support to set and report higher speeds.
- *
- * Revision 1.36.4.19c 1996/08/09 10:00:00 Marcio Saito
- * Some fixes in the HW flow control for the BETA release.
- * Don't try to register the IRQ.
- *
- * Revision 1.36.4.19 1996/08/08 16:23:18 Bentson
- * make sure "cyc" appears in all kernel messages; all soft interrupts
- * handled by same routine; recognize out-of-band reception; comment
- * out some diagnostic messages; leave RTS/CTS flow control to hardware;
- * fix race condition in -Z buffer management; only -Y needs to explicitly
- * flush chars; tidy up some startup messages;
- *
- * Revision 1.36.4.18 1996/07/25 18:57:31 bentson
- * shift MOD_INC_USE_COUNT location to match
- * serial.c; purge some diagnostic messages;
- *
- * Revision 1.36.4.17 1996/07/25 18:01:08 bentson
- * enable modem status messages and fetch & process them; note
- * time of last activity type for each port; set_line_char now
- * supports more than line 0 and treats 0 baud correctly;
- * get_modem_info senses rs_status;
- *
- * Revision 1.36.4.16 1996/07/20 08:43:15 bentson
- * barely works--now's time to turn on
- * more features 'til it breaks
- *
- * Revision 1.36.4.15 1996/07/19 22:30:06 bentson
- * check more -Z board status; shorten boot message
- *
- * Revision 1.36.4.14 1996/07/19 22:20:37 bentson
- * fix reference to ch_ctrl in startup; verify return
- * values from cyz_issue_cmd and cyz_update_channel;
- * more stuff to get modem control correct;
- *
- * Revision 1.36.4.13 1996/07/11 19:53:33 bentson
- * more -Z stuff folded in; re-order changes to put -Z stuff
- * after -Y stuff (to make changes clearer)
- *
- * Revision 1.36.4.12 1996/07/11 15:40:55 bentson
- * Add code to poll Cyclades-Z. Add code to get & set RS-232 control.
- * Add code to send break. Clear firmware ID word at startup (so
- * that other code won't talk to inactive board).
- *
- * Revision 1.36.4.11 1996/07/09 05:28:29 bentson
- * add code for -Z in set_line_char
- *
- * Revision 1.36.4.10 1996/07/08 19:28:37 bentson
- * fold more -Z stuff (or in some cases, error messages)
- * into driver; add text to "don't know what to do" messages.
- *
- * Revision 1.36.4.9 1996/07/08 18:38:38 bentson
- * moved compile-time flags near top of file; cosmetic changes
- * to narrow text (to allow 2-up printing); changed many declarations
- * to "static" to limit external symbols; shuffled code order to
- * coalesce -Y and -Z specific code, also to put internal functions
- * in order of tty_driver structure; added code to recognize -Z
- * ports (and for moment, do nothing or report error); add cy_startup
- * to parse boot command line for extra base addresses for ISA probes;
- *
- * Revision 1.36.4.8 1996/06/25 17:40:19 bentson
- * reorder some code, fix types of some vars (int vs. long),
- * add cy_setup to support user declared ISA addresses
- *
- * Revision 1.36.4.7 1996/06/21 23:06:18 bentson
- * dump ioctl based firmware load (it's now a user level
- * program); ensure uninitialized ports cannot be used
- *
- * Revision 1.36.4.6 1996/06/20 23:17:19 bentson
- * rename vars and restructure some code
- *
- * Revision 1.36.4.5 1996/06/14 15:09:44 bentson
- * get right status back after boot load
- *
- * Revision 1.36.4.4 1996/06/13 19:51:44 bentson
- * successfully loads firmware
- *
- * Revision 1.36.4.3 1996/06/13 06:08:33 bentson
- * add more of the code for the boot/load ioctls
- *
- * Revision 1.36.4.2 1996/06/11 21:00:51 bentson
- * start to add Z functionality--starting with ioctl
- * for loading firmware
- *
- * Revision 1.36.4.1 1996/06/10 18:03:02 bentson
- * added code to recognize Z/PCI card at initialization; report
- * presence, but card is not initialized (because firmware needs
- * to be loaded)
- *
- * Revision 1.36.3.8 1996/06/07 16:29:00 bentson
- * starting minor number at zero; added missing verify_area
- * as noted by Heiko Eißfeldt <heiko@colossus.escape.de>
- *
- * Revision 1.36.3.7 1996/04/19 21:06:18 bentson
- * remove unneeded boot message & fix CLOCAL hardware flow
- * control (Miquel van Smoorenburg <miquels@Q.cistron.nl>);
- * remove unused diagnostic statements; minor 0 is first;
- *
- * Revision 1.36.3.6 1996/03/13 13:21:17 marcio
- * The kernel function vremap (available only in later 1.3.xx kernels)
- * allows the access to memory addresses above the RAM. This revision
- * of the driver supports PCI boards below 1Mb (device id 0x100) and
- * above 1Mb (device id 0x101).
- *
- * Revision 1.36.3.5 1996/03/07 15:20:17 bentson
- * Some global changes to interrupt handling spilled into
- * this driver--mostly unused arguments in system function
- * calls. Also added change by Marcio Saito which should
- * reduce lost interrupts at startup by fast processors.
- *
- * Revision 1.36.3.4 1995/11/13 20:45:10 bentson
- * Changes by Corey Minyard <minyard@wf-rch.cirr.com> distributed
- * in 1.3.41 kernel to remove a possible race condition, extend
- * some error messages, and let the driver run as a loadable module
- * Change by Alan Wendt <alan@ez0.ezlink.com> to remove a
- * possible race condition.
- * Change by Marcio Saito <marcio@cyclades.com> to fix PCI addressing.
- *
- * Revision 1.36.3.3 1995/11/13 19:44:48 bentson
- * Changes by Linus Torvalds in 1.3.33 kernel distribution
- * required due to reordering of driver initialization.
- * Drivers are now initialized *after* memory management.
- *
- * Revision 1.36.3.2 1995/09/08 22:07:14 bentson
- * remove printk from ISR; fix typo
- *
- * Revision 1.36.3.1 1995/09/01 12:00:42 marcio
- * Minor fixes in the PCI board support. PCI function calls in
- * conditional compilation (CONFIG_PCI). Thanks to Jim Duncan
- * <duncan@okay.com>. "bad serial count" message removed.
- *
- * Revision 1.36.3 1995/08/22 09:19:42 marcio
- * Cyclom-Y/PCI support added. Changes in the cy_init routine and
- * board initialization. Changes in the boot messages. The driver
- * supports up to 4 boards and 64 ports by default.
- *
- * Revision 1.36.1.4 1995/03/29 06:14:14 bentson
- * disambiguate between Cyclom-16Y and Cyclom-32Ye;
- *
- * Revision 1.36.1.3 1995/03/23 22:15:35 bentson
- * add missing break in modem control block in ioctl switch statement
- * (discovered by Michael Edward Chastain <mec@jobe.shell.portal.com>);
- *
- * Revision 1.36.1.2 1995/03/22 19:16:22 bentson
- * make sure CTS flow control is set as soon as possible (thanks
- * to note from David Lambert <lambert@chesapeake.rps.slb.com>);
- *
- * Revision 1.36.1.1 1995/03/13 15:44:43 bentson
- * initialize defaults for receive threshold and stale data timeout;
- * cosmetic changes;
- *
- * Revision 1.36 1995/03/10 23:33:53 bentson
- * added support of chips 4-7 in 32 port Cyclom-Ye;
- * fix cy_interrupt pointer dereference problem
- * (Joe Portman <baron@aa.net>);
- * give better error response if open is attempted on non-existent port
- * (Zachariah Vaum <jchryslr@netcom.com>);
- * correct command timeout (Kenneth Lerman <lerman@seltd.newnet.com>);
- * conditional compilation for -16Y on systems with fast, noisy bus;
- * comment out diagnostic print function;
- * cleaned up table of base addresses;
- * set receiver time-out period register to correct value,
- * set receive threshold to better default values,
- * set chip timer to more accurate 200 Hz ticking,
- * add code to monitor and modify receive parameters
- * (Rik Faith <faith@cs.unc.edu> Nick Simicich
- * <njs@scifi.emi.net>);
- *
- * Revision 1.35 1994/12/16 13:54:18 steffen
- * additional patch by Marcio Saito for board detection
- * Accidentally left out in 1.34
- *
- * Revision 1.34 1994/12/10 12:37:12 steffen
- * This is the corrected version as suggested by Marcio Saito
- *
- * Revision 1.33 1994/12/01 22:41:18 bentson
- * add hooks to support more high speeds directly; add tytso
- * patch regarding CLOCAL wakeups
- *
- * Revision 1.32 1994/11/23 19:50:04 bentson
- * allow direct kernel control of higher signalling rates;
- * look for cards at additional locations
- *
- * Revision 1.31 1994/11/16 04:33:28 bentson
- * ANOTHER fix from Corey Minyard, minyard@wf-rch.cirr.com--
- * a problem in chars_in_buffer has been resolved by some
- * small changes; this should yield smoother output
- *
- * Revision 1.30 1994/11/16 04:28:05 bentson
- * Fix from Corey Minyard, Internet: minyard@metronet.com,
- * UUCP: minyard@wf-rch.cirr.com, WORK: minyardbnr.ca, to
- * cy_hangup that appears to clear up much (all?) of the
- * DTR glitches; also he's added/cleaned-up diagnostic messages
- *
- * Revision 1.29 1994/11/16 04:16:07 bentson
- * add change proposed by Ralph Sims, ralphs@halcyon.com, to
- * operate higher speeds in same way as other serial ports;
- * add more serial ports (for up to two 16-port muxes).
- *
- * Revision 1.28 1994/11/04 00:13:16 root
- * turn off diagnostic messages
- *
- * Revision 1.27 1994/11/03 23:46:37 root
- * bunch of changes to bring driver into greater conformance
- * with the serial.c driver (looking for missed fixes)
- *
- * Revision 1.26 1994/11/03 22:40:36 root
- * automatic interrupt probing fixed.
- *
- * Revision 1.25 1994/11/03 20:17:02 root
- * start to implement auto-irq
- *
- * Revision 1.24 1994/11/03 18:01:55 root
- * still working on modem signals--trying not to drop DTR
- * during the getty/login processes
- *
- * Revision 1.23 1994/11/03 17:51:36 root
- * extend baud rate support; set receive threshold as function
- * of baud rate; fix some problems with RTS/CTS;
- *
- * Revision 1.22 1994/11/02 18:05:35 root
- * changed arguments to udelay to type long to get
- * delays to be of correct duration
- *
- * Revision 1.21 1994/11/02 17:37:30 root
- * employ udelay (after calibrating loops_per_second earlier
- * in init/main.c) instead of using home-grown delay routines
- *
- * Revision 1.20 1994/11/02 03:11:38 root
- * cy_chars_in_buffer forces a return value of 0 to let
- * login work (don't know why it does); some functions
- * that were returning EFAULT, now executes the code;
- * more work on deciding when to disable xmit interrupts;
- *
- * Revision 1.19 1994/11/01 20:10:14 root
- * define routine to start transmission interrupts (by enabling
- * transmit interrupts); directly enable/disable modem interrupts;
- *
- * Revision 1.18 1994/11/01 18:40:45 bentson
- * Don't always enable transmit interrupts in startup; interrupt on
- * TxMpty instead of TxRdy to help characters get out before shutdown;
- * restructure xmit interrupt to check for chars first and quit if
- * none are ready to go; modem status (MXVRx) is upright, _not_ inverted
- * (to my view);
- *
- * Revision 1.17 1994/10/30 04:39:45 bentson
- * rename serial_driver and callout_driver to cy_serial_driver and
- * cy_callout_driver to avoid linkage interference; initialize
- * info->type to PORT_CIRRUS; ruggedize paranoia test; elide ->port
- * from cyclades_port structure; add paranoia check to cy_close;
- *
- * Revision 1.16 1994/10/30 01:14:33 bentson
- * change major numbers; add some _early_ return statements;
- *
- * Revision 1.15 1994/10/29 06:43:15 bentson
- * final tidying up for clean compile; enable some error reporting
- *
- * Revision 1.14 1994/10/28 20:30:22 Bentson
- * lots of changes to drag the driver towards the new tty_io
- * structures and operation. not expected to work, but may
- * compile cleanly.
- *
- * Revision 1.13 1994/07/21 23:08:57 Bentson
- * add some diagnostic cruft; support 24 lines (for testing
- * both -8Y and -16Y cards; be more thorough in servicing all
- * chips during interrupt; add "volatile" a few places to
- * circumvent compiler optimizations; fix base & offset
- * computations in block_til_ready (was causing chip 0 to
- * stop operation)
- *
- * Revision 1.12 1994/07/19 16:42:11 Bentson
- * add some hackery for kernel version 1.1.8; expand
- * error messages; refine timing for delay loops and
- * declare loop params volatile
- *
- * Revision 1.11 1994/06/11 21:53:10 bentson
- * get use of save_car right in transmit interrupt service
- *
- * Revision 1.10.1.1 1994/06/11 21:31:18 bentson
- * add some diagnostic printing; try to fix save_car stuff
- *
- * Revision 1.10 1994/06/11 20:36:08 bentson
- * clean up compiler warnings
- *
- * Revision 1.9 1994/06/11 19:42:46 bentson
- * added a bunch of code to support modem signalling
- *
- * Revision 1.8 1994/06/11 17:57:07 bentson
- * recognize break & parity error
- *
- * Revision 1.7 1994/06/05 05:51:34 bentson
- * Reorder baud table to be monotonic; add cli to CP; discard
- * incoming characters and status if the line isn't open; start to
- * fold code into cy_throttle; start to port get_serial_info,
- * set_serial_info, get_modem_info, set_modem_info, and send_break
- * from serial.c; expand cy_ioctl; relocate and expand config_setup;
- * get flow control characters from tty struct; invalidate ports w/o
- * hardware;
- *
- * Revision 1.6 1994/05/31 18:42:21 bentson
- * add a loop-breaker in the interrupt service routine;
- * note when port is initialized so that it can be shut
- * down under the right conditions; receive works without
- * any obvious errors
- *
- * Revision 1.5 1994/05/30 00:55:02 bentson
- * transmit works without obvious errors
- *
- * Revision 1.4 1994/05/27 18:46:27 bentson
- * incorporated more code from lib_y.c; can now print short
- * strings under interrupt control to port zero; seems to
- * select ports/channels/lines correctly
- *
- * Revision 1.3 1994/05/25 22:12:44 bentson
- * shifting from multi-port on a card to proper multiplexor
- * data structures; added skeletons of most routines
- *
- * Revision 1.2 1994/05/19 13:21:43 bentson
- * start to crib from other sources
- *
*/
-#define CY_VERSION "2.5"
+#define CY_VERSION "2.6"
/* If you need to install more boards than NR_CARDS, change the constant
in the definition below. No other change is necessary to support up to
@@ -647,9 +79,7 @@
#include <linux/firmware.h>
#include <linux/device.h>
-#include <asm/system.h>
#include <linux/io.h>
-#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
@@ -659,13 +89,11 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-static void cy_throttle(struct tty_struct *tty);
static void cy_send_xchar(struct tty_struct *tty, char ch);
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096))
#endif
-#define WAKEUP_CHARS 256
#define STD_COM_FLAGS (0)
@@ -755,25 +183,25 @@ static int cy_next_channel; /* next minor available */
* HI VHI
* 20
*/
-static int baud_table[] = {
+static const int baud_table[] = {
0, 50, 75, 110, 134, 150, 200, 300, 600, 1200,
1800, 2400, 4800, 9600, 19200, 38400, 57600, 76800, 115200, 150000,
230400, 0
};
-static char baud_co_25[] = { /* 25 MHz clock option table */
+static const char baud_co_25[] = { /* 25 MHz clock option table */
/* value => 00 01 02 03 04 */
/* divide by 8 32 128 512 2048 */
0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02,
0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-static char baud_bpr_25[] = { /* 25 MHz baud rate period table */
+static const char baud_bpr_25[] = { /* 25 MHz baud rate period table */
0x00, 0xf5, 0xa3, 0x6f, 0x5c, 0x51, 0xf5, 0xa3, 0x51, 0xa3,
0x6d, 0x51, 0xa3, 0x51, 0xa3, 0x51, 0x36, 0x29, 0x1b, 0x15
};
-static char baud_co_60[] = { /* 60 MHz clock option table (CD1400 J) */
+static const char baud_co_60[] = { /* 60 MHz clock option table (CD1400 J) */
/* value => 00 01 02 03 04 */
/* divide by 8 32 128 512 2048 */
0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03,
@@ -781,13 +209,13 @@ static char baud_co_60[] = { /* 60 MHz clock option table (CD1400 J) */
0x00
};
-static char baud_bpr_60[] = { /* 60 MHz baud rate period table (CD1400 J) */
+static const char baud_bpr_60[] = { /* 60 MHz baud rate period table (CD1400 J) */
0x00, 0x82, 0x21, 0xff, 0xdb, 0xc3, 0x92, 0x62, 0xc3, 0x62,
0x41, 0xc3, 0x62, 0xc3, 0x62, 0xc3, 0x82, 0x62, 0x41, 0x32,
0x21
};
-static char baud_cor3[] = { /* receive threshold */
+static const char baud_cor3[] = { /* receive threshold */
0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x08, 0x08, 0x07,
0x07
@@ -804,7 +232,7 @@ static char baud_cor3[] = { /* receive threshold */
* cables.
*/
-static char rflow_thr[] = { /* rflow threshold */
+static const char rflow_thr[] = { /* rflow threshold */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
0x0a
@@ -813,7 +241,7 @@ static char rflow_thr[] = { /* rflow threshold */
/* The Cyclom-Ye has placed the sequential chips in non-sequential
* address order. This look-up table overcomes that problem.
*/
-static int cy_chip_offset[] = { 0x0000,
+static const unsigned int cy_chip_offset[] = { 0x0000,
0x0400,
0x0800,
0x0C00,
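The table above matters because every Cyclom-Y register access has to select the right CD1400 chip window first. As an illustrative sketch (not part of the patch, using the driver's own cy_chip_offset and bus_index names), the chip's register base is derived like this, the same arithmetic the ISA probe and pre-conversion hunks below still show:

	/* locate CD1400 chip 'chip' on a Cyclom-Y card */
	void __iomem *base_addr =
		card->base_addr + (cy_chip_offset[chip] << card->bus_index);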
@@ -826,7 +254,7 @@ static int cy_chip_offset[] = { 0x0000,
/* PCI related definitions */
#ifdef CONFIG_PCI
-static struct pci_device_id cy_pci_dev_id[] __devinitdata = {
+static const struct pci_device_id cy_pci_dev_id[] = {
/* PCI < 1Mb */
{ PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) },
/* PCI > 1Mb */
@@ -849,7 +277,7 @@ MODULE_DEVICE_TABLE(pci, cy_pci_dev_id);
#endif
static void cy_start(struct tty_struct *);
-static void set_line_char(struct cyclades_port *);
+static void cy_set_line_char(struct cyclades_port *, struct tty_struct *);
static int cyz_issue_cmd(struct cyclades_card *, __u32, __u8, __u32);
#ifdef CONFIG_ISA
static unsigned detect_isa_irq(void __iomem *);
@@ -868,6 +296,20 @@ static void cyz_rx_restart(unsigned long);
static struct timer_list cyz_rx_full_timer[NR_PORTS];
#endif /* CONFIG_CYZ_INTR */
+static inline void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val)
+{
+ struct cyclades_card *card = port->card;
+
+ cy_writeb(port->u.cyy.base_addr + (reg << card->bus_index), val);
+}
+
+static inline u8 cyy_readb(struct cyclades_port *port, u32 reg)
+{
+ struct cyclades_card *card = port->card;
+
+ return readb(port->u.cyy.base_addr + (reg << card->bus_index));
+}
+
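These two helpers exist so the per-port register accesses below can stop open-coding the base-address-plus-shift arithmetic. A minimal before/after sketch, taken from the surrounding hunks:

	/* old open-coded form, removed throughout this patch: */
	cy_writeb(base_addr + (CySRER << index),
		  readb(base_addr + (CySRER << index)) & ~CyTxRdy);

	/* equivalent form with the new helpers: */
	cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy);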
static inline bool cy_is_Z(struct cyclades_card *card)
{
return card->num_chips == (unsigned int)-1;
@@ -892,7 +334,7 @@ static inline bool cyz_is_loaded(struct cyclades_card *card)
}
static inline int serial_paranoia_check(struct cyclades_port *info,
- char *name, const char *routine)
+ const char *name, const char *routine)
{
#ifdef SERIAL_PARANOIA_CHECK
if (!info) {
@@ -908,7 +350,7 @@ static inline int serial_paranoia_check(struct cyclades_port *info,
}
#endif
return 0;
-} /* serial_paranoia_check */
+}
/***********************************************************/
/********* Start of block of Cyclom-Y specific code ********/
@@ -920,13 +362,14 @@ static inline int serial_paranoia_check(struct cyclades_port *info,
This function is only called from inside spinlock-protected code.
*/
-static int cyy_issue_cmd(void __iomem *base_addr, u_char cmd, int index)
+static int __cyy_issue_cmd(void __iomem *base_addr, u8 cmd, int index)
{
+ void __iomem *ccr = base_addr + (CyCCR << index);
unsigned int i;
/* Check to see that the previous command has completed */
for (i = 0; i < 100; i++) {
- if (readb(base_addr + (CyCCR << index)) == 0)
+ if (readb(ccr) == 0)
break;
udelay(10L);
}
@@ -936,10 +379,16 @@ static int cyy_issue_cmd(void __iomem *base_addr, u_char cmd, int index)
return -1;
/* Issue the new command */
- cy_writeb(base_addr + (CyCCR << index), cmd);
+ cy_writeb(ccr, cmd);
return 0;
-} /* cyy_issue_cmd */
+}
+
+static inline int cyy_issue_cmd(struct cyclades_port *port, u8 cmd)
+{
+ return __cyy_issue_cmd(port->u.cyy.base_addr, cmd,
+ port->card->bus_index);
+}
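The wrapper keeps the raw __cyy_issue_cmd() form available for the ISA probe, which has no struct cyclades_port yet, while callers that do have a port can drop the explicit base address and bus index. A hedged usage sketch matching the startup and start_xmit hunks later in this patch:

	/* select the channel, then enable receiver and transmitter on it */
	cyy_writeb(info, CyCAR, channel & 0x03);
	cyy_issue_cmd(info, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR);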
#ifdef CONFIG_ISA
/* ISA interrupt detection code */
@@ -959,12 +408,12 @@ static unsigned detect_isa_irq(void __iomem *address)
irqs = probe_irq_on();
/* Wait ... */
- udelay(5000L);
+ msleep(5);
/* Enable the Tx interrupts on the CD1400 */
local_irq_save(flags);
cy_writeb(address + (CyCAR << index), 0);
- cyy_issue_cmd(address, CyCHAN_CTL | CyENB_XMTR, index);
+ __cyy_issue_cmd(address, CyCHAN_CTL | CyENB_XMTR, index);
cy_writeb(address + (CyCAR << index), 0);
cy_writeb(address + (CySRER << index),
@@ -972,7 +421,7 @@ static unsigned detect_isa_irq(void __iomem *address)
local_irq_restore(flags);
/* Wait ... */
- udelay(5000L);
+ msleep(5);
/* Check which interrupt is in use */
irq = probe_irq_off(irqs);
@@ -998,7 +447,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
struct cyclades_port *info;
struct tty_struct *tty;
int len, index = cinfo->bus_index;
- u8 save_xir, channel, save_car, data, char_count;
+ u8 ivr, save_xir, channel, save_car, data, char_count;
#ifdef CY_DEBUG_INTERRUPTS
printk(KERN_DEBUG "cyy_interrupt: rcvd intr, chip %d\n", chip);
@@ -1007,26 +456,25 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
save_xir = readb(base_addr + (CyRIR << index));
channel = save_xir & CyIRChannel;
info = &cinfo->ports[channel + chip * 4];
- save_car = readb(base_addr + (CyCAR << index));
- cy_writeb(base_addr + (CyCAR << index), save_xir);
+ save_car = cyy_readb(info, CyCAR);
+ cyy_writeb(info, CyCAR, save_xir);
+ ivr = cyy_readb(info, CyRIVR) & CyIVRMask;
+ tty = tty_port_tty_get(&info->port);
/* if there is nowhere to put the data, discard it */
- if (info->port.tty == NULL) {
- if ((readb(base_addr + (CyRIVR << index)) & CyIVRMask) ==
- CyIVRRxEx) { /* exception */
- data = readb(base_addr + (CyRDSR << index));
+ if (tty == NULL) {
+ if (ivr == CyIVRRxEx) { /* exception */
+ data = cyy_readb(info, CyRDSR);
} else { /* normal character reception */
- char_count = readb(base_addr + (CyRDCR << index));
+ char_count = cyy_readb(info, CyRDCR);
while (char_count--)
- data = readb(base_addr + (CyRDSR << index));
+ data = cyy_readb(info, CyRDSR);
}
goto end;
}
/* there is an open port for this data */
- tty = info->port.tty;
- if ((readb(base_addr + (CyRIVR << index)) & CyIVRMask) ==
- CyIVRRxEx) { /* exception */
- data = readb(base_addr + (CyRDSR << index));
+ if (ivr == CyIVRRxEx) { /* exception */
+ data = cyy_readb(info, CyRDSR);
/* For statistics only */
if (data & CyBREAK)
@@ -1040,28 +488,29 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
if (data & info->ignore_status_mask) {
info->icount.rx++;
+ tty_kref_put(tty);
return;
}
if (tty_buffer_request_room(tty, 1)) {
if (data & info->read_status_mask) {
if (data & CyBREAK) {
tty_insert_flip_char(tty,
- readb(base_addr + (CyRDSR <<
- index)), TTY_BREAK);
+ cyy_readb(info, CyRDSR),
+ TTY_BREAK);
info->icount.rx++;
if (info->port.flags & ASYNC_SAK)
do_SAK(tty);
} else if (data & CyFRAME) {
tty_insert_flip_char(tty,
- readb(base_addr + (CyRDSR <<
- index)), TTY_FRAME);
+ cyy_readb(info, CyRDSR),
+ TTY_FRAME);
info->icount.rx++;
info->idle_stats.frame_errs++;
} else if (data & CyPARITY) {
/* Pieces of seven... */
tty_insert_flip_char(tty,
- readb(base_addr + (CyRDSR <<
- index)), TTY_PARITY);
+ cyy_readb(info, CyRDSR),
+ TTY_PARITY);
info->icount.rx++;
info->idle_stats.parity_errs++;
} else if (data & CyOVERRUN) {
@@ -1073,8 +522,8 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
the next incoming character.
*/
tty_insert_flip_char(tty,
- readb(base_addr + (CyRDSR <<
- index)), TTY_FRAME);
+ cyy_readb(info, CyRDSR),
+ TTY_FRAME);
info->icount.rx++;
info->idle_stats.overruns++;
/* These two conditions may imply */
@@ -1098,7 +547,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
}
} else { /* normal character reception */
/* load # chars available from the chip */
- char_count = readb(base_addr + (CyRDCR << index));
+ char_count = cyy_readb(info, CyRDCR);
#ifdef CY_ENABLE_MONITORING
++info->mon.int_count;
@@ -1109,7 +558,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
#endif
len = tty_buffer_request_room(tty, char_count);
while (len--) {
- data = readb(base_addr + (CyRDSR << index));
+ data = cyy_readb(info, CyRDSR);
tty_insert_flip_char(tty, data, TTY_NORMAL);
info->idle_stats.recv_bytes++;
info->icount.rx++;
@@ -1120,16 +569,18 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
info->idle_stats.recv_idle = jiffies;
}
tty_schedule_flip(tty);
+ tty_kref_put(tty);
end:
/* end of service */
- cy_writeb(base_addr + (CyRIR << index), save_xir & 0x3f);
- cy_writeb(base_addr + (CyCAR << index), save_car);
+ cyy_writeb(info, CyRIR, save_xir & 0x3f);
+ cyy_writeb(info, CyCAR, save_car);
}
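cyy_chip_rx() above, and the tx and modem service routines below, switch from dereferencing info->port.tty to the reference-counted accessor. The shape of that change, sketched outside the patch for clarity:

	struct tty_struct *tty = tty_port_tty_get(&info->port);	/* takes a kref */

	if (tty == NULL) {
		/* nowhere to put the data: drain the FIFO and acknowledge */
	} else {
		/* ... push characters, wake up writers ... */
		tty_kref_put(tty);	/* balance the reference taken above */
	}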
static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
void __iomem *base_addr)
{
struct cyclades_port *info;
+ struct tty_struct *tty;
int char_count, index = cinfo->bus_index;
u8 save_xir, channel, save_car, outch;
@@ -1153,9 +604,9 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
goto end;
}
info = &cinfo->ports[channel + chip * 4];
- if (info->port.tty == NULL) {
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) & ~CyTxRdy);
+ tty = tty_port_tty_get(&info->port);
+ if (tty == NULL) {
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy);
goto end;
}
@@ -1164,7 +615,7 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
if (info->x_char) { /* send special char */
outch = info->x_char;
- cy_writeb(base_addr + (CyTDR << index), outch);
+ cyy_writeb(info, CyTDR, outch);
char_count--;
info->icount.tx++;
info->x_char = 0;
@@ -1172,14 +623,14 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
if (info->breakon || info->breakoff) {
if (info->breakon) {
- cy_writeb(base_addr + (CyTDR << index), 0);
- cy_writeb(base_addr + (CyTDR << index), 0x81);
+ cyy_writeb(info, CyTDR, 0);
+ cyy_writeb(info, CyTDR, 0x81);
info->breakon = 0;
char_count -= 2;
}
if (info->breakoff) {
- cy_writeb(base_addr + (CyTDR << index), 0);
- cy_writeb(base_addr + (CyTDR << index), 0x83);
+ cyy_writeb(info, CyTDR, 0);
+ cyy_writeb(info, CyTDR, 0x83);
info->breakoff = 0;
char_count -= 2;
}
@@ -1187,27 +638,23 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
while (char_count-- > 0) {
if (!info->xmit_cnt) {
- if (readb(base_addr + (CySRER << index)) & CyTxMpty) {
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) &
- ~CyTxMpty);
+ if (cyy_readb(info, CySRER) & CyTxMpty) {
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) & ~CyTxMpty);
} else {
- cy_writeb(base_addr + (CySRER << index),
- (readb(base_addr + (CySRER << index)) &
- ~CyTxRdy) | CyTxMpty);
+ cyy_writeb(info, CySRER, CyTxMpty |
+ (cyy_readb(info, CySRER) & ~CyTxRdy));
}
goto done;
}
if (info->port.xmit_buf == NULL) {
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) &
- ~CyTxRdy);
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) & ~CyTxRdy);
goto done;
}
- if (info->port.tty->stopped || info->port.tty->hw_stopped) {
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) &
- ~CyTxRdy);
+ if (tty->stopped || tty->hw_stopped) {
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) & ~CyTxRdy);
goto done;
}
/* Because the Embedded Transmit Commands have been enabled,
@@ -1224,15 +671,15 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
info->xmit_cnt--;
info->xmit_tail = (info->xmit_tail + 1) &
(SERIAL_XMIT_SIZE - 1);
- cy_writeb(base_addr + (CyTDR << index), outch);
+ cyy_writeb(info, CyTDR, outch);
info->icount.tx++;
} else {
if (char_count > 1) {
info->xmit_cnt--;
info->xmit_tail = (info->xmit_tail + 1) &
(SERIAL_XMIT_SIZE - 1);
- cy_writeb(base_addr + (CyTDR << index), outch);
- cy_writeb(base_addr + (CyTDR << index), 0);
+ cyy_writeb(info, CyTDR, outch);
+ cyy_writeb(info, CyTDR, 0);
info->icount.tx++;
char_count--;
}
@@ -1240,17 +687,19 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
}
done:
- tty_wakeup(info->port.tty);
+ tty_wakeup(tty);
+ tty_kref_put(tty);
end:
/* end of service */
- cy_writeb(base_addr + (CyTIR << index), save_xir & 0x3f);
- cy_writeb(base_addr + (CyCAR << index), save_car);
+ cyy_writeb(info, CyTIR, save_xir & 0x3f);
+ cyy_writeb(info, CyCAR, save_car);
}
static void cyy_chip_modem(struct cyclades_card *cinfo, int chip,
void __iomem *base_addr)
{
struct cyclades_port *info;
+ struct tty_struct *tty;
int index = cinfo->bus_index;
u8 save_xir, channel, save_car, mdm_change, mdm_status;
@@ -1258,13 +707,14 @@ static void cyy_chip_modem(struct cyclades_card *cinfo, int chip,
save_xir = readb(base_addr + (CyMIR << index));
channel = save_xir & CyIRChannel;
info = &cinfo->ports[channel + chip * 4];
- save_car = readb(base_addr + (CyCAR << index));
- cy_writeb(base_addr + (CyCAR << index), save_xir);
+ save_car = cyy_readb(info, CyCAR);
+ cyy_writeb(info, CyCAR, save_xir);
- mdm_change = readb(base_addr + (CyMISR << index));
- mdm_status = readb(base_addr + (CyMSVR1 << index));
+ mdm_change = cyy_readb(info, CyMISR);
+ mdm_status = cyy_readb(info, CyMSVR1);
- if (!info->port.tty)
+ tty = tty_port_tty_get(&info->port);
+ if (!tty)
goto end;
if (mdm_change & CyANY_DELTA) {
@@ -1282,31 +732,28 @@ static void cyy_chip_modem(struct cyclades_card *cinfo, int chip,
}
if ((mdm_change & CyDCD) && (info->port.flags & ASYNC_CHECK_CD)) {
- if (!(mdm_status & CyDCD)) {
- tty_hangup(info->port.tty);
- info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
- }
- wake_up_interruptible(&info->port.open_wait);
+ if (mdm_status & CyDCD)
+ wake_up_interruptible(&info->port.open_wait);
+ else
+ tty_hangup(tty);
}
if ((mdm_change & CyCTS) && (info->port.flags & ASYNC_CTS_FLOW)) {
- if (info->port.tty->hw_stopped) {
+ if (tty->hw_stopped) {
if (mdm_status & CyCTS) {
/* cy_start isn't used
because... !!! */
- info->port.tty->hw_stopped = 0;
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) |
- CyTxRdy);
- tty_wakeup(info->port.tty);
+ tty->hw_stopped = 0;
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) | CyTxRdy);
+ tty_wakeup(tty);
}
} else {
if (!(mdm_status & CyCTS)) {
/* cy_stop isn't used
because ... !!! */
- info->port.tty->hw_stopped = 1;
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) &
- ~CyTxRdy);
+ tty->hw_stopped = 1;
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) & ~CyTxRdy);
}
}
}
@@ -1314,10 +761,11 @@ static void cyy_chip_modem(struct cyclades_card *cinfo, int chip,
}
if (mdm_change & CyRI) {
}*/
+ tty_kref_put(tty);
end:
/* end of service */
- cy_writeb(base_addr + (CyMIR << index), save_xir & 0x3f);
- cy_writeb(base_addr + (CyCAR << index), save_car);
+ cyy_writeb(info, CyMIR, save_xir & 0x3f);
+ cyy_writeb(info, CyCAR, save_car);
}
/* The real interrupt service routine is called
@@ -1388,6 +836,56 @@ static irqreturn_t cyy_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
} /* cyy_interrupt */
+static void cyy_change_rts_dtr(struct cyclades_port *info, unsigned int set,
+ unsigned int clear)
+{
+ struct cyclades_card *card = info->card;
+ int channel = info->line - card->first_line;
+ u32 rts, dtr, msvrr, msvrd;
+
+ channel &= 0x03;
+
+ if (info->rtsdtr_inv) {
+ msvrr = CyMSVR2;
+ msvrd = CyMSVR1;
+ rts = CyDTR;
+ dtr = CyRTS;
+ } else {
+ msvrr = CyMSVR1;
+ msvrd = CyMSVR2;
+ rts = CyRTS;
+ dtr = CyDTR;
+ }
+ if (set & TIOCM_RTS) {
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, msvrr, rts);
+ }
+ if (clear & TIOCM_RTS) {
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, msvrr, ~rts);
+ }
+ if (set & TIOCM_DTR) {
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, msvrd, dtr);
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "cyc:set_modem_info raising DTR\n");
+ printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
+ cyy_readb(info, CyMSVR1),
+ cyy_readb(info, CyMSVR2));
+#endif
+ }
+ if (clear & TIOCM_DTR) {
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, msvrd, ~dtr);
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "cyc:set_modem_info dropping DTR\n");
+ printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
+ cyy_readb(info, CyMSVR1),
+ cyy_readb(info, CyMSVR2));
+#endif
+ }
+}
+
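cyy_change_rts_dtr() centralizes the MSVR1/MSVR2 writes, including the rtsdtr_inv swap for boards that wire the two outputs the other way around. The later startup and shutdown hunks call it like this:

	cyy_change_rts_dtr(info, TIOCM_RTS | TIOCM_DTR, 0);	/* raise both on startup */
	cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR);	/* drop both on HUPCL shutdown */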
/***********************************************************/
/********* End of block of Cyclom-Y specific code **********/
/******** Start of block of Cyclades-Z specific code *******/
@@ -1397,15 +895,9 @@ static int
cyz_fetch_msg(struct cyclades_card *cinfo,
__u32 *channel, __u8 *cmd, __u32 *param)
{
- struct FIRM_ID __iomem *firm_id;
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
+ struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
unsigned long loc_doorbell;
- firm_id = cinfo->base_addr + ID_ADDRESS;
- zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
-
loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell);
if (loc_doorbell) {
*cmd = (char)(0xff & loc_doorbell);
@@ -1421,19 +913,13 @@ static int
cyz_issue_cmd(struct cyclades_card *cinfo,
__u32 channel, __u8 cmd, __u32 param)
{
- struct FIRM_ID __iomem *firm_id;
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
+ struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
__u32 __iomem *pci_doorbell;
unsigned int index;
- firm_id = cinfo->base_addr + ID_ADDRESS;
if (!cyz_is_loaded(cinfo))
return -1;
- zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
-
index = 0;
pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell;
while ((readl(pci_doorbell) & 0xff) != 0) {
@@ -1448,11 +934,10 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
return 0;
} /* cyz_issue_cmd */
-static void cyz_handle_rx(struct cyclades_port *info,
- struct BUF_CTRL __iomem *buf_ctrl)
+static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty)
{
+ struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
struct cyclades_card *cinfo = info->card;
- struct tty_struct *tty = info->port.tty;
unsigned int char_count;
int len;
#ifdef BLOCKMOVE
@@ -1541,11 +1026,10 @@ static void cyz_handle_rx(struct cyclades_port *info,
}
}
-static void cyz_handle_tx(struct cyclades_port *info,
- struct BUF_CTRL __iomem *buf_ctrl)
+static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty)
{
+ struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
struct cyclades_card *cinfo = info->card;
- struct tty_struct *tty = info->port.tty;
u8 data;
unsigned int char_count;
#ifdef BLOCKMOVE
@@ -1620,34 +1104,24 @@ ztxdone:
static void cyz_handle_cmd(struct cyclades_card *cinfo)
{
+ struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
struct tty_struct *tty;
struct cyclades_port *info;
- static struct FIRM_ID __iomem *firm_id;
- static struct ZFW_CTRL __iomem *zfw_ctrl;
- static struct BOARD_CTRL __iomem *board_ctrl;
- static struct CH_CTRL __iomem *ch_ctrl;
- static struct BUF_CTRL __iomem *buf_ctrl;
__u32 channel, param, fw_ver;
__u8 cmd;
int special_count;
int delta_count;
- firm_id = cinfo->base_addr + ID_ADDRESS;
- zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
fw_ver = readl(&board_ctrl->fw_version);
while (cyz_fetch_msg(cinfo, &channel, &cmd, &param) == 1) {
special_count = 0;
delta_count = 0;
info = &cinfo->ports[channel];
- tty = info->port.tty;
+ tty = tty_port_tty_get(&info->port);
if (tty == NULL)
continue;
- ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]);
- buf_ctrl = &(zfw_ctrl->buf_ctrl[channel]);
-
switch (cmd) {
case C_CM_PR_ERROR:
tty_insert_flip_char(tty, 0, TTY_PARITY);
@@ -1668,15 +1142,12 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
info->icount.dcd++;
delta_count++;
if (info->port.flags & ASYNC_CHECK_CD) {
- if ((fw_ver > 241 ? ((u_long) param) :
- readl(&ch_ctrl->rs_status)) &
- C_RS_DCD) {
- wake_up_interruptible(&info->port.open_wait);
- } else {
- tty_hangup(info->port.tty);
+ u32 dcd = fw_ver > 241 ? param :
+ readl(&info->u.cyz.ch_ctrl->rs_status);
+ if (dcd & C_RS_DCD)
wake_up_interruptible(&info->port.open_wait);
- info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
- }
+ else
+ tty_hangup(tty);
}
break;
case C_CM_MCTS:
@@ -1705,7 +1176,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
printk(KERN_DEBUG "cyz_interrupt: rcvd intr, card %d, "
"port %ld\n", info->card, channel);
#endif
- cyz_handle_rx(info, buf_ctrl);
+ cyz_handle_rx(info, tty);
break;
case C_CM_TXBEMPTY:
case C_CM_TXLOWWM:
@@ -1715,7 +1186,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
printk(KERN_DEBUG "cyz_interrupt: xmit intr, card %d, "
"port %ld\n", info->card, channel);
#endif
- cyz_handle_tx(info, buf_ctrl);
+ cyz_handle_tx(info, tty);
break;
#endif /* CONFIG_CYZ_INTR */
case C_CM_FATAL:
@@ -1728,6 +1199,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
wake_up_interruptible(&info->delta_msr_wait);
if (special_count)
tty_schedule_flip(tty);
+ tty_kref_put(tty);
}
}
@@ -1773,10 +1245,6 @@ static void cyz_poll(unsigned long arg)
{
struct cyclades_card *cinfo;
struct cyclades_port *info;
- struct tty_struct *tty;
- struct FIRM_ID __iomem *firm_id;
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BUF_CTRL __iomem *buf_ctrl;
unsigned long expires = jiffies + HZ;
unsigned int port, card;
@@ -1788,10 +1256,6 @@ static void cyz_poll(unsigned long arg)
if (!cyz_is_loaded(cinfo))
continue;
- firm_id = cinfo->base_addr + ID_ADDRESS;
- zfw_ctrl = cinfo->base_addr +
- (readl(&firm_id->zfwctrl_addr) & 0xfffff);
-
/* Skip first polling cycle to avoid racing conditions with the FW */
if (!cinfo->intr_enabled) {
cinfo->intr_enabled = 1;
@@ -1801,13 +1265,17 @@ static void cyz_poll(unsigned long arg)
cyz_handle_cmd(cinfo);
for (port = 0; port < cinfo->nports; port++) {
+ struct tty_struct *tty;
+
info = &cinfo->ports[port];
- tty = info->port.tty;
- buf_ctrl = &(zfw_ctrl->buf_ctrl[port]);
+ tty = tty_port_tty_get(&info->port);
+ /* OK to pass NULL to the handle functions below.
+ They need to drop the data in that case. */
if (!info->throttle)
- cyz_handle_rx(info, buf_ctrl);
- cyz_handle_tx(info, buf_ctrl);
+ cyz_handle_rx(info, tty);
+ cyz_handle_tx(info, tty);
+ tty_kref_put(tty);
}
/* poll every 'cyz_polling_cycle' period */
expires = jiffies + cyz_polling_cycle;
@@ -1823,13 +1291,12 @@ static void cyz_poll(unsigned long arg)
/* This is called whenever a port becomes active;
interrupts are enabled and DTR & RTS are turned on.
*/
-static int startup(struct cyclades_port *info)
+static int cy_startup(struct cyclades_port *info, struct tty_struct *tty)
{
struct cyclades_card *card;
unsigned long flags;
int retval = 0;
- void __iomem *base_addr;
- int chip, channel, index;
+ int channel;
unsigned long page;
card = info->card;
@@ -1841,15 +1308,11 @@ static int startup(struct cyclades_port *info)
spin_lock_irqsave(&card->card_lock, flags);
- if (info->port.flags & ASYNC_INITIALIZED) {
- free_page(page);
+ if (info->port.flags & ASYNC_INITIALIZED)
goto errout;
- }
if (!info->type) {
- if (info->port.tty)
- set_bit(TTY_IO_ERROR, &info->port.tty->flags);
- free_page(page);
+ set_bit(TTY_IO_ERROR, &tty->flags);
goto errout;
}
@@ -1860,96 +1323,53 @@ static int startup(struct cyclades_port *info)
spin_unlock_irqrestore(&card->card_lock, flags);
- set_line_char(info);
+ cy_set_line_char(info, tty);
if (!cy_is_Z(card)) {
- chip = channel >> 2;
channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
-#ifdef CY_DEBUG_OPEN
- printk(KERN_DEBUG "cyc startup card %d, chip %d, channel %d, "
- "base_addr %p\n",
- card, chip, channel, base_addr);
-#endif
spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index), (u_char) channel);
+ cyy_writeb(info, CyCAR, channel);
- cy_writeb(base_addr + (CyRTPR << index),
+ cyy_writeb(info, CyRTPR,
(info->default_timeout ? info->default_timeout : 0x02));
/* 10ms rx timeout */
- cyy_issue_cmd(base_addr, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR,
- index);
-
- cy_writeb(base_addr + (CyCAR << index), (u_char) channel);
- cy_writeb(base_addr + (CyMSVR1 << index), CyRTS);
- cy_writeb(base_addr + (CyMSVR2 << index), CyDTR);
-
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:startup raising DTR\n");
- printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
- readb(base_addr + (CyMSVR1 << index)),
- readb(base_addr + (CyMSVR2 << index)));
-#endif
+ cyy_issue_cmd(info, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR);
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) | CyRxData);
- info->port.flags |= ASYNC_INITIALIZED;
-
- if (info->port.tty)
- clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- info->breakon = info->breakoff = 0;
- memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
- info->idle_stats.in_use =
- info->idle_stats.recv_idle =
- info->idle_stats.xmit_idle = jiffies;
-
- spin_unlock_irqrestore(&card->card_lock, flags);
+ cyy_change_rts_dtr(info, TIOCM_RTS | TIOCM_DTR, 0);
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyRxData);
} else {
- struct FIRM_ID __iomem *firm_id;
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
- struct CH_CTRL __iomem *ch_ctrl;
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
- base_addr = card->base_addr;
-
- firm_id = base_addr + ID_ADDRESS;
if (!cyz_is_loaded(card))
return -ENODEV;
- zfw_ctrl = card->base_addr +
- (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
- ch_ctrl = zfw_ctrl->ch_ctrl;
-
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc startup Z card %d, channel %d, "
- "base_addr %p\n", card, channel, base_addr);
+ "base_addr %p\n", card, channel, card->base_addr);
#endif
spin_lock_irqsave(&card->card_lock, flags);
- cy_writel(&ch_ctrl[channel].op_mode, C_CH_ENABLE);
+ cy_writel(&ch_ctrl->op_mode, C_CH_ENABLE);
#ifdef Z_WAKE
#ifdef CONFIG_CYZ_INTR
- cy_writel(&ch_ctrl[channel].intr_enable,
+ cy_writel(&ch_ctrl->intr_enable,
C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM |
C_IN_RXNNDT | C_IN_IOCTLW | C_IN_MDCD);
#else
- cy_writel(&ch_ctrl[channel].intr_enable,
+ cy_writel(&ch_ctrl->intr_enable,
C_IN_IOCTLW | C_IN_MDCD);
#endif /* CONFIG_CYZ_INTR */
#else
#ifdef CONFIG_CYZ_INTR
- cy_writel(&ch_ctrl[channel].intr_enable,
+ cy_writel(&ch_ctrl->intr_enable,
C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM |
C_IN_RXNNDT | C_IN_MDCD);
#else
- cy_writel(&ch_ctrl[channel].intr_enable, C_IN_MDCD);
+ cy_writel(&ch_ctrl->intr_enable, C_IN_MDCD);
#endif /* CONFIG_CYZ_INTR */
#endif /* Z_WAKE */
@@ -1968,32 +1388,22 @@ static int startup(struct cyclades_port *info)
/* set timeout !!! */
/* set RTS and DTR !!! */
- cy_writel(&ch_ctrl[channel].rs_control,
- readl(&ch_ctrl[channel].rs_control) | C_RS_RTS |
- C_RS_DTR);
- retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
- if (retval != 0) {
- printk(KERN_ERR "cyc:startup(3) retval on ttyC%d was "
- "%x\n", info->line, retval);
- }
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:startup raising Z DTR\n");
-#endif
+ tty_port_raise_dtr_rts(&info->port);
/* enable send, recv, modem !!! */
+ }
- info->port.flags |= ASYNC_INITIALIZED;
- if (info->port.tty)
- clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- info->breakon = info->breakoff = 0;
- memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
- info->idle_stats.in_use =
- info->idle_stats.recv_idle =
- info->idle_stats.xmit_idle = jiffies;
+ info->port.flags |= ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&card->card_lock, flags);
- }
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+ info->breakon = info->breakoff = 0;
+ memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
+ info->idle_stats.in_use =
+ info->idle_stats.recv_idle =
+ info->idle_stats.xmit_idle = jiffies;
+
+ spin_unlock_irqrestore(&card->card_lock, flags);
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc startup done\n");
@@ -2002,28 +1412,20 @@ static int startup(struct cyclades_port *info)
errout:
spin_unlock_irqrestore(&card->card_lock, flags);
+ free_page(page);
return retval;
} /* startup */
static void start_xmit(struct cyclades_port *info)
{
- struct cyclades_card *card;
+ struct cyclades_card *card = info->card;
unsigned long flags;
- void __iomem *base_addr;
- int chip, channel, index;
+ int channel = info->line - card->first_line;
- card = info->card;
- channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
-
spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index), channel);
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) | CyTxRdy);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
#ifdef CONFIG_CYZ_INTR
@@ -2046,12 +1448,11 @@ static void start_xmit(struct cyclades_port *info)
* This routine shuts down a serial port; interrupts are disabled,
* and DTR is dropped if the hangup on close termio flag is on.
*/
-static void shutdown(struct cyclades_port *info)
+static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
{
struct cyclades_card *card;
unsigned long flags;
- void __iomem *base_addr;
- int chip, channel, index;
+ int channel;
if (!(info->port.flags & ASYNC_INITIALIZED))
return;
@@ -2059,17 +1460,6 @@ static void shutdown(struct cyclades_port *info)
card = info->card;
channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
-
-#ifdef CY_DEBUG_OPEN
- printk(KERN_DEBUG "cyc shutdown Y card %d, chip %d, "
- "channel %d, base_addr %p\n",
- card, chip, channel, base_addr);
-#endif
-
spin_lock_irqsave(&card->card_lock, flags);
/* Clear delta_msr_wait queue to avoid mem leaks. */
@@ -2081,47 +1471,25 @@ static void shutdown(struct cyclades_port *info)
info->port.xmit_buf = NULL;
free_page((unsigned long)temp);
}
- cy_writeb(base_addr + (CyCAR << index), (u_char) channel);
- if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL)) {
- cy_writeb(base_addr + (CyMSVR1 << index), ~CyRTS);
- cy_writeb(base_addr + (CyMSVR2 << index), ~CyDTR);
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc shutdown dropping DTR\n");
- printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
- readb(base_addr + (CyMSVR1 << index)),
- readb(base_addr + (CyMSVR2 << index)));
-#endif
- }
- cyy_issue_cmd(base_addr, CyCHAN_CTL | CyDIS_RCVR, index);
+ if (tty->termios->c_cflag & HUPCL)
+ cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR);
+
+ cyy_issue_cmd(info, CyCHAN_CTL | CyDIS_RCVR);
/* it may be appropriate to clear _XMIT at
some later date (after testing)!!! */
- if (info->port.tty)
- set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+ set_bit(TTY_IO_ERROR, &tty->flags);
info->port.flags &= ~ASYNC_INITIALIZED;
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
- struct FIRM_ID __iomem *firm_id;
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
- struct CH_CTRL __iomem *ch_ctrl;
- int retval;
-
- base_addr = card->base_addr;
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc shutdown Z card %d, channel %d, "
- "base_addr %p\n", card, channel, base_addr);
+ "base_addr %p\n", card, channel, card->base_addr);
#endif
- firm_id = base_addr + ID_ADDRESS;
if (!cyz_is_loaded(card))
return;
- zfw_ctrl = card->base_addr +
- (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
- ch_ctrl = zfw_ctrl->ch_ctrl;
-
spin_lock_irqsave(&card->card_lock, flags);
if (info->port.xmit_buf) {
@@ -2131,23 +1499,10 @@ static void shutdown(struct cyclades_port *info)
free_page((unsigned long)temp);
}
- if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL)) {
- cy_writel(&ch_ctrl[channel].rs_control,
- (__u32)(readl(&ch_ctrl[channel].rs_control) &
- ~(C_RS_RTS | C_RS_DTR)));
- retval = cyz_issue_cmd(info->card, channel,
- C_CM_IOCTLM, 0L);
- if (retval != 0) {
- printk(KERN_ERR"cyc:shutdown retval on ttyC%d "
- "was %x\n", info->line, retval);
- }
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:shutdown dropping Z DTR\n");
-#endif
- }
+ if (tty->termios->c_cflag & HUPCL)
+ tty_port_lower_dtr_rts(&info->port);
- if (info->port.tty)
- set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+ set_bit(TTY_IO_ERROR, &tty->flags);
info->port.flags &= ~ASYNC_INITIALIZED;
spin_unlock_irqrestore(&card->card_lock, flags);
@@ -2164,199 +1519,6 @@ static void shutdown(struct cyclades_port *info)
* ------------------------------------------------------------
*/
-static int
-block_til_ready(struct tty_struct *tty, struct file *filp,
- struct cyclades_port *info)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct cyclades_card *cinfo;
- unsigned long flags;
- int chip, channel, index;
- int retval;
- void __iomem *base_addr;
-
- cinfo = info->card;
- channel = info->line - cinfo->first_line;
-
- /*
- * If the device is in the middle of being closed, then block
- * until it's done, and then try again.
- */
- if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
- wait_event_interruptible(info->port.close_wait,
- !(info->port.flags & ASYNC_CLOSING));
- return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
- }
-
- /*
- * If non-blocking mode is set, then make the check up front
- * and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) ||
- (tty->flags & (1 << TTY_IO_ERROR))) {
- info->port.flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
- }
-
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, info->port.count is dropped by one, so that
- * cy_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&info->port.open_wait, &wait);
-#ifdef CY_DEBUG_OPEN
- printk(KERN_DEBUG "cyc block_til_ready before block: ttyC%d, "
- "count = %d\n", info->line, info->port.count);
-#endif
- spin_lock_irqsave(&cinfo->card_lock, flags);
- if (!tty_hung_up_p(filp))
- info->port.count--;
- spin_unlock_irqrestore(&cinfo->card_lock, flags);
-#ifdef CY_DEBUG_COUNT
- printk(KERN_DEBUG "cyc block_til_ready: (%d): decrementing count to "
- "%d\n", current->pid, info->port.count);
-#endif
- info->port.blocked_open++;
-
- if (!cy_is_Z(cinfo)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = cinfo->bus_index;
- base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index);
-
- while (1) {
- spin_lock_irqsave(&cinfo->card_lock, flags);
- if ((tty->termios->c_cflag & CBAUD)) {
- cy_writeb(base_addr + (CyCAR << index),
- (u_char) channel);
- cy_writeb(base_addr + (CyMSVR1 << index),
- CyRTS);
- cy_writeb(base_addr + (CyMSVR2 << index),
- CyDTR);
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:block_til_ready raising "
- "DTR\n");
- printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
- readb(base_addr + (CyMSVR1 << index)),
- readb(base_addr + (CyMSVR2 << index)));
-#endif
- }
- spin_unlock_irqrestore(&cinfo->card_lock, flags);
-
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) ||
- !(info->port.flags & ASYNC_INITIALIZED)) {
- retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
- -EAGAIN : -ERESTARTSYS);
- break;
- }
-
- spin_lock_irqsave(&cinfo->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index),
- (u_char) channel);
- if (!(info->port.flags & ASYNC_CLOSING) && (C_CLOCAL(tty) ||
- (readb(base_addr +
- (CyMSVR1 << index)) & CyDCD))) {
- spin_unlock_irqrestore(&cinfo->card_lock, flags);
- break;
- }
- spin_unlock_irqrestore(&cinfo->card_lock, flags);
-
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
-#ifdef CY_DEBUG_OPEN
- printk(KERN_DEBUG "cyc block_til_ready blocking: "
- "ttyC%d, count = %d\n",
- info->line, info->port.count);
-#endif
- schedule();
- }
- } else {
- struct FIRM_ID __iomem *firm_id;
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
- struct CH_CTRL __iomem *ch_ctrl;
-
- base_addr = cinfo->base_addr;
- firm_id = base_addr + ID_ADDRESS;
- if (!cyz_is_loaded(cinfo)) {
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&info->port.open_wait, &wait);
- return -EINVAL;
- }
-
- zfw_ctrl = base_addr + (readl(&firm_id->zfwctrl_addr)
- & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
- ch_ctrl = zfw_ctrl->ch_ctrl;
-
- while (1) {
- if ((tty->termios->c_cflag & CBAUD)) {
- cy_writel(&ch_ctrl[channel].rs_control,
- readl(&ch_ctrl[channel].rs_control) |
- C_RS_RTS | C_RS_DTR);
- retval = cyz_issue_cmd(cinfo,
- channel, C_CM_IOCTLM, 0L);
- if (retval != 0) {
- printk(KERN_ERR "cyc:block_til_ready "
- "retval on ttyC%d was %x\n",
- info->line, retval);
- }
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:block_til_ready raising "
- "Z DTR\n");
-#endif
- }
-
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) ||
- !(info->port.flags & ASYNC_INITIALIZED)) {
- retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
- -EAGAIN : -ERESTARTSYS);
- break;
- }
- if (!(info->port.flags & ASYNC_CLOSING) && (C_CLOCAL(tty) ||
- (readl(&ch_ctrl[channel].rs_status) &
- C_RS_DCD))) {
- break;
- }
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
-#ifdef CY_DEBUG_OPEN
- printk(KERN_DEBUG "cyc block_til_ready blocking: "
- "ttyC%d, count = %d\n",
- info->line, info->port.count);
-#endif
- schedule();
- }
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&info->port.open_wait, &wait);
- if (!tty_hung_up_p(filp)) {
- info->port.count++;
-#ifdef CY_DEBUG_COUNT
- printk(KERN_DEBUG "cyc:block_til_ready (%d): incrementing "
- "count to %d\n", current->pid, info->port.count);
-#endif
- }
- info->port.blocked_open--;
-#ifdef CY_DEBUG_OPEN
- printk(KERN_DEBUG "cyc:block_til_ready after blocking: ttyC%d, "
- "count = %d\n", info->line, info->port.count);
-#endif
- if (retval)
- return retval;
- info->port.flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
-} /* block_til_ready */
-
/*
* This routine is called whenever a serial port is opened. It
* performs the serial-specific initialization for the tty structure.
@@ -2435,7 +1597,6 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
printk(KERN_DEBUG "cyc:cy_open ttyC%d\n", info->line);
#endif
tty->driver_data = info;
- info->port.tty = tty;
if (serial_paranoia_check(info, tty->name, "cy_open"))
return -ENODEV;
@@ -2461,11 +1622,11 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
/*
* Start up serial port
*/
- retval = startup(info);
+ retval = cy_startup(info, tty);
if (retval)
return retval;
- retval = block_til_ready(tty, filp, info);
+ retval = tty_port_block_til_ready(&info->port, tty, filp);
if (retval) {
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc:cy_open returning after block_til_ready "
@@ -2475,6 +1636,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
}
info->throttle = 0;
+ tty_port_tty_set(&info->port, tty);
#ifdef CY_DEBUG_OPEN
printk(KERN_DEBUG "cyc:cy_open done\n");
@@ -2489,8 +1651,6 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct cyclades_card *card;
struct cyclades_port *info = tty->driver_data;
- void __iomem *base_addr;
- int chip, channel, index;
unsigned long orig_jiffies;
int char_time;
@@ -2534,13 +1694,8 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
timeout, char_time, jiffies);
#endif
card = info->card;
- channel = (info->line) - (card->first_line);
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
- while (readb(base_addr + (CySRER << index)) & CyTxRdy) {
+ while (cyy_readb(info, CySRER) & CyTxRdy) {
#ifdef CY_DEBUG_WAIT_UNTIL_SENT
printk(KERN_DEBUG "Not clean (jiff=%lu)...", jiffies);
#endif
@@ -2602,75 +1757,23 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
unsigned long flags;
-
-#ifdef CY_DEBUG_OTHER
- printk(KERN_DEBUG "cyc:cy_close ttyC%d\n", info->line);
-#endif
+ int channel;
if (!info || serial_paranoia_check(info, tty->name, "cy_close"))
return;
card = info->card;
- spin_lock_irqsave(&card->card_lock, flags);
- /* If the TTY is being hung up, nothing to do */
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&card->card_lock, flags);
- return;
- }
-#ifdef CY_DEBUG_OPEN
- printk(KERN_DEBUG "cyc:cy_close ttyC%d, count = %d\n", info->line,
- info->port.count);
-#endif
- if ((tty->count == 1) && (info->port.count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. Info->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk(KERN_ERR "cyc:cy_close: bad serial port count; "
- "tty->count is 1, info->port.count is %d\n", info->port.count);
- info->port.count = 1;
- }
-#ifdef CY_DEBUG_COUNT
- printk(KERN_DEBUG "cyc:cy_close at (%d): decrementing count to %d\n",
- current->pid, info->port.count - 1);
-#endif
- if (--info->port.count < 0) {
-#ifdef CY_DEBUG_COUNT
- printk(KERN_DEBUG "cyc:cyc_close setting count to 0\n");
-#endif
- info->port.count = 0;
- }
- if (info->port.count) {
- spin_unlock_irqrestore(&card->card_lock, flags);
+ if (!tty_port_close_start(&info->port, tty, filp))
return;
- }
- info->port.flags |= ASYNC_CLOSING;
-
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters.
- */
- tty->closing = 1;
- spin_unlock_irqrestore(&card->card_lock, flags);
- if (info->port.closing_wait != CY_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, info->port.closing_wait);
+ channel = info->line - card->first_line;
spin_lock_irqsave(&card->card_lock, flags);
if (!cy_is_Z(card)) {
- int channel = info->line - card->first_line;
- int index = card->bus_index;
- void __iomem *base_addr = card->base_addr +
- (cy_chip_offset[channel >> 2] << index);
/* Stop accepting input */
- channel &= 0x03;
- cy_writeb(base_addr + (CyCAR << index), (u_char) channel);
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) & ~CyRxData);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyRxData);
if (info->port.flags & ASYNC_INITIALIZED) {
/* Waiting for on-board buffers to be empty before
closing the port */
@@ -2682,15 +1785,10 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
#ifdef Z_WAKE
/* Waiting for on-board buffers to be empty before closing
the port */
- void __iomem *base_addr = card->base_addr;
- struct FIRM_ID __iomem *firm_id = base_addr + ID_ADDRESS;
- struct ZFW_CTRL __iomem *zfw_ctrl =
- base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- struct CH_CTRL __iomem *ch_ctrl = zfw_ctrl->ch_ctrl;
- int channel = info->line - card->first_line;
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
int retval;
- if (readl(&ch_ctrl[channel].flow_status) != C_FS_TXIDLE) {
+ if (readl(&ch_ctrl->flow_status) != C_FS_TXIDLE) {
retval = cyz_issue_cmd(card, channel, C_CM_IOCTLW, 0L);
if (retval != 0) {
printk(KERN_DEBUG "cyc:cy_close retval on "
@@ -2704,30 +1802,12 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
}
spin_unlock_irqrestore(&card->card_lock, flags);
- shutdown(info);
+ cy_shutdown(info, tty);
cy_flush_buffer(tty);
- tty_ldisc_flush(tty);
- spin_lock_irqsave(&card->card_lock, flags);
- tty->closing = 0;
- info->port.tty = NULL;
- if (info->port.blocked_open) {
- spin_unlock_irqrestore(&card->card_lock, flags);
- if (info->port.close_delay) {
- msleep_interruptible(jiffies_to_msecs
- (info->port.close_delay));
- }
- wake_up_interruptible(&info->port.open_wait);
- spin_lock_irqsave(&card->card_lock, flags);
- }
- info->port.flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
- wake_up_interruptible(&info->port.close_wait);
-
-#ifdef CY_DEBUG_OTHER
- printk(KERN_DEBUG "cyc:cy_close done\n");
-#endif
+ tty_port_tty_set(&info->port, NULL);
- spin_unlock_irqrestore(&card->card_lock, flags);
+ tty_port_close_end(&info->port, tty);
} /* cy_close */
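
The close path above is collapsed onto the generic tty_port helpers instead of open-coded count and flag handling. A minimal sketch of a close() written this way is below; example_port and my_hw_shutdown() are hypothetical placeholders, only the tty_port_* calls are the real API this patch relies on.

#include <linux/tty.h>

struct example_port {
        struct tty_port port;           /* embedded generic port state */
        /* hardware-specific fields would follow */
};

static void my_hw_shutdown(struct example_port *info)
{
        /* driver-specific: mask interrupts, drop DTR/RTS, free buffers */
}

static void example_close(struct tty_struct *tty, struct file *filp)
{
        struct example_port *info = tty->driver_data;

        /* Handles hung-up ttys, the open count and ASYNC_CLOSING;
         * returns 0 when nothing is left for the driver to do. */
        if (!tty_port_close_start(&info->port, tty, filp))
                return;

        my_hw_shutdown(info);

        tty_port_tty_set(&info->port, NULL);    /* drop the port's tty pointer */

        /* Applies close_delay, wakes blocked openers, clears ASYNC_CLOSING. */
        tty_port_close_end(&info->port, tty);
}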
/* This routine gets called when tty_write has put something into
@@ -2870,18 +1950,13 @@ static int cy_write_room(struct tty_struct *tty)
static int cy_chars_in_buffer(struct tty_struct *tty)
{
- struct cyclades_card *card;
struct cyclades_port *info = tty->driver_data;
- int channel;
if (serial_paranoia_check(info, tty->name, "cy_chars_in_buffer"))
return 0;
- card = info->card;
- channel = (info->line) - (card->first_line);
-
#ifdef Z_EXT_CHARS_IN_BUFFER
- if (!cy_is_Z(card)) {
+ if (!cy_is_Z(info->card)) {
#endif /* Z_EXT_CHARS_IN_BUFFER */
#ifdef CY_DEBUG_IO
printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
@@ -2890,20 +1965,11 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
return info->xmit_cnt;
#ifdef Z_EXT_CHARS_IN_BUFFER
} else {
- static struct FIRM_ID *firm_id;
- static struct ZFW_CTRL *zfw_ctrl;
- static struct CH_CTRL *ch_ctrl;
- static struct BUF_CTRL *buf_ctrl;
+ struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
int char_count;
__u32 tx_put, tx_get, tx_bufsize;
lock_kernel();
- firm_id = card->base_addr + ID_ADDRESS;
- zfw_ctrl = card->base_addr +
- (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]);
- buf_ctrl = &(zfw_ctrl->buf_ctrl[channel]);
-
tx_get = readl(&buf_ctrl->tx_get);
tx_put = readl(&buf_ctrl->tx_put);
tx_bufsize = readl(&buf_ctrl->tx_bufsize);
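
On the Z firmware the transmit queue is a ring described by tx_get, tx_put and tx_bufsize, so the character count that the hunk above is about to compute is the usual circular-buffer occupancy. A stand-alone illustration of that arithmetic (a sketch, not the driver's literal code):

/* Occupancy of a ring buffer given its get/put indices and size.
 * tx_put is where the producer writes next, tx_get where the consumer
 * reads next; both wrap at tx_bufsize. */
static unsigned int ring_chars_queued(unsigned int tx_get,
                                      unsigned int tx_put,
                                      unsigned int tx_bufsize)
{
        if (tx_put >= tx_get)
                return tx_put - tx_get;
        return tx_put - tx_get + tx_bufsize;    /* writer has wrapped */
}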
@@ -2956,48 +2022,44 @@ static void cyy_baud_calc(struct cyclades_port *info, __u32 baud)
* This routine finds or computes the various line characteristics.
* It used to be called config_setup
*/
-static void set_line_char(struct cyclades_port *info)
+static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty)
{
struct cyclades_card *card;
unsigned long flags;
- void __iomem *base_addr;
- int chip, channel, index;
+ int channel;
unsigned cflag, iflag;
int baud, baud_rate = 0;
int i;
- if (!info->port.tty || !info->port.tty->termios)
+ if (!tty->termios) /* XXX can this happen at all? */
return;
if (info->line == -1)
return;
- cflag = info->port.tty->termios->c_cflag;
- iflag = info->port.tty->termios->c_iflag;
+ cflag = tty->termios->c_cflag;
+ iflag = tty->termios->c_iflag;
/*
* Set up the tty->alt_speed kludge
*/
- if (info->port.tty) {
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- info->port.tty->alt_speed = 57600;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- info->port.tty->alt_speed = 115200;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
- info->port.tty->alt_speed = 230400;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
- info->port.tty->alt_speed = 460800;
- }
+ if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
+ tty->alt_speed = 57600;
+ if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
+ tty->alt_speed = 115200;
+ if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
+ tty->alt_speed = 230400;
+ if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
+ tty->alt_speed = 460800;
card = info->card;
channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
-
- index = card->bus_index;
+ u32 cflags;
/* baud rate */
- baud = tty_get_baud_rate(info->port.tty);
+ baud = tty_get_baud_rate(tty);
if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
ASYNC_SPD_CUST) {
if (info->custom_divisor)
@@ -3106,124 +2168,68 @@ static void set_line_char(struct cyclades_port *info)
cable. Contact Marcio Saito for details.
***********************************************/
- chip = channel >> 2;
channel &= 0x03;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index), (u_char) channel);
+ cyy_writeb(info, CyCAR, channel);
/* tx and rx baud rate */
- cy_writeb(base_addr + (CyTCOR << index), info->tco);
- cy_writeb(base_addr + (CyTBPR << index), info->tbpr);
- cy_writeb(base_addr + (CyRCOR << index), info->rco);
- cy_writeb(base_addr + (CyRBPR << index), info->rbpr);
+ cyy_writeb(info, CyTCOR, info->tco);
+ cyy_writeb(info, CyTBPR, info->tbpr);
+ cyy_writeb(info, CyRCOR, info->rco);
+ cyy_writeb(info, CyRBPR, info->rbpr);
/* set line characteristics according configuration */
- cy_writeb(base_addr + (CySCHR1 << index),
- START_CHAR(info->port.tty));
- cy_writeb(base_addr + (CySCHR2 << index), STOP_CHAR(info->port.tty));
- cy_writeb(base_addr + (CyCOR1 << index), info->cor1);
- cy_writeb(base_addr + (CyCOR2 << index), info->cor2);
- cy_writeb(base_addr + (CyCOR3 << index), info->cor3);
- cy_writeb(base_addr + (CyCOR4 << index), info->cor4);
- cy_writeb(base_addr + (CyCOR5 << index), info->cor5);
+ cyy_writeb(info, CySCHR1, START_CHAR(tty));
+ cyy_writeb(info, CySCHR2, STOP_CHAR(tty));
+ cyy_writeb(info, CyCOR1, info->cor1);
+ cyy_writeb(info, CyCOR2, info->cor2);
+ cyy_writeb(info, CyCOR3, info->cor3);
+ cyy_writeb(info, CyCOR4, info->cor4);
+ cyy_writeb(info, CyCOR5, info->cor5);
- cyy_issue_cmd(base_addr, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch |
- CyCOR3ch, index);
+ cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch |
+ CyCOR3ch);
/* !!! Is this needed? */
- cy_writeb(base_addr + (CyCAR << index), (u_char) channel);
- cy_writeb(base_addr + (CyRTPR << index),
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, CyRTPR,
(info->default_timeout ? info->default_timeout : 0x02));
/* 10ms rx timeout */
- if (C_CLOCAL(info->port.tty)) {
- /* without modem intr */
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) | CyMdmCh);
- /* act on 1->0 modem transitions */
- if ((cflag & CRTSCTS) && info->rflow) {
- cy_writeb(base_addr + (CyMCOR1 << index),
- (CyCTS | rflow_thr[i]));
- } else {
- cy_writeb(base_addr + (CyMCOR1 << index),
- CyCTS);
- }
- /* act on 0->1 modem transitions */
- cy_writeb(base_addr + (CyMCOR2 << index), CyCTS);
- } else {
- /* without modem intr */
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr +
- (CySRER << index)) | CyMdmCh);
- /* act on 1->0 modem transitions */
- if ((cflag & CRTSCTS) && info->rflow) {
- cy_writeb(base_addr + (CyMCOR1 << index),
- (CyDSR | CyCTS | CyRI | CyDCD |
- rflow_thr[i]));
- } else {
- cy_writeb(base_addr + (CyMCOR1 << index),
- CyDSR | CyCTS | CyRI | CyDCD);
- }
- /* act on 0->1 modem transitions */
- cy_writeb(base_addr + (CyMCOR2 << index),
- CyDSR | CyCTS | CyRI | CyDCD);
- }
+ cflags = CyCTS;
+ if (!C_CLOCAL(tty))
+ cflags |= CyDSR | CyRI | CyDCD;
+ /* without modem intr */
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyMdmCh);
+ /* act on 1->0 modem transitions */
+ if ((cflag & CRTSCTS) && info->rflow)
+ cyy_writeb(info, CyMCOR1, cflags | rflow_thr[i]);
+ else
+ cyy_writeb(info, CyMCOR1, cflags);
+ /* act on 0->1 modem transitions */
+ cyy_writeb(info, CyMCOR2, cflags);
- if (i == 0) { /* baud rate is zero, turn off line */
- if (info->rtsdtr_inv) {
- cy_writeb(base_addr + (CyMSVR1 << index),
- ~CyRTS);
- } else {
- cy_writeb(base_addr + (CyMSVR2 << index),
- ~CyDTR);
- }
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:set_line_char dropping DTR\n");
- printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
- readb(base_addr + (CyMSVR1 << index)),
- readb(base_addr + (CyMSVR2 << index)));
-#endif
- } else {
- if (info->rtsdtr_inv) {
- cy_writeb(base_addr + (CyMSVR1 << index),
- CyRTS);
- } else {
- cy_writeb(base_addr + (CyMSVR2 << index),
- CyDTR);
- }
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:set_line_char raising DTR\n");
- printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
- readb(base_addr + (CyMSVR1 << index)),
- readb(base_addr + (CyMSVR2 << index)));
-#endif
- }
+ if (i == 0) /* baud rate is zero, turn off line */
+ cyy_change_rts_dtr(info, 0, TIOCM_DTR);
+ else
+ cyy_change_rts_dtr(info, TIOCM_DTR, 0);
- if (info->port.tty)
- clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+ clear_bit(TTY_IO_ERROR, &tty->flags);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
- struct FIRM_ID __iomem *firm_id;
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct CH_CTRL __iomem *ch_ctrl;
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
__u32 sw_flow;
int retval;
- firm_id = card->base_addr + ID_ADDRESS;
if (!cyz_is_loaded(card))
return;
- zfw_ctrl = card->base_addr +
- (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]);
-
/* baud rate */
- baud = tty_get_baud_rate(info->port.tty);
+ baud = tty_get_baud_rate(tty);
if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
ASYNC_SPD_CUST) {
if (info->custom_divisor)
@@ -3334,45 +2340,38 @@ static void set_line_char(struct cyclades_port *info)
"was %x\n", info->line, retval);
}
- if (info->port.tty)
- clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+ clear_bit(TTY_IO_ERROR, &tty->flags);
}
} /* set_line_char */
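
Throughout the non-Z (Cirrus CD1400) paths, the open-coded base_addr + (REG << index) arithmetic is replaced by cyy_readb()/cyy_writeb() helpers keyed off a per-port base address (info->u.cyy.base_addr, set up in cy_init_card later in this patch). Their exact definitions are in a part of the patch not shown here; roughly, assuming the driver's own structures and cy_writeb wrapper, they look like:

/* Sketch only: per-port register accessors for the CD1400 chips.
 * base_addr already includes the per-chip offset; bus_index is the
 * shift that encodes the board's bus width. */
static u8 cyy_readb(struct cyclades_port *port, u32 reg)
{
        struct cyclades_card *card = port->card;

        return readb(port->u.cyy.base_addr + (reg << card->bus_index));
}

static void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val)
{
        struct cyclades_card *card = port->card;

        cy_writeb(port->u.cyy.base_addr + (reg << card->bus_index), val);
}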
-static int
-get_serial_info(struct cyclades_port *info,
+static int cy_get_serial_info(struct cyclades_port *info,
struct serial_struct __user *retinfo)
{
- struct serial_struct tmp;
struct cyclades_card *cinfo = info->card;
-
- if (!retinfo)
- return -EFAULT;
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = info->type;
- tmp.line = info->line;
- tmp.port = (info->card - cy_card) * 0x100 + info->line -
- cinfo->first_line;
- tmp.irq = cinfo->irq;
- tmp.flags = info->port.flags;
- tmp.close_delay = info->port.close_delay;
- tmp.closing_wait = info->port.closing_wait;
- tmp.baud_base = info->baud;
- tmp.custom_divisor = info->custom_divisor;
- tmp.hub6 = 0; /*!!! */
+ struct serial_struct tmp = {
+ .type = info->type,
+ .line = info->line,
+ .port = (info->card - cy_card) * 0x100 + info->line -
+ cinfo->first_line,
+ .irq = cinfo->irq,
+ .flags = info->port.flags,
+ .close_delay = info->port.close_delay,
+ .closing_wait = info->port.closing_wait,
+ .baud_base = info->baud,
+ .custom_divisor = info->custom_divisor,
+ .hub6 = 0, /*!!! */
+ };
return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
-} /* get_serial_info */
+}
static int
-set_serial_info(struct cyclades_port *info,
+cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
struct serial_struct __user *new_info)
{
struct serial_struct new_serial;
- struct cyclades_port old_info;
if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
return -EFAULT;
- old_info = *info;
if (!capable(CAP_SYS_ADMIN)) {
if (new_serial.close_delay != info->port.close_delay ||
@@ -3402,10 +2401,10 @@ set_serial_info(struct cyclades_port *info,
check_and_exit:
if (info->port.flags & ASYNC_INITIALIZED) {
- set_line_char(info);
+ cy_set_line_char(info, tty);
return 0;
} else {
- return startup(info);
+ return cy_startup(info, tty);
}
} /* set_serial_info */
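
The CAP_SYS_ADMIN branch above is truncated by the hunk; the conventional TIOCSSERIAL policy it implements is that an unprivileged caller may only change the ASYNC_USR_MASK flags and must leave the timing parameters alone. A generic sketch of that check, not the driver's literal code:

#include <linux/capability.h>
#include <linux/serial.h>
#include <linux/tty.h>

/* Returns 0 if this TIOCSSERIAL request is acceptable for the caller. */
static int check_setserial_perm(const struct serial_struct *new,
                                const struct tty_port *port,
                                unsigned int baud_base)
{
        if (capable(CAP_SYS_ADMIN))
                return 0;                       /* root may change anything */

        if (new->close_delay != port->close_delay ||
            new->baud_base != baud_base ||
            (new->flags & ~ASYNC_USR_MASK) !=
                        (port->flags & ~ASYNC_USR_MASK))
                return -EPERM;                  /* only user-visible flags allowed */

        return 0;
}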
@@ -3421,24 +2420,14 @@ check_and_exit:
*/
static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value)
{
- struct cyclades_card *card;
- int chip, channel, index;
- unsigned char status;
+ struct cyclades_card *card = info->card;
unsigned int result;
unsigned long flags;
- void __iomem *base_addr;
+ u8 status;
- card = info->card;
- channel = (info->line) - (card->first_line);
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
-
spin_lock_irqsave(&card->card_lock, flags);
- status = readb(base_addr + (CySRER << index)) &
- (CyTxRdy | CyTxMpty);
+ status = cyy_readb(info, CySRER) & (CyTxRdy | CyTxMpty);
spin_unlock_irqrestore(&card->card_lock, flags);
result = (status ? 0 : TIOCSER_TEMT);
} else {
@@ -3452,34 +2441,23 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
- int chip, channel, index;
- void __iomem *base_addr;
- unsigned long flags;
- unsigned char status;
- unsigned long lstatus;
- unsigned int result;
- struct FIRM_ID __iomem *firm_id;
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
- struct CH_CTRL __iomem *ch_ctrl;
+ int result;
if (serial_paranoia_check(info, tty->name, __func__))
return -ENODEV;
- lock_kernel();
-
card = info->card;
- channel = info->line - card->first_line;
+
+ lock_kernel();
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
+ unsigned long flags;
+ int channel = info->line - card->first_line;
+ u8 status;
spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index), (u_char) channel);
- status = readb(base_addr + (CyMSVR1 << index));
- status |= readb(base_addr + (CyMSVR2 << index));
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ status = cyy_readb(info, CyMSVR1);
+ status |= cyy_readb(info, CyMSVR2);
spin_unlock_irqrestore(&card->card_lock, flags);
if (info->rtsdtr_inv) {
@@ -3494,27 +2472,22 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
((status & CyDSR) ? TIOCM_DSR : 0) |
((status & CyCTS) ? TIOCM_CTS : 0);
} else {
- base_addr = card->base_addr;
- firm_id = card->base_addr + ID_ADDRESS;
- if (cyz_is_loaded(card)) {
- zfw_ctrl = card->base_addr +
- (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
- ch_ctrl = zfw_ctrl->ch_ctrl;
- lstatus = readl(&ch_ctrl[channel].rs_status);
- result = ((lstatus & C_RS_RTS) ? TIOCM_RTS : 0) |
- ((lstatus & C_RS_DTR) ? TIOCM_DTR : 0) |
- ((lstatus & C_RS_DCD) ? TIOCM_CAR : 0) |
- ((lstatus & C_RS_RI) ? TIOCM_RNG : 0) |
- ((lstatus & C_RS_DSR) ? TIOCM_DSR : 0) |
- ((lstatus & C_RS_CTS) ? TIOCM_CTS : 0);
- } else {
- result = 0;
- unlock_kernel();
- return -ENODEV;
+ u32 lstatus;
+
+ if (!cyz_is_loaded(card)) {
+ result = -ENODEV;
+ goto end;
}
+ lstatus = readl(&info->u.cyz.ch_ctrl->rs_status);
+ result = ((lstatus & C_RS_RTS) ? TIOCM_RTS : 0) |
+ ((lstatus & C_RS_DTR) ? TIOCM_DTR : 0) |
+ ((lstatus & C_RS_DCD) ? TIOCM_CAR : 0) |
+ ((lstatus & C_RS_RI) ? TIOCM_RNG : 0) |
+ ((lstatus & C_RS_DSR) ? TIOCM_DSR : 0) |
+ ((lstatus & C_RS_CTS) ? TIOCM_CTS : 0);
}
+end:
unlock_kernel();
return result;
} /* cy_tiocmget */
@@ -3525,150 +2498,53 @@ cy_tiocmset(struct tty_struct *tty, struct file *file,
{
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
- int chip, channel, index;
- void __iomem *base_addr;
unsigned long flags;
- struct FIRM_ID __iomem *firm_id;
- struct ZFW_CTRL __iomem *zfw_ctrl;
- struct BOARD_CTRL __iomem *board_ctrl;
- struct CH_CTRL __iomem *ch_ctrl;
- int retval;
if (serial_paranoia_check(info, tty->name, __func__))
return -ENODEV;
card = info->card;
- channel = (info->line) - (card->first_line);
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
+ spin_lock_irqsave(&card->card_lock, flags);
+ cyy_change_rts_dtr(info, set, clear);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ } else {
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
+ int retval, channel = info->line - card->first_line;
+ u32 rs;
- if (set & TIOCM_RTS) {
- spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index),
- (u_char) channel);
- if (info->rtsdtr_inv) {
- cy_writeb(base_addr + (CyMSVR2 << index),
- CyDTR);
- } else {
- cy_writeb(base_addr + (CyMSVR1 << index),
- CyRTS);
- }
- spin_unlock_irqrestore(&card->card_lock, flags);
- }
- if (clear & TIOCM_RTS) {
- spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index),
- (u_char) channel);
- if (info->rtsdtr_inv) {
- cy_writeb(base_addr + (CyMSVR2 << index),
- ~CyDTR);
- } else {
- cy_writeb(base_addr + (CyMSVR1 << index),
- ~CyRTS);
- }
- spin_unlock_irqrestore(&card->card_lock, flags);
- }
+ if (!cyz_is_loaded(card))
+ return -ENODEV;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ rs = readl(&ch_ctrl->rs_control);
+ if (set & TIOCM_RTS)
+ rs |= C_RS_RTS;
+ if (clear & TIOCM_RTS)
+ rs &= ~C_RS_RTS;
if (set & TIOCM_DTR) {
- spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index),
- (u_char) channel);
- if (info->rtsdtr_inv) {
- cy_writeb(base_addr + (CyMSVR1 << index),
- CyRTS);
- } else {
- cy_writeb(base_addr + (CyMSVR2 << index),
- CyDTR);
- }
+ rs |= C_RS_DTR;
#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:set_modem_info raising DTR\n");
- printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
- readb(base_addr + (CyMSVR1 << index)),
- readb(base_addr + (CyMSVR2 << index)));
+ printk(KERN_DEBUG "cyc:set_modem_info raising Z DTR\n");
#endif
- spin_unlock_irqrestore(&card->card_lock, flags);
}
if (clear & TIOCM_DTR) {
- spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index),
- (u_char) channel);
- if (info->rtsdtr_inv) {
- cy_writeb(base_addr + (CyMSVR1 << index),
- ~CyRTS);
- } else {
- cy_writeb(base_addr + (CyMSVR2 << index),
- ~CyDTR);
- }
-
+ rs &= ~C_RS_DTR;
#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:set_modem_info dropping DTR\n");
- printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
- readb(base_addr + (CyMSVR1 << index)),
- readb(base_addr + (CyMSVR2 << index)));
+ printk(KERN_DEBUG "cyc:set_modem_info clearing "
+ "Z DTR\n");
#endif
- spin_unlock_irqrestore(&card->card_lock, flags);
}
- } else {
- base_addr = card->base_addr;
-
- firm_id = card->base_addr + ID_ADDRESS;
- if (cyz_is_loaded(card)) {
- zfw_ctrl = card->base_addr +
- (readl(&firm_id->zfwctrl_addr) & 0xfffff);
- board_ctrl = &zfw_ctrl->board_ctrl;
- ch_ctrl = zfw_ctrl->ch_ctrl;
-
- if (set & TIOCM_RTS) {
- spin_lock_irqsave(&card->card_lock, flags);
- cy_writel(&ch_ctrl[channel].rs_control,
- readl(&ch_ctrl[channel].rs_control) |
- C_RS_RTS);
- spin_unlock_irqrestore(&card->card_lock, flags);
- }
- if (clear & TIOCM_RTS) {
- spin_lock_irqsave(&card->card_lock, flags);
- cy_writel(&ch_ctrl[channel].rs_control,
- readl(&ch_ctrl[channel].rs_control) &
- ~C_RS_RTS);
- spin_unlock_irqrestore(&card->card_lock, flags);
- }
- if (set & TIOCM_DTR) {
- spin_lock_irqsave(&card->card_lock, flags);
- cy_writel(&ch_ctrl[channel].rs_control,
- readl(&ch_ctrl[channel].rs_control) |
- C_RS_DTR);
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:set_modem_info raising "
- "Z DTR\n");
-#endif
- spin_unlock_irqrestore(&card->card_lock, flags);
- }
- if (clear & TIOCM_DTR) {
- spin_lock_irqsave(&card->card_lock, flags);
- cy_writel(&ch_ctrl[channel].rs_control,
- readl(&ch_ctrl[channel].rs_control) &
- ~C_RS_DTR);
-#ifdef CY_DEBUG_DTR
- printk(KERN_DEBUG "cyc:set_modem_info clearing "
- "Z DTR\n");
-#endif
- spin_unlock_irqrestore(&card->card_lock, flags);
- }
- } else {
- return -ENODEV;
- }
- spin_lock_irqsave(&card->card_lock, flags);
+ cy_writel(&ch_ctrl->rs_control, rs);
retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
+ spin_unlock_irqrestore(&card->card_lock, flags);
if (retval != 0) {
printk(KERN_ERR "cyc:set_modem_info retval on ttyC%d "
"was %x\n", info->line, retval);
}
- spin_unlock_irqrestore(&card->card_lock, flags);
}
return 0;
-} /* cy_tiocmset */
+}
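
cy_tiocmset() and several other paths above now defer to a single cyy_change_rts_dtr() helper that hides the rtsdtr_inv quirk (boards where the RTS and DTR lines are wired to the opposite modem-signal registers). The helper is introduced elsewhere in this patch; a rough sketch of what it has to do, assuming the caller holds card_lock:

/* Sketch: assert/deassert RTS and DTR on one CD1400 channel. */
static void cyy_change_rts_dtr(struct cyclades_port *info,
                               unsigned int set, unsigned int clear)
{
        int channel = info->line - info->card->first_line;
        u32 rts_reg = CyMSVR1, dtr_reg = CyMSVR2;
        u8 rts_bit = CyRTS, dtr_bit = CyDTR;

        if (info->rtsdtr_inv) {                 /* swapped wiring */
                rts_reg = CyMSVR2;  rts_bit = CyDTR;
                dtr_reg = CyMSVR1;  dtr_bit = CyRTS;
        }

        cyy_writeb(info, CyCAR, channel & 0x03);        /* select channel */
        if (set & TIOCM_RTS)
                cyy_writeb(info, rts_reg, rts_bit);
        if (clear & TIOCM_RTS)
                cyy_writeb(info, rts_reg, (u8)~rts_bit);
        if (set & TIOCM_DTR)
                cyy_writeb(info, dtr_reg, dtr_bit);
        if (clear & TIOCM_DTR)
                cyy_writeb(info, dtr_reg, (u8)~dtr_bit);
}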
/*
* cy_break() --- routine which turns the break handling on or off
@@ -3733,41 +2609,18 @@ static int cy_break(struct tty_struct *tty, int break_state)
return retval;
} /* cy_break */
-static int get_mon_info(struct cyclades_port *info,
- struct cyclades_monitor __user *mon)
-{
-
- if (copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor)))
- return -EFAULT;
- info->mon.int_count = 0;
- info->mon.char_count = 0;
- info->mon.char_max = 0;
- info->mon.char_last = 0;
- return 0;
-} /* get_mon_info */
-
static int set_threshold(struct cyclades_port *info, unsigned long value)
{
- struct cyclades_card *card;
- void __iomem *base_addr;
- int channel, chip, index;
+ struct cyclades_card *card = info->card;
unsigned long flags;
- card = info->card;
- channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr =
- card->base_addr + (cy_chip_offset[chip] << index);
-
info->cor3 &= ~CyREC_FIFO;
info->cor3 |= value & CyREC_FIFO;
spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCOR3 << index), info->cor3);
- cyy_issue_cmd(base_addr, CyCOR_CHANGE | CyCOR3ch, index);
+ cyy_writeb(info, CyCOR3, info->cor3);
+ cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR3ch);
spin_unlock_irqrestore(&card->card_lock, flags);
}
return 0;
@@ -3776,55 +2629,23 @@ static int set_threshold(struct cyclades_port *info, unsigned long value)
static int get_threshold(struct cyclades_port *info,
unsigned long __user *value)
{
- struct cyclades_card *card;
- void __iomem *base_addr;
- int channel, chip, index;
- unsigned long tmp;
+ struct cyclades_card *card = info->card;
- card = info->card;
- channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
-
- tmp = readb(base_addr + (CyCOR3 << index)) & CyREC_FIFO;
+ u8 tmp = cyy_readb(info, CyCOR3) & CyREC_FIFO;
return put_user(tmp, value);
}
return 0;
} /* get_threshold */
-static int set_default_threshold(struct cyclades_port *info,
- unsigned long value)
-{
- info->default_threshold = value & 0x0f;
- return 0;
-} /* set_default_threshold */
-
-static int get_default_threshold(struct cyclades_port *info,
- unsigned long __user *value)
-{
- return put_user(info->default_threshold, value);
-} /* get_default_threshold */
-
static int set_timeout(struct cyclades_port *info, unsigned long value)
{
- struct cyclades_card *card;
- void __iomem *base_addr;
- int channel, chip, index;
+ struct cyclades_card *card = info->card;
unsigned long flags;
- card = info->card;
- channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
-
spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyRTPR << index), value & 0xff);
+ cyy_writeb(info, CyRTPR, value & 0xff);
spin_unlock_irqrestore(&card->card_lock, flags);
}
return 0;
@@ -3833,36 +2654,35 @@ static int set_timeout(struct cyclades_port *info, unsigned long value)
static int get_timeout(struct cyclades_port *info,
unsigned long __user *value)
{
- struct cyclades_card *card;
- void __iomem *base_addr;
- int channel, chip, index;
- unsigned long tmp;
+ struct cyclades_card *card = info->card;
- card = info->card;
- channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr + (cy_chip_offset[chip] << index);
-
- tmp = readb(base_addr + (CyRTPR << index));
+ u8 tmp = cyy_readb(info, CyRTPR);
return put_user(tmp, value);
}
return 0;
} /* get_timeout */
-static int set_default_timeout(struct cyclades_port *info, unsigned long value)
+static int cy_cflags_changed(struct cyclades_port *info, unsigned long arg,
+ struct cyclades_icount *cprev)
{
- info->default_timeout = value & 0xff;
- return 0;
-} /* set_default_timeout */
+ struct cyclades_icount cnow;
+ unsigned long flags;
+ int ret;
-static int get_default_timeout(struct cyclades_port *info,
- unsigned long __user *value)
-{
- return put_user(info->default_timeout, value);
-} /* get_default_timeout */
+ spin_lock_irqsave(&info->card->card_lock, flags);
+ cnow = info->icount; /* atomic copy */
+ spin_unlock_irqrestore(&info->card->card_lock, flags);
+
+ ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
+
+ *cprev = cnow;
+
+ return ret;
+}
/*
* This routine allows the tty driver to implement device-
@@ -3874,8 +2694,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct cyclades_port *info = tty->driver_data;
- struct cyclades_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser; /* user space */
+ struct cyclades_icount cnow; /* kernel counter temps */
int ret_val = 0;
unsigned long flags;
void __user *argp = (void __user *)arg;
@@ -3891,7 +2710,11 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
switch (cmd) {
case CYGETMON:
- ret_val = get_mon_info(info, argp);
+ if (copy_to_user(argp, &info->mon, sizeof(info->mon))) {
+ ret_val = -EFAULT;
+ break;
+ }
+ memset(&info->mon, 0, sizeof(info->mon));
break;
case CYGETTHRESH:
ret_val = get_threshold(info, argp);
@@ -3900,10 +2723,11 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
ret_val = set_threshold(info, arg);
break;
case CYGETDEFTHRESH:
- ret_val = get_default_threshold(info, argp);
+ ret_val = put_user(info->default_threshold,
+ (unsigned long __user *)argp);
break;
case CYSETDEFTHRESH:
- ret_val = set_default_threshold(info, arg);
+ info->default_threshold = arg & 0x0f;
break;
case CYGETTIMEOUT:
ret_val = get_timeout(info, argp);
@@ -3912,21 +2736,20 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
ret_val = set_timeout(info, arg);
break;
case CYGETDEFTIMEOUT:
- ret_val = get_default_timeout(info, argp);
+ ret_val = put_user(info->default_timeout,
+ (unsigned long __user *)argp);
break;
case CYSETDEFTIMEOUT:
- ret_val = set_default_timeout(info, arg);
+ info->default_timeout = arg & 0xff;
break;
case CYSETRFLOW:
info->rflow = (int)arg;
- ret_val = 0;
break;
case CYGETRFLOW:
ret_val = info->rflow;
break;
case CYSETRTSDTR_INV:
info->rtsdtr_inv = (int)arg;
- ret_val = 0;
break;
case CYGETRTSDTR_INV:
ret_val = info->rtsdtr_inv;
@@ -3937,7 +2760,6 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
#ifndef CONFIG_CYZ_INTR
case CYZSETPOLLCYCLE:
cyz_polling_cycle = (arg * HZ) / 1000;
- ret_val = 0;
break;
case CYZGETPOLLCYCLE:
ret_val = (cyz_polling_cycle * 1000) / HZ;
@@ -3945,16 +2767,15 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
#endif /* CONFIG_CYZ_INTR */
case CYSETWAIT:
info->port.closing_wait = (unsigned short)arg * HZ / 100;
- ret_val = 0;
break;
case CYGETWAIT:
ret_val = info->port.closing_wait / (HZ / 100);
break;
case TIOCGSERIAL:
- ret_val = get_serial_info(info, argp);
+ ret_val = cy_get_serial_info(info, argp);
break;
case TIOCSSERIAL:
- ret_val = set_serial_info(info, argp);
+ ret_val = cy_set_serial_info(info, tty, argp);
break;
case TIOCSERGETLSR: /* Get line status register */
ret_val = get_lsr_info(info, argp);
@@ -3970,17 +2791,8 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
/* note the counters on entry */
cnow = info->icount;
spin_unlock_irqrestore(&info->card->card_lock, flags);
- ret_val = wait_event_interruptible(info->delta_msr_wait, ({
- cprev = cnow;
- spin_lock_irqsave(&info->card->card_lock, flags);
- cnow = info->icount; /* atomic copy */
- spin_unlock_irqrestore(&info->card->card_lock, flags);
-
- ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
- ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
- ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
- ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
- }));
+ ret_val = wait_event_interruptible(info->delta_msr_wait,
+ cy_cflags_changed(info, arg, &cnow));
break;
/*
@@ -3989,46 +2801,29 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
* NB: both 1->0 and 0->1 transitions are counted except for
* RI where only 0->1 is counted.
*/
- case TIOCGICOUNT:
+ case TIOCGICOUNT: {
+ struct serial_icounter_struct sic = { };
+
spin_lock_irqsave(&info->card->card_lock, flags);
cnow = info->icount;
spin_unlock_irqrestore(&info->card->card_lock, flags);
- p_cuser = argp;
- ret_val = put_user(cnow.cts, &p_cuser->cts);
- if (ret_val)
- break;
- ret_val = put_user(cnow.dsr, &p_cuser->dsr);
- if (ret_val)
- break;
- ret_val = put_user(cnow.rng, &p_cuser->rng);
- if (ret_val)
- break;
- ret_val = put_user(cnow.dcd, &p_cuser->dcd);
- if (ret_val)
- break;
- ret_val = put_user(cnow.rx, &p_cuser->rx);
- if (ret_val)
- break;
- ret_val = put_user(cnow.tx, &p_cuser->tx);
- if (ret_val)
- break;
- ret_val = put_user(cnow.frame, &p_cuser->frame);
- if (ret_val)
- break;
- ret_val = put_user(cnow.overrun, &p_cuser->overrun);
- if (ret_val)
- break;
- ret_val = put_user(cnow.parity, &p_cuser->parity);
- if (ret_val)
- break;
- ret_val = put_user(cnow.brk, &p_cuser->brk);
- if (ret_val)
- break;
- ret_val = put_user(cnow.buf_overrun, &p_cuser->buf_overrun);
- if (ret_val)
- break;
- ret_val = 0;
+
+ sic.cts = cnow.cts;
+ sic.dsr = cnow.dsr;
+ sic.rng = cnow.rng;
+ sic.dcd = cnow.dcd;
+ sic.rx = cnow.rx;
+ sic.tx = cnow.tx;
+ sic.frame = cnow.frame;
+ sic.overrun = cnow.overrun;
+ sic.parity = cnow.parity;
+ sic.brk = cnow.brk;
+ sic.buf_overrun = cnow.buf_overrun;
+
+ if (copy_to_user(argp, &sic, sizeof(sic)))
+ ret_val = -EFAULT;
break;
+ }
default:
ret_val = -ENOIOCTLCMD;
}
@@ -4054,7 +2849,7 @@ static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
printk(KERN_DEBUG "cyc:cy_set_termios ttyC%d\n", info->line);
#endif
- set_line_char(info);
+ cy_set_line_char(info, tty);
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios->c_cflag & CRTSCTS)) {
@@ -4111,8 +2906,6 @@ static void cy_throttle(struct tty_struct *tty)
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
unsigned long flags;
- void __iomem *base_addr;
- int chip, channel, index;
#ifdef CY_DEBUG_THROTTLE
char buf[64];
@@ -4134,24 +2927,9 @@ static void cy_throttle(struct tty_struct *tty)
}
if (tty->termios->c_cflag & CRTSCTS) {
- channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr +
- (cy_chip_offset[chip] << index);
-
spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index),
- (u_char) channel);
- if (info->rtsdtr_inv) {
- cy_writeb(base_addr + (CyMSVR2 << index),
- ~CyDTR);
- } else {
- cy_writeb(base_addr + (CyMSVR1 << index),
- ~CyRTS);
- }
+ cyy_change_rts_dtr(info, 0, TIOCM_RTS);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
info->throttle = 1;
@@ -4169,8 +2947,6 @@ static void cy_unthrottle(struct tty_struct *tty)
struct cyclades_port *info = tty->driver_data;
struct cyclades_card *card;
unsigned long flags;
- void __iomem *base_addr;
- int chip, channel, index;
#ifdef CY_DEBUG_THROTTLE
char buf[64];
@@ -4191,24 +2967,9 @@ static void cy_unthrottle(struct tty_struct *tty)
if (tty->termios->c_cflag & CRTSCTS) {
card = info->card;
- channel = info->line - card->first_line;
if (!cy_is_Z(card)) {
- chip = channel >> 2;
- channel &= 0x03;
- index = card->bus_index;
- base_addr = card->base_addr +
- (cy_chip_offset[chip] << index);
-
spin_lock_irqsave(&card->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index),
- (u_char) channel);
- if (info->rtsdtr_inv) {
- cy_writeb(base_addr + (CyMSVR2 << index),
- CyDTR);
- } else {
- cy_writeb(base_addr + (CyMSVR1 << index),
- CyRTS);
- }
+ cyy_change_rts_dtr(info, TIOCM_RTS, 0);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
info->throttle = 0;
@@ -4223,8 +2984,7 @@ static void cy_stop(struct tty_struct *tty)
{
struct cyclades_card *cinfo;
struct cyclades_port *info = tty->driver_data;
- void __iomem *base_addr;
- int chip, channel, index;
+ int channel;
unsigned long flags;
#ifdef CY_DEBUG_OTHER
@@ -4237,16 +2997,9 @@ static void cy_stop(struct tty_struct *tty)
cinfo = info->card;
channel = info->line - cinfo->first_line;
if (!cy_is_Z(cinfo)) {
- index = cinfo->bus_index;
- chip = channel >> 2;
- channel &= 0x03;
- base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index);
-
spin_lock_irqsave(&cinfo->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index),
- (u_char)(channel & 0x0003)); /* index channel */
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) & ~CyTxRdy);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy);
spin_unlock_irqrestore(&cinfo->card_lock, flags);
}
} /* cy_stop */
@@ -4255,8 +3008,7 @@ static void cy_start(struct tty_struct *tty)
{
struct cyclades_card *cinfo;
struct cyclades_port *info = tty->driver_data;
- void __iomem *base_addr;
- int chip, channel, index;
+ int channel;
unsigned long flags;
#ifdef CY_DEBUG_OTHER
@@ -4268,17 +3020,10 @@ static void cy_start(struct tty_struct *tty)
cinfo = info->card;
channel = info->line - cinfo->first_line;
- index = cinfo->bus_index;
if (!cy_is_Z(cinfo)) {
- chip = channel >> 2;
- channel &= 0x03;
- base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index);
-
spin_lock_irqsave(&cinfo->card_lock, flags);
- cy_writeb(base_addr + (CyCAR << index),
- (u_char) (channel & 0x0003)); /* index channel */
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) | CyTxRdy);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy);
spin_unlock_irqrestore(&cinfo->card_lock, flags);
}
} /* cy_start */
@@ -4298,17 +3043,82 @@ static void cy_hangup(struct tty_struct *tty)
return;
cy_flush_buffer(tty);
- shutdown(info);
- info->port.count = 0;
-#ifdef CY_DEBUG_COUNT
- printk(KERN_DEBUG "cyc:cy_hangup (%d): setting count to 0\n",
- current->pid);
-#endif
- info->port.tty = NULL;
- info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
- wake_up_interruptible(&info->port.open_wait);
+ cy_shutdown(info, tty);
+ tty_port_hangup(&info->port);
} /* cy_hangup */
+static int cyy_carrier_raised(struct tty_port *port)
+{
+ struct cyclades_port *info = container_of(port, struct cyclades_port,
+ port);
+ struct cyclades_card *cinfo = info->card;
+ unsigned long flags;
+ int channel = info->line - cinfo->first_line;
+ u32 cd;
+
+ spin_lock_irqsave(&cinfo->card_lock, flags);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cd = cyy_readb(info, CyMSVR1) & CyDCD;
+ spin_unlock_irqrestore(&cinfo->card_lock, flags);
+
+ return cd;
+}
+
+static void cyy_dtr_rts(struct tty_port *port, int raise)
+{
+ struct cyclades_port *info = container_of(port, struct cyclades_port,
+ port);
+ struct cyclades_card *cinfo = info->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cinfo->card_lock, flags);
+ cyy_change_rts_dtr(info, raise ? TIOCM_RTS | TIOCM_DTR : 0,
+ raise ? 0 : TIOCM_RTS | TIOCM_DTR);
+ spin_unlock_irqrestore(&cinfo->card_lock, flags);
+}
+
+static int cyz_carrier_raised(struct tty_port *port)
+{
+ struct cyclades_port *info = container_of(port, struct cyclades_port,
+ port);
+
+ return readl(&info->u.cyz.ch_ctrl->rs_status) & C_RS_DCD;
+}
+
+static void cyz_dtr_rts(struct tty_port *port, int raise)
+{
+ struct cyclades_port *info = container_of(port, struct cyclades_port,
+ port);
+ struct cyclades_card *cinfo = info->card;
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
+ int ret, channel = info->line - cinfo->first_line;
+ u32 rs;
+
+ rs = readl(&ch_ctrl->rs_control);
+ if (raise)
+ rs |= C_RS_RTS | C_RS_DTR;
+ else
+ rs &= ~(C_RS_RTS | C_RS_DTR);
+ cy_writel(&ch_ctrl->rs_control, rs);
+ ret = cyz_issue_cmd(cinfo, channel, C_CM_IOCTLM, 0L);
+ if (ret != 0)
+ printk(KERN_ERR "%s: retval on ttyC%d was %x\n",
+ __func__, info->line, ret);
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "%s: raising Z DTR\n", __func__);
+#endif
+}
+
+static const struct tty_port_operations cyy_port_ops = {
+ .carrier_raised = cyy_carrier_raised,
+ .dtr_rts = cyy_dtr_rts,
+};
+
+static const struct tty_port_operations cyz_port_ops = {
+ .carrier_raised = cyz_carrier_raised,
+ .dtr_rts = cyz_dtr_rts,
+};
+
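
These two tty_port_operations tables are what allow the driver to drop its hand-rolled carrier and DTR handling: the tty_port core (tty_port_block_til_ready() on open, the dtr_rts path on open/close) calls ->carrier_raised() and ->dtr_rts() on the driver's behalf. A minimal, entirely hypothetical example of wiring a port to such ops:

#include <linux/tty.h>

/* Hypothetical hardware: carrier always present, no DTR/RTS to drive. */
static int dummy_carrier_raised(struct tty_port *port)
{
        return 1;                       /* report CD asserted */
}

static void dummy_dtr_rts(struct tty_port *port, int raise)
{
        /* nothing to do on this imaginary device */
}

static const struct tty_port_operations dummy_port_ops = {
        .carrier_raised = dummy_carrier_raised,
        .dtr_rts        = dummy_dtr_rts,
};

static void dummy_port_setup(struct tty_port *port)
{
        tty_port_init(port);
        port->ops = &dummy_port_ops;    /* consulted by tty_port_block_til_ready() */
}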
/*
* ---------------------------------------------------------------------
* cy_init() and friends
@@ -4320,8 +3130,7 @@ static void cy_hangup(struct tty_struct *tty)
static int __devinit cy_init_card(struct cyclades_card *cinfo)
{
struct cyclades_port *info;
- unsigned int port;
- unsigned short chip_number;
+ unsigned int channel, port;
spin_lock_init(&cinfo->card_lock);
cinfo->intr_enabled = 0;
@@ -4333,9 +3142,9 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
return -ENOMEM;
}
- for (port = cinfo->first_line; port < cinfo->first_line + cinfo->nports;
- port++) {
- info = &cinfo->ports[port - cinfo->first_line];
+ for (channel = 0, port = cinfo->first_line; channel < cinfo->nports;
+ channel++, port++) {
+ info = &cinfo->ports[channel];
tty_port_init(&info->port);
info->magic = CYCLADES_MAGIC;
info->card = cinfo;
@@ -4348,7 +3157,17 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
init_waitqueue_head(&info->delta_msr_wait);
if (cy_is_Z(cinfo)) {
+ struct FIRM_ID *firm_id = cinfo->base_addr + ID_ADDRESS;
+ struct ZFW_CTRL *zfw_ctrl;
+
+ info->port.ops = &cyz_port_ops;
info->type = PORT_STARTECH;
+
+ zfw_ctrl = cinfo->base_addr +
+ (readl(&firm_id->zfwctrl_addr) & 0xfffff);
+ info->u.cyz.ch_ctrl = &zfw_ctrl->ch_ctrl[channel];
+ info->u.cyz.buf_ctrl = &zfw_ctrl->buf_ctrl[channel];
+
if (cinfo->hw_ver == ZO_V1)
info->xmit_fifo_size = CYZ_FIFO_SIZE;
else
@@ -4358,17 +3177,20 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
cyz_rx_restart, (unsigned long)info);
#endif
} else {
+ unsigned short chip_number;
int index = cinfo->bus_index;
+
+ info->port.ops = &cyy_port_ops;
info->type = PORT_CIRRUS;
info->xmit_fifo_size = CyMAX_CHAR_FIFO;
info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS;
info->cor2 = CyETC;
info->cor3 = 0x08; /* _very_ small rcv threshold */
- chip_number = (port - cinfo->first_line) / 4;
- info->chip_rev = readb(cinfo->base_addr +
- (cy_chip_offset[chip_number] << index) +
- (CyGFRCR << index));
+ chip_number = channel / CyPORTS_PER_CHIP;
+ info->u.cyy.base_addr = cinfo->base_addr +
+ (cy_chip_offset[chip_number] << index);
+ info->chip_rev = cyy_readb(info, CyGFRCR);
if (info->chip_rev >= CD1400_REV_J) {
/* It is a CD1400 rev. J or later */
@@ -5059,8 +3881,14 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
}
cy_card[card_no].num_chips = nchan / CyPORTS_PER_CHIP;
} else {
+ struct FIRM_ID __iomem *firm_id = addr2 + ID_ADDRESS;
+ struct ZFW_CTRL __iomem *zfw_ctrl;
+
+ zfw_ctrl = addr2 + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
+
cy_card[card_no].hw_ver = mailbox;
cy_card[card_no].num_chips = (unsigned int)-1;
+ cy_card[card_no].board_ctrl = &zfw_ctrl->board_ctrl;
#ifdef CONFIG_CYZ_INTR
/* allocate IRQ only if board has an IRQ */
if (irq != 0 && irq != 255) {
@@ -5190,18 +4018,30 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
for (j = 0; j < cy_card[i].nports; j++) {
info = &cy_card[i].ports[j];
- if (info->port.count)
+ if (info->port.count) {
+ /* XXX is the ldisc num worth this? */
+ struct tty_struct *tty;
+ struct tty_ldisc *ld;
+ int num = 0;
+ tty = tty_port_tty_get(&info->port);
+ if (tty) {
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ num = ld->ops->num;
+ tty_ldisc_deref(ld);
+ }
+ tty_kref_put(tty);
+ }
seq_printf(m, "%3d %8lu %10lu %8lu "
- "%10lu %8lu %9lu %6ld\n", info->line,
+ "%10lu %8lu %9lu %6d\n", info->line,
(cur_jifs - info->idle_stats.in_use) /
HZ, info->idle_stats.xmit_bytes,
(cur_jifs - info->idle_stats.xmit_idle)/
HZ, info->idle_stats.recv_bytes,
(cur_jifs - info->idle_stats.recv_idle)/
HZ, info->idle_stats.overruns,
- /* FIXME: double check locking */
- (long)info->port.tty->ldisc->ops->num);
- else
+ num);
+ } else
seq_printf(m, "%3d %8lu %10lu %8lu "
"%10lu %8lu %9lu %6ld\n",
info->line, 0L, 0L, 0L, 0L, 0L, 0L, 0L);
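
The /proc hunk above replaces a bare info->port.tty->ldisc dereference with proper reference counting: take a tty reference via tty_port_tty_get(), then an ldisc reference via tty_ldisc_ref(), and drop both when done. The same pattern reduced to a helper (a sketch, not code from the patch):

/* Return the current line-discipline number for a tty_port, or 0 if the
 * port has no tty or no ldisc attached right now. */
static int port_ldisc_num(struct tty_port *port)
{
        struct tty_struct *tty;
        struct tty_ldisc *ld;
        int num = 0;

        tty = tty_port_tty_get(port);           /* takes a kref on the tty */
        if (!tty)
                return 0;

        ld = tty_ldisc_ref(tty);                /* may fail during ldisc changeover */
        if (ld) {
                num = ld->ops->num;
                tty_ldisc_deref(ld);
        }

        tty_kref_put(tty);
        return num;
}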
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index cd0ba51f7c80..0d8c5788b8e4 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -44,8 +44,8 @@
* want to register another driver on the same PCI id.
*/
static const struct pci_device_id pci_tbl[] = {
- { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(AMD, 0x7443), 0, },
+ { PCI_VDEVICE(AMD, 0x746b), 0, },
{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
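
PCI_VDEVICE(vendor, device) is shorthand for a pci_device_id entry that matches one vendor/device pair and wildcards the subsystem IDs, so the table is equivalent to the open-coded form it replaces. For reference, the amd-rng entry expands approximately as sketched here (example_tbl is just an illustrative name):

#include <linux/module.h>
#include <linux/pci.h>

/* PCI_VDEVICE(AMD, 0x7443) supplies, in order:
 *   PCI_VENDOR_ID_AMD, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0
 * i.e. vendor/device fixed, subvendor/subdevice wildcarded, class zero.
 * The trailing 0 fills driver_data. */
static const struct pci_device_id example_tbl[] = {
        { PCI_VDEVICE(AMD, 0x7443), 0, },
        { 0, },                         /* terminate list */
};
MODULE_DEVICE_TABLE(pci, example_tbl);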
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 64d513f68368..4c4d4e140f98 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -46,8 +46,7 @@
* want to register another driver on the same PCI id.
*/
static const struct pci_device_id pci_tbl[] = {
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index afa8813e737a..645237bda682 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -822,6 +822,7 @@ static const struct file_operations zero_fops = {
* - permits private mappings, "copies" are taken of the source of zeros
*/
static struct backing_dev_info zero_bdi = {
+ .name = "char/mem",
.capabilities = BDI_CAP_MAP_COPY,
};
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8c7444857a4b..d8a9255e1a3f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -240,6 +240,7 @@
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
+#include <linux/fips.h>
#ifdef CONFIG_GENERIC_HARDIRQS
# include <linux/irq.h>
@@ -413,6 +414,7 @@ struct entropy_store {
unsigned add_ptr;
int entropy_count;
int input_rotate;
+ __u8 *last_data;
};
static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -852,12 +854,21 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
{
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ unsigned long flags;
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
while (nbytes) {
extract_buf(r, tmp);
+
+ if (r->last_data) {
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
i = min_t(int, nbytes, EXTRACT_SIZE);
memcpy(buf, tmp, i);
nbytes -= i;
@@ -940,6 +951,9 @@ static void init_std_data(struct entropy_store *r)
now = ktime_get_real();
mix_pool_bytes(r, &now, sizeof(now));
mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ /* Enable continuous test in fips mode */
+ if (fips_enabled)
+ r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
}
static int rand_initialize(void)
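
The random.c change implements the FIPS 140-2 continuous self-test: when fips_enabled, every EXTRACT_SIZE block pulled from a pool is compared with the previous one, and the kernel panics if they are identical, because an RNG repeating its output is treated as unrecoverably broken. The idea in isolation, as a userspace-flavoured sketch rather than the kernel code:

#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE 10                   /* stands in for EXTRACT_SIZE */

static unsigned char last_block[BLOCK_SIZE];
static int have_last;

/* Continuous test: refuse to hand out a block identical to the last one. */
static void continuous_test(const unsigned char *blk)
{
        if (have_last && memcmp(blk, last_block, BLOCK_SIZE) == 0)
                abort();                /* the kernel version calls panic() */
        memcpy(last_block, blk, BLOCK_SIZE);
        have_last = 1;
}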
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 6062b62800fd..b3ec9b10e292 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -1,7 +1,7 @@
/*
* Driver for TANBAC TB0219 base board.
*
- * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,7 +28,7 @@
#include <asm/vr41xx/giu.h>
#include <asm/vr41xx/tb0219.h>
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("TANBAC TB0219 base board driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index a19e935847b0..913aa8d3f1c5 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -867,15 +867,22 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_wait_idle(tty);
/*
- * Shutdown the current line discipline, and reset it to N_TTY.
- *
- * FIXME: this MUST get fixed for the new reflocking
+ * Now kill off the ldisc
*/
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ /* Force an oops if we mess this up */
+ tty->ldisc = NULL;
+
+ /* Ensure the next open requests the N_TTY ldisc */
+ tty_set_termios_ldisc(tty, N_TTY);
- tty_ldisc_reinit(tty);
/* This will need doing differently if we need to lock */
if (o_tty)
tty_ldisc_release(o_tty, NULL);
+
+ /* And the memory resources remaining (buffers, termios) will be
+ disposed of when the kref hits zero */
}
/**
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c
index 54c837288d19..e69de29bb2d1 100644
--- a/drivers/char/vr41xx_giu.c
+++ b/drivers/char/vr41xx_giu.c
@@ -1,680 +0,0 @@
-/*
- * Driver for NEC VR4100 series General-purpose I/O Unit.
- *
- * Copyright (C) 2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2003-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/smp_lock.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-#include <asm/vr41xx/giu.h>
-#include <asm/vr41xx/irq.h>
-#include <asm/vr41xx/vr41xx.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
-MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
-MODULE_LICENSE("GPL");
-
-static int major; /* default is dynamic major device number */
-module_param(major, int, 0);
-MODULE_PARM_DESC(major, "Major device number");
-
-#define GIUIOSELL 0x00
-#define GIUIOSELH 0x02
-#define GIUPIODL 0x04
-#define GIUPIODH 0x06
-#define GIUINTSTATL 0x08
-#define GIUINTSTATH 0x0a
-#define GIUINTENL 0x0c
-#define GIUINTENH 0x0e
-#define GIUINTTYPL 0x10
-#define GIUINTTYPH 0x12
-#define GIUINTALSELL 0x14
-#define GIUINTALSELH 0x16
-#define GIUINTHTSELL 0x18
-#define GIUINTHTSELH 0x1a
-#define GIUPODATL 0x1c
-#define GIUPODATEN 0x1c
-#define GIUPODATH 0x1e
- #define PIOEN0 0x0100
- #define PIOEN1 0x0200
-#define GIUPODAT 0x1e
-#define GIUFEDGEINHL 0x20
-#define GIUFEDGEINHH 0x22
-#define GIUREDGEINHL 0x24
-#define GIUREDGEINHH 0x26
-
-#define GIUUSEUPDN 0x1e0
-#define GIUTERMUPDN 0x1e2
-
-#define GPIO_HAS_PULLUPDOWN_IO 0x0001
-#define GPIO_HAS_OUTPUT_ENABLE 0x0002
-#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
-
-static spinlock_t giu_lock;
-static unsigned long giu_flags;
-static unsigned int giu_nr_pins;
-
-static void __iomem *giu_base;
-
-#define giu_read(offset) readw(giu_base + (offset))
-#define giu_write(offset, value) writew((value), giu_base + (offset))
-
-#define GPIO_PIN_OF_IRQ(irq) ((irq) - GIU_IRQ_BASE)
-#define GIUINT_HIGH_OFFSET 16
-#define GIUINT_HIGH_MAX 32
-
-static inline uint16_t giu_set(uint16_t offset, uint16_t set)
-{
- uint16_t data;
-
- data = giu_read(offset);
- data |= set;
- giu_write(offset, data);
-
- return data;
-}
-
-static inline uint16_t giu_clear(uint16_t offset, uint16_t clear)
-{
- uint16_t data;
-
- data = giu_read(offset);
- data &= ~clear;
- giu_write(offset, data);
-
- return data;
-}
-
-static void ack_giuint_low(unsigned int irq)
-{
- giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
-}
-
-static void mask_giuint_low(unsigned int irq)
-{
- giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
-}
-
-static void mask_ack_giuint_low(unsigned int irq)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(irq);
- giu_clear(GIUINTENL, 1 << pin);
- giu_write(GIUINTSTATL, 1 << pin);
-}
-
-static void unmask_giuint_low(unsigned int irq)
-{
- giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
-}
-
-static struct irq_chip giuint_low_irq_chip = {
- .name = "GIUINTL",
- .ack = ack_giuint_low,
- .mask = mask_giuint_low,
- .mask_ack = mask_ack_giuint_low,
- .unmask = unmask_giuint_low,
-};
-
-static void ack_giuint_high(unsigned int irq)
-{
- giu_write(GIUINTSTATH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_giuint_high(unsigned int irq)
-{
- giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_ack_giuint_high(unsigned int irq)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
- giu_clear(GIUINTENH, 1 << pin);
- giu_write(GIUINTSTATH, 1 << pin);
-}
-
-static void unmask_giuint_high(unsigned int irq)
-{
- giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
-}
-
-static struct irq_chip giuint_high_irq_chip = {
- .name = "GIUINTH",
- .ack = ack_giuint_high,
- .mask = mask_giuint_high,
- .mask_ack = mask_ack_giuint_high,
- .unmask = unmask_giuint_high,
-};
-
-static int giu_get_irq(unsigned int irq)
-{
- uint16_t pendl, pendh, maskl, maskh;
- int i;
-
- pendl = giu_read(GIUINTSTATL);
- pendh = giu_read(GIUINTSTATH);
- maskl = giu_read(GIUINTENL);
- maskh = giu_read(GIUINTENH);
-
- maskl &= pendl;
- maskh &= pendh;
-
- if (maskl) {
- for (i = 0; i < 16; i++) {
- if (maskl & (1 << i))
- return GIU_IRQ(i);
- }
- } else if (maskh) {
- for (i = 0; i < 16; i++) {
- if (maskh & (1 << i))
- return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
- }
- }
-
- printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
- maskl, pendl, maskh, pendh);
-
- atomic_inc(&irq_err_count);
-
- return -EINVAL;
-}
-
-void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_t signal)
-{
- uint16_t mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPL, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELL, mask);
- else
- giu_clear(GIUINTHTSELL, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHL, mask);
- giu_clear(GIUREDGEINHL, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- default:
- giu_set(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- }
- }
- set_irq_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPL, mask);
- giu_clear(GIUINTHTSELL, mask);
- set_irq_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPH, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELH, mask);
- else
- giu_clear(GIUINTHTSELH, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHH, mask);
- giu_clear(GIUREDGEINHH, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- default:
- giu_set(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- }
- }
- set_irq_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPH, mask);
- giu_clear(GIUINTHTSELH, mask);
- set_irq_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
-
-void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
-{
- uint16_t mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELL, mask);
- else
- giu_clear(GIUINTALSELL, mask);
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELH, mask);
- else
- giu_clear(GIUINTALSELH, mask);
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
-
-gpio_data_t vr41xx_gpio_get_pin(unsigned int pin)
-{
- uint16_t reg, mask;
-
- if (pin >= giu_nr_pins)
- return GPIO_DATA_INVAL;
-
- if (pin < 16) {
- reg = giu_read(GIUPIODL);
- mask = (uint16_t)1 << pin;
- } else if (pin < 32) {
- reg = giu_read(GIUPIODH);
- mask = (uint16_t)1 << (pin - 16);
- } else if (pin < 48) {
- reg = giu_read(GIUPODATL);
- mask = (uint16_t)1 << (pin - 32);
- } else {
- reg = giu_read(GIUPODATH);
- mask = (uint16_t)1 << (pin - 48);
- }
-
- if (reg & mask)
- return GPIO_DATA_HIGH;
-
- return GPIO_DATA_LOW;
-}
-EXPORT_SYMBOL_GPL(vr41xx_gpio_get_pin);
-
-int vr41xx_gpio_set_pin(unsigned int pin, gpio_data_t data)
-{
- uint16_t offset, mask, reg;
- unsigned long flags;
-
- if (pin >= giu_nr_pins)
- return -EINVAL;
-
- if (pin < 16) {
- offset = GIUPIODL;
- mask = (uint16_t)1 << pin;
- } else if (pin < 32) {
- offset = GIUPIODH;
- mask = (uint16_t)1 << (pin - 16);
- } else if (pin < 48) {
- offset = GIUPODATL;
- mask = (uint16_t)1 << (pin - 32);
- } else {
- offset = GIUPODATH;
- mask = (uint16_t)1 << (pin - 48);
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (data == GPIO_DATA_HIGH)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vr41xx_gpio_set_pin);
-
-int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir)
-{
- uint16_t offset, mask, reg;
- unsigned long flags;
-
- if (pin >= giu_nr_pins)
- return -EINVAL;
-
- if (pin < 16) {
- offset = GIUIOSELL;
- mask = (uint16_t)1 << pin;
- } else if (pin < 32) {
- offset = GIUIOSELH;
- mask = (uint16_t)1 << (pin - 16);
- } else {
- if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
- offset = GIUPODATEN;
- mask = (uint16_t)1 << (pin - 32);
- } else {
- switch (pin) {
- case 48:
- offset = GIUPODATH;
- mask = PIOEN0;
- break;
- case 49:
- offset = GIUPODATH;
- mask = PIOEN1;
- break;
- default:
- return -EINVAL;
- }
- }
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (dir == GPIO_OUTPUT)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vr41xx_gpio_set_direction);
-
-int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
-{
- uint16_t reg, mask;
- unsigned long flags;
-
- if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO)
- return -EPERM;
-
- if (pin >= 15)
- return -EINVAL;
-
- mask = (uint16_t)1 << pin;
-
- spin_lock_irqsave(&giu_lock, flags);
-
- if (pull == GPIO_PULL_UP || pull == GPIO_PULL_DOWN) {
- reg = giu_read(GIUTERMUPDN);
- if (pull == GPIO_PULL_UP)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(GIUTERMUPDN, reg);
-
- reg = giu_read(GIUUSEUPDN);
- reg |= mask;
- giu_write(GIUUSEUPDN, reg);
- } else {
- reg = giu_read(GIUUSEUPDN);
- reg &= ~mask;
- giu_write(GIUUSEUPDN, reg);
- }
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown);
-
-static ssize_t gpio_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
-{
- unsigned int pin;
- char value = '0';
-
- pin = iminor(file->f_path.dentry->d_inode);
- if (pin >= giu_nr_pins)
- return -EBADF;
-
- if (vr41xx_gpio_get_pin(pin) == GPIO_DATA_HIGH)
- value = '1';
-
- if (len <= 0)
- return -EFAULT;
-
- if (put_user(value, buf))
- return -EFAULT;
-
- return 1;
-}
-
-static ssize_t gpio_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- unsigned int pin;
- size_t i;
- char c;
- int retval = 0;
-
- pin = iminor(file->f_path.dentry->d_inode);
- if (pin >= giu_nr_pins)
- return -EBADF;
-
- for (i = 0; i < len; i++) {
- if (get_user(c, data + i))
- return -EFAULT;
-
- switch (c) {
- case '0':
- retval = vr41xx_gpio_set_pin(pin, GPIO_DATA_LOW);
- break;
- case '1':
- retval = vr41xx_gpio_set_pin(pin, GPIO_DATA_HIGH);
- break;
- case 'D':
- printk(KERN_INFO "GPIO%d: pull down\n", pin);
- retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DOWN);
- break;
- case 'd':
- printk(KERN_INFO "GPIO%d: pull up/down disable\n", pin);
- retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DISABLE);
- break;
- case 'I':
- printk(KERN_INFO "GPIO%d: input\n", pin);
- retval = vr41xx_gpio_set_direction(pin, GPIO_INPUT);
- break;
- case 'O':
- printk(KERN_INFO "GPIO%d: output\n", pin);
- retval = vr41xx_gpio_set_direction(pin, GPIO_OUTPUT);
- break;
- case 'o':
- printk(KERN_INFO "GPIO%d: output disable\n", pin);
- retval = vr41xx_gpio_set_direction(pin, GPIO_OUTPUT_DISABLE);
- break;
- case 'P':
- printk(KERN_INFO "GPIO%d: pull up\n", pin);
- retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_UP);
- break;
- case 'p':
- printk(KERN_INFO "GPIO%d: pull up/down disable\n", pin);
- retval = vr41xx_gpio_pullupdown(pin, GPIO_PULL_DISABLE);
- break;
- default:
- break;
- }
-
- if (retval < 0)
- break;
- }
-
- return i;
-}
-
-static int gpio_open(struct inode *inode, struct file *file)
-{
- unsigned int pin;
-
- cycle_kernel_lock();
- pin = iminor(inode);
- if (pin >= giu_nr_pins)
- return -EBADF;
-
- return nonseekable_open(inode, file);
-}
-
-static int gpio_release(struct inode *inode, struct file *file)
-{
- unsigned int pin;
-
- pin = iminor(inode);
- if (pin >= giu_nr_pins)
- return -EBADF;
-
- return 0;
-}
-
-static const struct file_operations gpio_fops = {
- .owner = THIS_MODULE,
- .read = gpio_read,
- .write = gpio_write,
- .open = gpio_open,
- .release = gpio_release,
-};
-
-static int __devinit giu_probe(struct platform_device *dev)
-{
- struct resource *res;
- unsigned int trigger, i, pin;
- struct irq_chip *chip;
- int irq, retval;
-
- switch (dev->id) {
- case GPIO_50PINS_PULLUPDOWN:
- giu_flags = GPIO_HAS_PULLUPDOWN_IO;
- giu_nr_pins = 50;
- break;
- case GPIO_36PINS:
- giu_nr_pins = 36;
- break;
- case GPIO_48PINS_EDGE_SELECT:
- giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
- giu_nr_pins = 48;
- break;
- default:
- printk(KERN_ERR "GIU: unknown ID %d\n", dev->id);
- return -ENODEV;
- }
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
-
- giu_base = ioremap(res->start, res->end - res->start + 1);
- if (!giu_base)
- return -ENOMEM;
-
- retval = register_chrdev(major, "GIU", &gpio_fops);
- if (retval < 0) {
- iounmap(giu_base);
- giu_base = NULL;
- return retval;
- }
-
- if (major == 0) {
- major = retval;
- printk(KERN_INFO "GIU: major number %d\n", major);
- }
-
- spin_lock_init(&giu_lock);
-
- giu_write(GIUINTENL, 0);
- giu_write(GIUINTENH, 0);
-
- trigger = giu_read(GIUINTTYPH) << 16;
- trigger |= giu_read(GIUINTTYPL);
- for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
- pin = GPIO_PIN_OF_IRQ(i);
- if (pin < GIUINT_HIGH_OFFSET)
- chip = &giuint_low_irq_chip;
- else
- chip = &giuint_high_irq_chip;
-
- if (trigger & (1 << pin))
- set_irq_chip_and_handler(i, chip, handle_edge_irq);
- else
- set_irq_chip_and_handler(i, chip, handle_level_irq);
-
- }
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0 || irq >= nr_irqs)
- return -EBUSY;
-
- return cascade_irq(irq, giu_get_irq);
-}
-
-static int __devexit giu_remove(struct platform_device *dev)
-{
- if (giu_base) {
- iounmap(giu_base);
- giu_base = NULL;
- }
-
- return 0;
-}
-
-static struct platform_driver giu_device_driver = {
- .probe = giu_probe,
- .remove = __devexit_p(giu_remove),
- .driver = {
- .name = "GIU",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init vr41xx_giu_init(void)
-{
- return platform_driver_register(&giu_device_driver);
-}
-
-static void __exit vr41xx_giu_exit(void)
-{
- platform_driver_unregister(&giu_device_driver);
-}
-
-module_init(vr41xx_giu_init);
-module_exit(vr41xx_giu_exit);
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 9ffb05f4095d..93c2322feab7 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -161,7 +161,7 @@ static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
if (periodic)
sh_tmu_write(p, TCOR, delta);
else
- sh_tmu_write(p, TCOR, 0);
+ sh_tmu_write(p, TCOR, 0xffffffff);
sh_tmu_write(p, TCNT, delta);
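The one-shot fix above leans on the TMU reload behaviour: when TCNT underflows it is reloaded from TCOR, so leaving TCOR at 0 would make the channel underflow again immediately (this reload behaviour is stated here as an assumption, not taken from the patch). A small sketch of the resulting programming sequence, using the driver's own helpers:

/* Sketch only; relies on sh_tmu_write()/struct sh_tmu_priv from this driver. */
static void tmu_oneshot_sketch(struct sh_tmu_priv *p, unsigned long delta)
{
	sh_tmu_write(p, TCOR, 0xffffffff);	/* park the reload value at max */
	sh_tmu_write(p, TCNT, delta);		/* count down 'delta' ticks once */
}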
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7fc58af748b4..a7ef465c83b9 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -65,7 +65,7 @@ struct cpu_dbs_info_s {
int cpu;
unsigned int enable:1;
};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
@@ -138,7 +138,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
- struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+ struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
freq->cpu);
struct cpufreq_policy *policy;
@@ -298,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
struct cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(cpu_dbs_info, j);
+ dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +388,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cputime64_t cur_wall_time, cur_idle_time;
unsigned int idle_time, wall_time;
- j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
@@ -528,7 +528,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int j;
int rc;
- this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
switch (event) {
case CPUFREQ_GOV_START:
@@ -548,7 +548,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d1729353..36f292a7bd01 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,7 +73,7 @@ struct cpu_dbs_info_s {
unsigned int enable:1,
sample_type:1;
};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
@@ -151,7 +151,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_hi, freq_lo;
unsigned int index = 0;
unsigned int jiffies_total, jiffies_hi, jiffies_lo;
- struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+ struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ policy->cpu);
if (!dbs_info->freq_table) {
dbs_info->freq_lo = 0;
@@ -196,7 +197,7 @@ static void ondemand_powersave_bias_init(void)
{
int i;
for_each_online_cpu(i) {
- struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+ struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, i);
dbs_info->freq_table = cpufreq_frequency_get_table(i);
dbs_info->freq_lo = 0;
}
@@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
struct cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(cpu_dbs_info, j);
+ dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
@@ -391,7 +392,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
unsigned int load, load_freq;
int freq_avg;
- j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
@@ -548,7 +549,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int j;
int rc;
- this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
switch (event) {
case CPUFREQ_GOV_START:
@@ -570,7 +571,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
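Both governors keep their state in per-CPU variables; the rename above simply gives each governor a distinct identifier (cs_* versus od_*) while the access pattern stays the same. A short sketch of that pattern, with hypothetical names:

#include <linux/percpu.h>

struct example_dbs_state {
	unsigned int enable:1;
};

/* each file defines its own, uniquely named, per-CPU instance */
static DEFINE_PER_CPU(struct example_dbs_state, example_dbs_info);

static void example_enable(int cpu)
{
	/* and reaches it through per_cpu(name, cpu) */
	struct example_dbs_state *s = &per_cpu(example_dbs_info, cpu);

	s->enable = 1;
}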
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 070357aaedbc..d956ce90c9ea 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,7 +4,7 @@
menuconfig DMADEVICES
bool "DMA Engine support"
- depends on !HIGHMEM64G && HAS_DMA
+ depends on HAS_DMA
help
DMA engines can do asynchronous data transfers without
involving the host CPU. Currently, this framework can be
@@ -108,7 +108,7 @@ config NET_DMA
config ASYNC_TX_DMA
bool "Async_tx: Offload support for the async_tx api"
- depends on DMA_ENGINE
+ depends on DMA_ENGINE && !HIGHMEM64G
help
This allows the async_tx api to take advantage of offload engines for
memcpy, memset, xor, and raid6 p+q operations. If your platform has
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index fb7da5141e96..cec1ec0b7d00 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -114,7 +114,7 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
for ( ; i < start + len; i++)
buf[i] = PATTERN_SRC | PATTERN_COPY
- | (~i & PATTERN_COUNT_MASK);;
+ | (~i & PATTERN_COUNT_MASK);
for ( ; i < test_buf_size; i++)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
buf++;
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 98c9a847bf51..933c143b6a74 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1399,8 +1399,9 @@ static void dw_shutdown(struct platform_device *pdev)
clk_disable(dw->clk);
}
-static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
+static int dw_suspend_noirq(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct dw_dma *dw = platform_get_drvdata(pdev);
dw_dma_off(platform_get_drvdata(pdev));
@@ -1408,23 +1409,27 @@ static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
return 0;
}
-static int dw_resume_early(struct platform_device *pdev)
+static int dw_resume_noirq(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct dw_dma *dw = platform_get_drvdata(pdev);
clk_enable(dw->clk);
dma_writel(dw, CFG, DW_CFG_DMA_EN);
return 0;
-
}
+static struct dev_pm_ops dw_dev_pm_ops = {
+ .suspend_noirq = dw_suspend_noirq,
+ .resume_noirq = dw_resume_noirq,
+};
+
static struct platform_driver dw_driver = {
.remove = __exit_p(dw_remove),
.shutdown = dw_shutdown,
- .suspend_late = dw_suspend_late,
- .resume_early = dw_resume_early,
.driver = {
.name = "dw_dmac",
+ .pm = &dw_dev_pm_ops,
},
};
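The legacy .suspend_late/.resume_early platform hooks give way to a dev_pm_ops table whose _noirq callbacks take a struct device; the same conversion recurs in txx9dmac further down. A bare-bones sketch of the shape, with hypothetical names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend_noirq(struct device *dev)
{
	/* quiesce the hardware; driver state comes from dev_get_drvdata(dev) */
	return 0;
}

static int example_resume_noirq(struct device *dev)
{
	/* re-enable clocks and restore registers */
	return 0;
}

static struct dev_pm_ops example_pm_ops = {
	.suspend_noirq	= example_suspend_noirq,
	.resume_noirq	= example_resume_noirq,
};

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.pm	= &example_pm_ops,	/* replaces .suspend_late/.resume_early */
	},
};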
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f18d1bde0439..ef87a8984145 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -12,6 +12,11 @@
 * also fit for MPC8560, MPC8555, MPC8548, MPC8641, etc.
 * The support for MPC8349 DMA controller is also added.
*
+ * This driver instructs the DMA controller to issue the PCI Read Multiple
+ * command for PCI read operations, instead of using the default PCI Read Line
+ * command. Please be aware that this setting may result in read pre-fetching
+ * on some platforms.
+ *
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -49,9 +54,10 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
case FSL_DMA_IP_83XX:
/* Set the channel to below modes:
* EOTIE - End-of-transfer interrupt enable
+ * PRC_RM - PCI read multiple
*/
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
- 32);
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
+ | FSL_DMA_MR_PRC_RM, 32);
break;
}
@@ -136,15 +142,16 @@ static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
static void dma_start(struct fsl_dma_chan *fsl_chan)
{
- u32 mr_set = 0;;
+ u32 mr_set = 0;
if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
mr_set |= FSL_DMA_MR_EMP_EN;
- } else
+ } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
& ~FSL_DMA_MR_EMP_EN, 32);
+ }
if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
mr_set |= FSL_DMA_MR_EMS_EN;
@@ -871,9 +878,9 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
- new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
case FSL_DMA_IP_83XX:
+ new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
}
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 4f21a512d848..dc7f26865797 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -38,6 +38,7 @@
/* Special MR definition for MPC8349 */
#define FSL_DMA_MR_EOTIE 0x00000080
+#define FSL_DMA_MR_PRC_RM 0x00000800
#define FSL_DMA_SR_CH 0x00000020
#define FSL_DMA_SR_PE 0x00000010
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 2225bb6ba3d1..bef6d4d7a1fd 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -40,26 +40,28 @@ MODULE_AUTHOR("Intel Corporation");
static struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v1 platforms */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
- { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
+ { PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
/* I/OAT v2 platforms */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
/* I/OAT v3 platforms */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
+
struct ioat_device {
struct pci_dev *pdev;
void __iomem *iobase;
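PCI_VDEVICE(INTEL, id) expands to the vendor/device pair with the subvendor and subdevice fields set to PCI_ANY_ID, so the shortened table above matches exactly what the PCI_DEVICE() form matched; the added MODULE_DEVICE_TABLE() exports it for module autoloading. Illustration with a hypothetical device ID:

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_DEVICE_ID	0x1234	/* hypothetical, for illustration only */

static struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, EXAMPLE_DEVICE_ID) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);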
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index ddab94f51224..3f23eabe09f2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1176,7 +1176,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
- dma_dev->max_xor = 8; ;
+ dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
}
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 88dab52926f4..7837930146a4 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1291,17 +1291,18 @@ static void txx9dmac_shutdown(struct platform_device *pdev)
txx9dmac_off(ddev);
}
-static int txx9dmac_suspend_late(struct platform_device *pdev,
- pm_message_t mesg)
+static int txx9dmac_suspend_noirq(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
txx9dmac_off(ddev);
return 0;
}
-static int txx9dmac_resume_early(struct platform_device *pdev)
+static int txx9dmac_resume_noirq(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
u32 mcr;
@@ -1314,6 +1315,11 @@ static int txx9dmac_resume_early(struct platform_device *pdev)
}
+static struct dev_pm_ops txx9dmac_dev_pm_ops = {
+ .suspend_noirq = txx9dmac_suspend_noirq,
+ .resume_noirq = txx9dmac_resume_noirq,
+};
+
static struct platform_driver txx9dmac_chan_driver = {
.remove = __exit_p(txx9dmac_chan_remove),
.driver = {
@@ -1324,10 +1330,9 @@ static struct platform_driver txx9dmac_chan_driver = {
static struct platform_driver txx9dmac_driver = {
.remove = __exit_p(txx9dmac_remove),
.shutdown = txx9dmac_shutdown,
- .suspend_late = txx9dmac_suspend_late,
- .resume_early = txx9dmac_resume_early,
.driver = {
.name = "txx9dmac",
+ .pm = &txx9dmac_dev_pm_ops,
},
};
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c36bf40568cf..858fe6037223 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -754,13 +754,13 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt)
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
int bit;
- enum dev_type edac_cap = EDAC_NONE;
+ enum dev_type edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
? 19
: 17;
- if (pvt->dclr0 >> BIT(bit))
+ if (pvt->dclr0 & BIT(bit))
edac_cap = EDAC_FLAG_SECDED;
return edac_cap;
@@ -1269,7 +1269,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
if (channels == 0)
channels = 1;
- debugf0("DIMM count= %d\n", channels);
+ debugf0("MCT channel count: %d\n", channels);
return channels;
@@ -2966,7 +2966,12 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
" Use of the override can cause "
"unknown side effects.\n");
ret = -ENODEV;
- }
+ } else
+ /*
+ * enable further driver loading if ECC enable is
+ * overridden.
+ */
+ ret = 0;
} else {
amd64_printk(KERN_INFO,
"ECC is enabled by BIOS, Proceeding "
@@ -3006,7 +3011,6 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- mci->edac_cap = EDAC_FLAG_NONE;
if (pvt->nbcap & K8_NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
@@ -3052,7 +3056,7 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
if (!pvt)
goto err_exit;
- pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl);
+ pvt->mc_node_id = get_node_id(dram_f2_ctl);
pvt->dram_f2_ctl = dram_f2_ctl;
pvt->ext_model = boot_cpu_data.x86_model >> 4;
@@ -3179,8 +3183,7 @@ static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
{
int ret = 0;
- debugf0("(MC node=%d,mc_type='%s')\n",
- get_mc_node_id_from_pdev(pdev),
+ debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
get_amd_family_name(mc_type->driver_data));
ret = pci_enable_device(pdev);
@@ -3319,15 +3322,17 @@ static int __init amd64_edac_init(void)
err = amd64_init_2nd_stage(pvt_lookup[nb]);
if (err)
- goto err_exit;
+ goto err_2nd_stage;
}
amd64_setup_pci_device();
return 0;
+err_2nd_stage:
+ debugf0("2nd stage failed\n");
+
err_exit:
- debugf0("'finish_setup' stage failed\n");
pci_unregister_driver(&amd64_pci_driver);
return err;
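The dclr0 fix above is the usual single-bit test: BIT(n) builds the mask and & tests it, whereas the old '>> BIT(bit)' form shifted by the mask value instead of testing it. In isolation:

#include <linux/bitops.h>
#include <linux/types.h>

static bool example_bit_is_set(u32 reg, unsigned int n)
{
	return reg & BIT(n);	/* mask-and-test, not 'reg >> BIT(n)' */
}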
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index a159957e167b..ba73015af8e4 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -444,7 +444,7 @@ enum {
#define K8_MSR_MC4ADDR 0x0412
/* AMD sets the first MC device at device ID 0x18. */
-static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev)
+static inline int get_node_id(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) - 0x18;
}
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 3493c6bdb820..871c13b4c148 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -150,6 +150,8 @@ enum mem_type {
MEM_FB_DDR2, /* fully buffered DDR2 */
MEM_RDDR2, /* Registered DDR2 RAM */
MEM_XDR, /* Rambus XDR */
+ MEM_DDR3, /* DDR3 RAM */
+ MEM_RDDR3, /* Registered DDR3 RAM */
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -167,6 +169,8 @@ enum mem_type {
#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ad218fe4942d..e1d4ce083481 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -94,7 +94,9 @@ static const char *mem_types[] = {
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
[MEM_RDDR2] = "Registered-DDR2",
- [MEM_XDR] = "XDR"
+ [MEM_XDR] = "XDR",
+ [MEM_DDR3] = "Unbuffered-DDR3",
+ [MEM_RDDR3] = "Registered-DDR3"
};
static const char *dev_types[] = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 7c8c2d72916f..3f2ccfc6407c 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -757,6 +757,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_RDDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_RDDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
@@ -769,6 +772,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_DDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_DDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 135b3539a030..52432ee7c4b9 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -53,6 +53,7 @@
#define DSC_SDTYPE_DDR 0x02000000
#define DSC_SDTYPE_DDR2 0x03000000
+#define DSC_SDTYPE_DDR3 0x07000000
#define DSC_X32_EN 0x00000020
/* Err_Int_En */
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 543fccac81bb..f74edae5cb4c 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -196,8 +196,8 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
int channel, bandwidth = 0;
- fw_iso_resource_manage(card, generation, 1ULL << 31,
- &channel, &bandwidth, true);
+ fw_iso_resource_manage(card, generation, 1ULL << 31, &channel,
+ &bandwidth, true, card->bm_transaction_data);
if (channel == 31) {
card->broadcast_channel_allocated = true;
device_for_each_child(card->device, (void *)(long)generation,
@@ -230,7 +230,6 @@ static void fw_card_bm_work(struct work_struct *work)
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
- __be32 lock_data[2];
spin_lock_irqsave(&card->lock, flags);
@@ -273,22 +272,23 @@ static void fw_card_bm_work(struct work_struct *work)
goto pick_me;
}
- lock_data[0] = cpu_to_be32(0x3f);
- lock_data[1] = cpu_to_be32(local_id);
+ card->bm_transaction_data[0] = cpu_to_be32(0x3f);
+ card->bm_transaction_data[1] = cpu_to_be32(local_id);
spin_unlock_irqrestore(&card->lock, flags);
rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
- lock_data, sizeof(lock_data));
+ card->bm_transaction_data,
+ sizeof(card->bm_transaction_data));
if (rcode == RCODE_GENERATION)
/* Another bus reset, BM work has been rescheduled. */
goto out;
if (rcode == RCODE_COMPLETE &&
- lock_data[0] != cpu_to_be32(0x3f)) {
+ card->bm_transaction_data[0] != cpu_to_be32(0x3f)) {
/* Somebody else is BM. Only act as IRM. */
if (local_id == irm_id)
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index d1d30c615b0f..ced186d7e9a9 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -125,6 +125,7 @@ struct iso_resource {
int generation;
u64 channels;
s32 bandwidth;
+ __be32 transaction_data[2];
struct iso_resource_event *e_alloc, *e_dealloc;
};
@@ -1049,7 +1050,8 @@ static void iso_resource_work(struct work_struct *work)
r->channels, &channel, &bandwidth,
todo == ISO_RES_ALLOC ||
todo == ISO_RES_REALLOC ||
- todo == ISO_RES_ALLOC_ONCE);
+ todo == ISO_RES_ALLOC_ONCE,
+ r->transaction_data);
/*
* Is this generation outdated already? As long as this resource sticks
* in the idr, it will be scheduled again for a newer generation or at
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 166f19c6d38d..110e731f5574 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -177,9 +177,8 @@ EXPORT_SYMBOL(fw_iso_context_stop);
*/
static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
- int bandwidth, bool allocate)
+ int bandwidth, bool allocate, __be32 data[2])
{
- __be32 data[2];
int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
/*
@@ -215,9 +214,9 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
}
static int manage_channel(struct fw_card *card, int irm_id, int generation,
- u32 channels_mask, u64 offset, bool allocate)
+ u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
{
- __be32 data[2], c, all, old;
+ __be32 c, all, old;
int i, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -260,7 +259,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
}
static void deallocate_channel(struct fw_card *card, int irm_id,
- int generation, int channel)
+ int generation, int channel, __be32 buffer[2])
{
u32 mask;
u64 offset;
@@ -269,7 +268,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
- manage_channel(card, irm_id, generation, mask, offset, false);
+ manage_channel(card, irm_id, generation, mask, offset, false, buffer);
}
/**
@@ -298,7 +297,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
*/
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth,
- bool allocate)
+ bool allocate, __be32 buffer[2])
{
u32 channels_hi = channels_mask; /* channels 31...0 */
u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
@@ -310,10 +309,12 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
if (channels_hi)
c = manage_channel(card, irm_id, generation, channels_hi,
- CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
+ allocate, buffer);
if (channels_lo && c < 0) {
c = manage_channel(card, irm_id, generation, channels_lo,
- CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
+ allocate, buffer);
if (c >= 0)
c += 32;
}
@@ -325,12 +326,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
if (*bandwidth == 0)
return;
- ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+ ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
+ allocate, buffer);
if (ret < 0)
*bandwidth = 0;
if (allocate && ret < 0 && c >= 0) {
- deallocate_channel(card, irm_id, generation, c);
+ deallocate_channel(card, irm_id, generation, c, buffer);
*channel = ret;
}
}
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c3cfc647e5e3..6052816be353 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -120,7 +120,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
void fw_iso_resource_manage(struct fw_card *card, int generation,
- u64 channels_mask, int *channel, int *bandwidth, bool allocate);
+ u64 channels_mask, int *channel, int *bandwidth,
+ bool allocate, __be32 buffer[2]);
/* -topology */
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 24c45635376a..8d51568ee143 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -201,6 +201,12 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
/*
+ * There is no transport protocol limit to the CDB length, but we implement
+ * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
+ */
+#define SBP2_MAX_CDB_SIZE 16
+
+/*
* The default maximum s/g segment size of a FireWire controller is
* usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
* be quadlet-aligned, we set the length limit to 0xffff & ~3.
@@ -312,7 +318,7 @@ struct sbp2_command_orb {
struct sbp2_pointer next;
struct sbp2_pointer data_descriptor;
__be32 misc;
- u8 command_block[12];
+ u8 command_block[SBP2_MAX_CDB_SIZE];
} request;
struct scsi_cmnd *cmd;
scsi_done_fn_t done;
@@ -1146,6 +1152,8 @@ static int sbp2_probe(struct device *dev)
if (fw_device_enable_phys_dma(device) < 0)
goto fail_shost_put;
+ shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
+
if (scsi_add_host(shost, &unit->device) < 0)
goto fail_shost_put;
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 18d65fb42ee7..b24ccb546e59 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -237,6 +237,22 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
return count;
}
+static void generate_smi(void *_smi_cmd)
+{
+ struct smi_cmd *smi_cmd = _smi_cmd;
+
+ /* generate SMI */
+ asm volatile (
+ "outb %b0,%w1"
+ : /* no output args */
+ : "a" (smi_cmd->command_code),
+ "d" (smi_cmd->command_address),
+ "b" (smi_cmd->ebx),
+ "c" (smi_cmd->ecx)
+ : "memory"
+ );
+}
+
/**
* dcdbas_smi_request: generate SMI request
*
@@ -244,9 +260,6 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
*/
int dcdbas_smi_request(struct smi_cmd *smi_cmd)
{
- cpumask_var_t old_mask;
- int ret = 0;
-
if (smi_cmd->magic != SMI_CMD_MAGIC) {
dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
__func__);
@@ -254,35 +267,9 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
}
/* SMI requires CPU 0 */
- if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_copy(old_mask, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpumask_of(0));
- if (smp_processor_id() != 0) {
- dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
- __func__);
- ret = -EBUSY;
- goto out;
- }
-
- /* generate SMI */
- asm volatile (
- "outb %b0,%w1"
- : /* no output args */
- : "a" (smi_cmd->command_code),
- "d" (smi_cmd->command_address),
- "b" (smi_cmd->ebx),
- "c" (smi_cmd->ecx)
- : "memory"
- );
-
-out:
- set_cpus_allowed_ptr(current, old_mask);
- free_cpumask_var(old_mask);
- return ret;
+ smp_call_function_single(0, generate_smi, smi_cmd, 1);
+ return 0;
}
-
/**
* smi_request_store:
*
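Rather than migrating the calling task onto CPU 0 with set_cpus_allowed_ptr(), the SMI is now raised through smp_call_function_single(), which runs the callback on the chosen CPU with interrupts disabled and, with wait set, returns only once it has finished. A minimal usage sketch with a hypothetical callback:

#include <linux/smp.h>

static void example_on_cpu0(void *info)
{
	/* runs on CPU 0, interrupts disabled */
}

static int example_request(void *payload)
{
	/* final argument: wait for the callback to complete before returning */
	return smp_call_function_single(0, example_on_cpu0, payload, 1);
}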
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 24c84ae81527..60710783408b 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -20,21 +20,32 @@ static char dmi_empty_string[] = " ";
*/
static int dmi_initialized;
+static u8 *dmi_end;
+
static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
if (s) {
s--;
- while (s > 0 && *bp) {
+ while (s > 0) {
+ /* Catch corrupt table ends before we walk into
+ undefined mappings */
+ if (bp >= dmi_end)
+ goto invalid;
+ if (*bp == 0)
+ break;
bp += strlen(bp) + 1;
s--;
}
if (*bp != 0) {
- size_t len = strlen(bp)+1;
+ /* Use strnlen in case we have a BIOS table error */
+ size_t len = strnlen(bp, dmi_end - bp) + 1;
size_t cmp_len = len > 8 ? 8 : len;
+ if (bp == dmi_end)
+ goto invalid;
if (!memcmp(bp, dmi_empty_string, cmp_len))
return dmi_empty_string;
return bp;
@@ -42,6 +53,9 @@ static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
}
return "";
+invalid:
+ printk(KERN_ERR "dmi: table overrun - corrupt DMI tables ?\n");
+ return dmi_empty_string;
}
static char * __init dmi_string(const struct dmi_header *dm, u8 s)
@@ -109,9 +123,11 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
if (buf == NULL)
return -1;
+ /* Remember the end mark for string walking */
+ dmi_end = buf + dmi_len;
dmi_table(buf, dmi_len, dmi_num, decode, NULL);
-
dmi_iounmap(buf, dmi_len);
+
return 0;
}
@@ -154,7 +170,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
char *s;
int is_ff = 1, is_00 = 1, i;
- if (dmi_ident[slot])
+ if (dmi_ident[slot] || d > dmi_end - 16)
return;
for (i = 0; i < 16 && (is_ff || is_00); i++) {
@@ -182,7 +198,7 @@ static void __init dmi_save_type(const struct dmi_header *dm, int slot, int inde
const u8 *d = (u8*) dm + index;
char *s;
- if (dmi_ident[slot])
+ if (dmi_ident[slot] || d == dmi_end)
return;
s = dmi_alloc(4);
@@ -260,6 +276,11 @@ static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
struct dmi_device *dev;
void * data;
+ if (((u8 *)dm) + dm->length > dmi_end) {
+ printk(KERN_ERR "dmi_save_ipmi_device: table overrun.\n");
+ return;
+ }
+
data = dmi_alloc(dm->length);
if (data == NULL) {
printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
@@ -285,6 +306,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
{
const u8 *d = (u8*) dm + 5;
+ if (d >= dmi_end)
+ return;
+
/* Skip disabled device */
if ((*d & 0x80) == 0)
return;
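The string walk is now bounded by dmi_end: lengths are taken with strnlen() against the remaining space, so a corrupt table that lost its terminator can no longer run past the mapping. A condensed sketch of bounded walking, with hypothetical names:

#include <linux/string.h>

static const char *example_next_string(const char *p, const char *end)
{
	size_t len;

	if (p >= end)
		return NULL;		/* already past the table */

	len = strnlen(p, end - p);	/* never scans beyond 'end' */
	if (p + len >= end)
		return NULL;		/* unterminated string */

	return p + len + 1;		/* skip the string and its NUL */
}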
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3582c39f9725..96dda81c9228 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -79,6 +79,12 @@ config GPIO_XILINX
help
Say yes here to support the Xilinx FPGA GPIO device
+config GPIO_VR41XX
+ tristate "NEC VR4100 series General-purpose I/O Unit support"
+ depends on CPU_VR41XX
+ help
+ Say yes here to support the NEC VR4100 series General-purpose I/O Unit.
+
comment "I2C GPIO expanders:"
config GPIO_MAX732X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ef90203e8f3c..9244c6fcd8be 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_GPIO_PL061) += pl061.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
+obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index aa8e7cb020d9..4ee4c8367a3f 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -109,6 +109,16 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
writeb(!!value << offset, chip->base + (1 << (offset + 2)));
}
+static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+
+ if (chip->irq_base == (unsigned) -1)
+ return -EINVAL;
+
+ return chip->irq_base + offset;
+}
+
/*
* PL061 GPIO IRQ
*/
@@ -200,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
desc->chip->ack(irq);
list_for_each(ptr, chip_list) {
unsigned long pending;
- int gpio;
+ int offset;
chip = list_entry(ptr, struct pl061_gpio, list);
pending = readb(chip->base + GPIOMIS);
@@ -209,8 +219,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
if (pending == 0)
continue;
- for_each_bit(gpio, &pending, PL061_GPIO_NR)
- generic_handle_irq(gpio_to_irq(gpio));
+ for_each_bit(offset, &pending, PL061_GPIO_NR)
+ generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
desc->chip->unmask(irq);
}
@@ -221,7 +231,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
struct pl061_gpio *chip;
struct list_head *chip_list;
int ret, irq, i;
- static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)];
+ static DECLARE_BITMAP(init_irq, NR_IRQS);
pdata = dev->dev.platform_data;
if (pdata == NULL)
@@ -251,6 +261,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
chip->gc.direction_output = pl061_direction_output;
chip->gc.get = pl061_get_value;
chip->gc.set = pl061_set_value;
+ chip->gc.to_irq = pl061_to_irq;
chip->gc.base = pdata->gpio_base;
chip->gc.ngpio = PL061_GPIO_NR;
chip->gc.label = dev_name(&dev->dev);
@@ -280,6 +291,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
if (chip_list == NULL) {
+ clear_bit(irq, init_irq);
ret = -ENOMEM;
goto iounmap;
}
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
new file mode 100644
index 000000000000..b70e06133e78
--- /dev/null
+++ b/drivers/gpio/vr41xx_giu.c
@@ -0,0 +1,586 @@
+/*
+ * Driver for NEC VR4100 series General-purpose I/O Unit.
+ *
+ * Copyright (C) 2002 MontaVista Software Inc.
+ * Author: Yoichi Yuasa <source@mvista.com>
+ * Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/vr41xx/giu.h>
+#include <asm/vr41xx/irq.h>
+#include <asm/vr41xx/vr41xx.h>
+
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
+MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
+MODULE_LICENSE("GPL");
+
+#define GIUIOSELL 0x00
+#define GIUIOSELH 0x02
+#define GIUPIODL 0x04
+#define GIUPIODH 0x06
+#define GIUINTSTATL 0x08
+#define GIUINTSTATH 0x0a
+#define GIUINTENL 0x0c
+#define GIUINTENH 0x0e
+#define GIUINTTYPL 0x10
+#define GIUINTTYPH 0x12
+#define GIUINTALSELL 0x14
+#define GIUINTALSELH 0x16
+#define GIUINTHTSELL 0x18
+#define GIUINTHTSELH 0x1a
+#define GIUPODATL 0x1c
+#define GIUPODATEN 0x1c
+#define GIUPODATH 0x1e
+ #define PIOEN0 0x0100
+ #define PIOEN1 0x0200
+#define GIUPODAT 0x1e
+#define GIUFEDGEINHL 0x20
+#define GIUFEDGEINHH 0x22
+#define GIUREDGEINHL 0x24
+#define GIUREDGEINHH 0x26
+
+#define GIUUSEUPDN 0x1e0
+#define GIUTERMUPDN 0x1e2
+
+#define GPIO_HAS_PULLUPDOWN_IO 0x0001
+#define GPIO_HAS_OUTPUT_ENABLE 0x0002
+#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
+
+enum {
+ GPIO_INPUT,
+ GPIO_OUTPUT,
+};
+
+static DEFINE_SPINLOCK(giu_lock);
+static unsigned long giu_flags;
+
+static void __iomem *giu_base;
+
+#define giu_read(offset) readw(giu_base + (offset))
+#define giu_write(offset, value) writew((value), giu_base + (offset))
+
+#define GPIO_PIN_OF_IRQ(irq) ((irq) - GIU_IRQ_BASE)
+#define GIUINT_HIGH_OFFSET 16
+#define GIUINT_HIGH_MAX 32
+
+static inline u16 giu_set(u16 offset, u16 set)
+{
+ u16 data;
+
+ data = giu_read(offset);
+ data |= set;
+ giu_write(offset, data);
+
+ return data;
+}
+
+static inline u16 giu_clear(u16 offset, u16 clear)
+{
+ u16 data;
+
+ data = giu_read(offset);
+ data &= ~clear;
+ giu_write(offset, data);
+
+ return data;
+}
+
+static void ack_giuint_low(unsigned int irq)
+{
+ giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
+}
+
+static void mask_giuint_low(unsigned int irq)
+{
+ giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+}
+
+static void mask_ack_giuint_low(unsigned int irq)
+{
+ unsigned int pin;
+
+ pin = GPIO_PIN_OF_IRQ(irq);
+ giu_clear(GIUINTENL, 1 << pin);
+ giu_write(GIUINTSTATL, 1 << pin);
+}
+
+static void unmask_giuint_low(unsigned int irq)
+{
+ giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+}
+
+static struct irq_chip giuint_low_irq_chip = {
+ .name = "GIUINTL",
+ .ack = ack_giuint_low,
+ .mask = mask_giuint_low,
+ .mask_ack = mask_ack_giuint_low,
+ .unmask = unmask_giuint_low,
+};
+
+static void ack_giuint_high(unsigned int irq)
+{
+ giu_write(GIUINTSTATH,
+ 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+}
+
+static void mask_giuint_high(unsigned int irq)
+{
+ giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+}
+
+static void mask_ack_giuint_high(unsigned int irq)
+{
+ unsigned int pin;
+
+ pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
+ giu_clear(GIUINTENH, 1 << pin);
+ giu_write(GIUINTSTATH, 1 << pin);
+}
+
+static void unmask_giuint_high(unsigned int irq)
+{
+ giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+}
+
+static struct irq_chip giuint_high_irq_chip = {
+ .name = "GIUINTH",
+ .ack = ack_giuint_high,
+ .mask = mask_giuint_high,
+ .mask_ack = mask_ack_giuint_high,
+ .unmask = unmask_giuint_high,
+};
+
+static int giu_get_irq(unsigned int irq)
+{
+ u16 pendl, pendh, maskl, maskh;
+ int i;
+
+ pendl = giu_read(GIUINTSTATL);
+ pendh = giu_read(GIUINTSTATH);
+ maskl = giu_read(GIUINTENL);
+ maskh = giu_read(GIUINTENH);
+
+ maskl &= pendl;
+ maskh &= pendh;
+
+ if (maskl) {
+ for (i = 0; i < 16; i++) {
+ if (maskl & (1 << i))
+ return GIU_IRQ(i);
+ }
+ } else if (maskh) {
+ for (i = 0; i < 16; i++) {
+ if (maskh & (1 << i))
+ return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
+ }
+ }
+
+ printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+ maskl, pendl, maskh, pendh);
+
+ atomic_inc(&irq_err_count);
+
+ return -EINVAL;
+}
+
+void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
+ irq_signal_t signal)
+{
+ u16 mask;
+
+ if (pin < GIUINT_HIGH_OFFSET) {
+ mask = 1 << pin;
+ if (trigger != IRQ_TRIGGER_LEVEL) {
+ giu_set(GIUINTTYPL, mask);
+ if (signal == IRQ_SIGNAL_HOLD)
+ giu_set(GIUINTHTSELL, mask);
+ else
+ giu_clear(GIUINTHTSELL, mask);
+ if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
+ switch (trigger) {
+ case IRQ_TRIGGER_EDGE_FALLING:
+ giu_set(GIUFEDGEINHL, mask);
+ giu_clear(GIUREDGEINHL, mask);
+ break;
+ case IRQ_TRIGGER_EDGE_RISING:
+ giu_clear(GIUFEDGEINHL, mask);
+ giu_set(GIUREDGEINHL, mask);
+ break;
+ default:
+ giu_set(GIUFEDGEINHL, mask);
+ giu_set(GIUREDGEINHL, mask);
+ break;
+ }
+ }
+ set_irq_chip_and_handler(GIU_IRQ(pin),
+ &giuint_low_irq_chip,
+ handle_edge_irq);
+ } else {
+ giu_clear(GIUINTTYPL, mask);
+ giu_clear(GIUINTHTSELL, mask);
+ set_irq_chip_and_handler(GIU_IRQ(pin),
+ &giuint_low_irq_chip,
+ handle_level_irq);
+ }
+ giu_write(GIUINTSTATL, mask);
+ } else if (pin < GIUINT_HIGH_MAX) {
+ mask = 1 << (pin - GIUINT_HIGH_OFFSET);
+ if (trigger != IRQ_TRIGGER_LEVEL) {
+ giu_set(GIUINTTYPH, mask);
+ if (signal == IRQ_SIGNAL_HOLD)
+ giu_set(GIUINTHTSELH, mask);
+ else
+ giu_clear(GIUINTHTSELH, mask);
+ if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
+ switch (trigger) {
+ case IRQ_TRIGGER_EDGE_FALLING:
+ giu_set(GIUFEDGEINHH, mask);
+ giu_clear(GIUREDGEINHH, mask);
+ break;
+ case IRQ_TRIGGER_EDGE_RISING:
+ giu_clear(GIUFEDGEINHH, mask);
+ giu_set(GIUREDGEINHH, mask);
+ break;
+ default:
+ giu_set(GIUFEDGEINHH, mask);
+ giu_set(GIUREDGEINHH, mask);
+ break;
+ }
+ }
+ set_irq_chip_and_handler(GIU_IRQ(pin),
+ &giuint_high_irq_chip,
+ handle_edge_irq);
+ } else {
+ giu_clear(GIUINTTYPH, mask);
+ giu_clear(GIUINTHTSELH, mask);
+ set_irq_chip_and_handler(GIU_IRQ(pin),
+ &giuint_high_irq_chip,
+ handle_level_irq);
+ }
+ giu_write(GIUINTSTATH, mask);
+ }
+}
+EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
+
+void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
+{
+ u16 mask;
+
+ if (pin < GIUINT_HIGH_OFFSET) {
+ mask = 1 << pin;
+ if (level == IRQ_LEVEL_HIGH)
+ giu_set(GIUINTALSELL, mask);
+ else
+ giu_clear(GIUINTALSELL, mask);
+ giu_write(GIUINTSTATL, mask);
+ } else if (pin < GIUINT_HIGH_MAX) {
+ mask = 1 << (pin - GIUINT_HIGH_OFFSET);
+ if (level == IRQ_LEVEL_HIGH)
+ giu_set(GIUINTALSELH, mask);
+ else
+ giu_clear(GIUINTALSELH, mask);
+ giu_write(GIUINTSTATH, mask);
+ }
+}
+EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
+
+static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir)
+{
+ u16 offset, mask, reg;
+ unsigned long flags;
+
+ if (pin >= chip->ngpio)
+ return -EINVAL;
+
+ if (pin < 16) {
+ offset = GIUIOSELL;
+ mask = 1 << pin;
+ } else if (pin < 32) {
+ offset = GIUIOSELH;
+ mask = 1 << (pin - 16);
+ } else {
+ if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
+ offset = GIUPODATEN;
+ mask = 1 << (pin - 32);
+ } else {
+ switch (pin) {
+ case 48:
+ offset = GIUPODATH;
+ mask = PIOEN0;
+ break;
+ case 49:
+ offset = GIUPODATH;
+ mask = PIOEN1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&giu_lock, flags);
+
+ reg = giu_read(offset);
+ if (dir == GPIO_OUTPUT)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ giu_write(offset, reg);
+
+ spin_unlock_irqrestore(&giu_lock, flags);
+
+ return 0;
+}
+
+int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
+{
+ u16 reg, mask;
+ unsigned long flags;
+
+ if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO)
+ return -EPERM;
+
+ if (pin >= 15)
+ return -EINVAL;
+
+ mask = 1 << pin;
+
+ spin_lock_irqsave(&giu_lock, flags);
+
+ if (pull == GPIO_PULL_UP || pull == GPIO_PULL_DOWN) {
+ reg = giu_read(GIUTERMUPDN);
+ if (pull == GPIO_PULL_UP)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ giu_write(GIUTERMUPDN, reg);
+
+ reg = giu_read(GIUUSEUPDN);
+ reg |= mask;
+ giu_write(GIUUSEUPDN, reg);
+ } else {
+ reg = giu_read(GIUUSEUPDN);
+ reg &= ~mask;
+ giu_write(GIUUSEUPDN, reg);
+ }
+
+ spin_unlock_irqrestore(&giu_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown);
+
+static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin)
+{
+ u16 reg, mask;
+
+ if (pin >= chip->ngpio)
+ return -EINVAL;
+
+ if (pin < 16) {
+ reg = giu_read(GIUPIODL);
+ mask = 1 << pin;
+ } else if (pin < 32) {
+ reg = giu_read(GIUPIODH);
+ mask = 1 << (pin - 16);
+ } else if (pin < 48) {
+ reg = giu_read(GIUPODATL);
+ mask = 1 << (pin - 32);
+ } else {
+ reg = giu_read(GIUPODATH);
+ mask = 1 << (pin - 48);
+ }
+
+ if (reg & mask)
+ return 1;
+
+ return 0;
+}
+
+static void vr41xx_gpio_set(struct gpio_chip *chip, unsigned pin,
+ int value)
+{
+ u16 offset, mask, reg;
+ unsigned long flags;
+
+ if (pin >= chip->ngpio)
+ return;
+
+ if (pin < 16) {
+ offset = GIUPIODL;
+ mask = 1 << pin;
+ } else if (pin < 32) {
+ offset = GIUPIODH;
+ mask = 1 << (pin - 16);
+ } else if (pin < 48) {
+ offset = GIUPODATL;
+ mask = 1 << (pin - 32);
+ } else {
+ offset = GIUPODATH;
+ mask = 1 << (pin - 48);
+ }
+
+ spin_lock_irqsave(&giu_lock, flags);
+
+ reg = giu_read(offset);
+ if (value)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ giu_write(offset, reg);
+
+ spin_unlock_irqrestore(&giu_lock, flags);
+}
+
+
+static int vr41xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return giu_set_direction(chip, offset, GPIO_INPUT);
+}
+
+static int vr41xx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ vr41xx_gpio_set(chip, offset, value);
+
+ return giu_set_direction(chip, offset, GPIO_OUTPUT);
+}
+
+static int vr41xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ return GIU_IRQ_BASE + offset;
+}
+
+static struct gpio_chip vr41xx_gpio_chip = {
+ .label = "vr41xx",
+ .owner = THIS_MODULE,
+ .direction_input = vr41xx_gpio_direction_input,
+ .get = vr41xx_gpio_get,
+ .direction_output = vr41xx_gpio_direction_output,
+ .set = vr41xx_gpio_set,
+ .to_irq = vr41xx_gpio_to_irq,
+};
+
+static int __devinit giu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ unsigned int trigger, i, pin;
+ struct irq_chip *chip;
+ int irq, retval;
+
+ switch (pdev->id) {
+ case GPIO_50PINS_PULLUPDOWN:
+ giu_flags = GPIO_HAS_PULLUPDOWN_IO;
+ vr41xx_gpio_chip.ngpio = 50;
+ break;
+ case GPIO_36PINS:
+ vr41xx_gpio_chip.ngpio = 36;
+ break;
+ case GPIO_48PINS_EDGE_SELECT:
+ giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
+ vr41xx_gpio_chip.ngpio = 48;
+ break;
+ default:
+ dev_err(&pdev->dev, "GIU: unknown ID %d\n", pdev->id);
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EBUSY;
+
+ giu_base = ioremap(res->start, res->end - res->start + 1);
+ if (!giu_base)
+ return -ENOMEM;
+
+ vr41xx_gpio_chip.dev = &pdev->dev;
+
+ retval = gpiochip_add(&vr41xx_gpio_chip);
+
+ giu_write(GIUINTENL, 0);
+ giu_write(GIUINTENH, 0);
+
+ trigger = giu_read(GIUINTTYPH) << 16;
+ trigger |= giu_read(GIUINTTYPL);
+ for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
+ pin = GPIO_PIN_OF_IRQ(i);
+ if (pin < GIUINT_HIGH_OFFSET)
+ chip = &giuint_low_irq_chip;
+ else
+ chip = &giuint_high_irq_chip;
+
+ if (trigger & (1 << pin))
+ set_irq_chip_and_handler(i, chip, handle_edge_irq);
+ else
+ set_irq_chip_and_handler(i, chip, handle_level_irq);
+
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 || irq >= nr_irqs)
+ return -EBUSY;
+
+ return cascade_irq(irq, giu_get_irq);
+}
+
+static int __devexit giu_remove(struct platform_device *pdev)
+{
+ if (giu_base) {
+ iounmap(giu_base);
+ giu_base = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver giu_device_driver = {
+ .probe = giu_probe,
+ .remove = __devexit_p(giu_remove),
+ .driver = {
+ .name = "GIU",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init vr41xx_giu_init(void)
+{
+ return platform_driver_register(&giu_device_driver);
+}
+
+static void __exit vr41xx_giu_exit(void)
+{
+ platform_driver_unregister(&giu_device_driver);
+}
+
+module_init(vr41xx_giu_init);
+module_exit(vr41xx_giu_exit);
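With the chip registered through gpiochip_add(), callers reach these pins through the generic gpiolib interface instead of the driver's former chrdev and vr41xx_gpio_set_direction() entry points. A consumer sketch with a hypothetical pin number:

#include <linux/gpio.h>

#define EXAMPLE_PIN	5	/* hypothetical GPIO number, for illustration */

static int example_use_pin(void)
{
	int ret, irq;

	ret = gpio_request(EXAMPLE_PIN, "example");
	if (ret)
		return ret;

	ret = gpio_direction_output(EXAMPLE_PIN, 1);	/* ends up in giu_set_direction() */
	if (ret)
		goto out;

	gpio_set_value(EXAMPLE_PIN, 0);			/* ends up in vr41xx_gpio_set() */

	irq = gpio_to_irq(EXAMPLE_PIN);			/* GIU_IRQ_BASE + pin via .to_irq */
	if (irq < 0)
		ret = irq;
out:
	gpio_free(EXAMPLE_PIN);
	return ret;
}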
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c961fe415aef..39b393d38bb3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -81,6 +81,7 @@ config DRM_I830
config DRM_I915
tristate "i915 driver"
+ depends on AGP_INTEL
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4e89ab08b7b8..fe23f29f7cba 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
@@ -26,4 +27,3 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VIA) +=via/
-obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7d0835226f6e..80cc6d06d61b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -294,10 +294,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
- unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo;
- unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo;
- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf);
- unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
if (hactive < 64 || vactive < 64)
@@ -347,8 +347,8 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
- mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 51c5a050aa73..30d6b99fb302 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_crt.o \
intel_lvds.o \
intel_bios.o \
+ intel_dp.o \
+ intel_dp_i2c.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index e747ac42fe3a..288fc50627e2 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -37,7 +37,7 @@ struct intel_dvo_device {
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
- struct intel_i2c_chan *i2c_bus;
+ struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
@@ -52,7 +52,7 @@ struct intel_dvo_dev_ops {
* Returns NULL if the device does not exist.
*/
bool (*init)(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus);
+ struct i2c_adapter *i2cbus);
/*
* Called to allow the output a chance to create properties after the
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 03d4b4973b02..621815b531db 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -176,19 +176,20 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
{
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -208,10 +209,11 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
{
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -228,8 +230,9 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
struct ch7017_priv *priv;
uint8_t val;
@@ -237,8 +240,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (priv == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
@@ -248,7 +250,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
val != CH7018_DEVICE_ID_VALUE &&
val != CH7019_DEVICE_ID_VALUE) {
DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
- val, i2cbus->adapter.name,i2cbus->slave_addr);
+ val, i2cbus->adapter.name,dvo->slave_addr);
goto fail;
}
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d2fd95dbd034..a9b896289680 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -123,19 +123,20 @@ static char *ch7xxx_get_id(uint8_t vid)
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -152,7 +153,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -161,10 +162,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -178,14 +180,14 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
static bool ch7xxx_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
@@ -196,8 +198,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
if (ch7xxx == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = ch7xxx;
ch7xxx->quiet = true;
@@ -207,7 +208,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
name = ch7xxx_get_id(vendor);
if (!name) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
- vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+ vendor, adapter->name, dvo->slave_addr);
goto out;
}
@@ -217,7 +218,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
if (device != CH7xxx_DID) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
- vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+ vendor, adapter->name, dvo->slave_addr);
goto out;
}
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0c8d375e8e37..aa176f9921fe 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -169,13 +169,14 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[1];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 0,
},
@@ -186,7 +187,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD | I2C_M_NOSTART,
.len = 2,
.buf = in_buf,
@@ -202,7 +203,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
if (!priv->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -211,10 +212,11 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[3];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 3,
.buf = out_buf,
@@ -229,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
if (!priv->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -237,7 +239,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
/** Probes the given bus and slave address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
struct ivch_priv *priv;
uint16_t temp;
@@ -246,8 +248,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
if (priv == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
priv->quiet = true;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 033a4bb070b2..e1c1f7341e5c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -76,19 +76,20 @@ struct sil164_priv {
static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -105,7 +106,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (!sil->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -113,10 +114,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct sil164_priv *sil= dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -130,7 +132,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
if (!sil->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -138,7 +140,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
/* Silicon Image 164 driver for chip on i2c bus */
static bool sil164_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the SIL164 chip on the specified i2c bus */
struct sil164_priv *sil;
@@ -148,8 +150,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
if (sil == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = sil;
sil->quiet = true;
@@ -158,7 +159,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
if (ch != (SIL164_VID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
- ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ ch, adapter->name, dvo->slave_addr);
goto out;
}
@@ -167,7 +168,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
if (ch != (SIL164_DID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
- ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ ch, adapter->name, dvo->slave_addr);
goto out;
}
sil->quiet = false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 207fda806ebf..9ecc907384ec 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -101,19 +101,20 @@ struct tfp410_priv {
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -130,7 +131,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
if (!tfp->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -138,10 +139,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -155,7 +157,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
if (!tfp->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -174,7 +176,7 @@ static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
/* Ti TFP410 driver for chip on i2c bus */
static bool tfp410_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the tfp410 chip on the specified i2c bus */
struct tfp410_priv *tfp;
@@ -184,20 +186,19 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
if (tfp == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = tfp;
tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
- id, i2cbus->adapter.name, i2cbus->slave_addr);
+ id, adapter->name, dvo->slave_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
- id, i2cbus->adapter.name, i2cbus->slave_addr);
+ id, adapter->name, dvo->slave_addr);
goto out;
}
tfp->quiet = false;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98560e1e899a..e3cb4025e323 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,8 +67,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_save_state(dev->pdev);
- i915_save_state(dev);
-
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (i915_gem_idle(dev))
@@ -77,6 +75,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
drm_irq_uninstall(dev);
}
+ i915_save_state(dev);
+
intel_opregion_free(dev, 1);
if (state.event == PM_EVENT_SUSPEND) {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a84f04e8439..bb4c2d387b6c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -306,6 +306,17 @@ typedef struct drm_i915_private {
u32 saveCURBPOS;
u32 saveCURBBASE;
u32 saveCURSIZE;
+ u32 saveDP_B;
+ u32 saveDP_C;
+ u32 saveDP_D;
+ u32 savePIPEA_GMCH_DATA_M;
+ u32 savePIPEB_GMCH_DATA_M;
+ u32 savePIPEA_GMCH_DATA_N;
+ u32 savePIPEB_GMCH_DATA_N;
+ u32 savePIPEA_DP_LINK_M;
+ u32 savePIPEB_DP_LINK_M;
+ u32 savePIPEA_DP_LINK_N;
+ u32 savePIPEB_DP_LINK_N;
struct {
struct drm_mm gtt_space;
@@ -857,6 +868,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd2b8bdffe3f..876b65cb7629 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1006,7 +1006,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+ DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
obj, obj->size, read_domains, write_domain);
#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
@@ -1050,7 +1050,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
}
#if WATCH_BUF
- DRM_INFO("%s: sw_finish %d (%p %d)\n",
+ DRM_INFO("%s: sw_finish %d (%p %zd)\n",
__func__, args->handle, obj, obj->size);
#endif
obj_priv = obj->driver_private;
@@ -2423,7 +2423,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
#if WATCH_BUF
- DRM_INFO("Binding object of size %d at 0x%08x\n",
+ DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
ret = i915_gem_object_get_pages(obj);
@@ -4227,6 +4227,7 @@ i915_gem_lastclose(struct drm_device *dev)
void
i915_gem_load(struct drm_device *dev)
{
+ int i;
drm_i915_private_t *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->mm.active_list_lock);
@@ -4246,6 +4247,18 @@ i915_gem_load(struct drm_device *dev)
else
dev_priv->num_fence_regs = 8;
+ /* Initialize fence registers to zero */
+ if (IS_I965G(dev)) {
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+ } else {
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+ }
+
i915_gem_detect_bit_6_swizzle(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 8d0b943e2c5a..e602614bd3f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -87,7 +87,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
- i915_gem_dump_page(obj_priv->page_list[page],
+ i915_gem_dump_page(obj_priv->pages[page],
chunk, chunk + chunk_len,
obj_priv->gtt_offset +
page * PAGE_SIZE,
@@ -143,7 +143,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
uint32_t *backing_map = NULL;
int bad_count = 0;
- DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
__func__, obj, obj_priv->gtt_offset, handle,
obj->size / 1024);
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+ backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 5c1ceec49f5b..daeae62e1c28 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -114,11 +114,13 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
if (mchbar_addr &&
pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
ret = 0;
goto out_put;
}
+#endif
/* Get some space for it */
ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b86b7b7130c6..228546f6eaa4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -232,7 +232,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
hotplug_work);
struct drm_device *dev = dev_priv->dev;
-
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->hot_plug)
+ (*intel_output->hot_plug) (intel_output);
+ }
+ }
/* Just fire off a uevent and let userspace tell us what to do */
drm_sysfs_hotplug_event(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f6237a0b1133..88bf7521405f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -569,6 +569,19 @@
#define C0DRB3 0x10206
#define C1DRB3 0x10606
+/* Clocking configuration register */
+#define CLKCFG 0x10c00
+#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
+#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
+#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
+#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
+/* this is a guess, could be 5 as well */
+#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_1600_ALT (5 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_MASK (7 << 0)
+
/** GM965 GM45 render standby register */
#define MCHBAR_RENDER_STANDBY 0x111B8
@@ -834,9 +847,25 @@
#define HORIZ_INTERP_MASK (3 << 6)
#define HORIZ_AUTO_SCALE (1 << 5)
#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define PFIT_FILTER_FUZZY (0 << 24)
+#define PFIT_SCALING_AUTO (0 << 26)
+#define PFIT_SCALING_PROGRAMMED (1 << 26)
+#define PFIT_SCALING_PILLAR (2 << 26)
+#define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS 0x61234
#define PFIT_VERT_SCALE_MASK 0xfff00000
#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* Pre-965 */
+#define PFIT_VERT_SCALE_SHIFT 20
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_SHIFT 4
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* 965+ */
+#define PFIT_VERT_SCALE_SHIFT_965 16
+#define PFIT_VERT_SCALE_MASK_965 0x1fff0000
+#define PFIT_HORIZ_SCALE_SHIFT_965 0
+#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
+
#define PFIT_AUTO_RATIOS 0x61238
/* Backlight control */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a98e2831ed31..8d8e083d14ab 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -322,6 +322,20 @@ int i915_save_state(struct drm_device *dev)
dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->saveDP_B = I915_READ(DP_B);
+ dev_priv->saveDP_C = I915_READ(DP_C);
+ dev_priv->saveDP_D = I915_READ(DP_D);
+ dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
+ dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
+ dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
+ dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
+ dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
+ dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
+ dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
+ dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
+ }
/* FIXME: save TV & SDVO state */
/* FBC state */
@@ -404,7 +418,19 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
}
-
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+ I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+ I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+ I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+ I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+ I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+ I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+ I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ }
+
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@@ -518,6 +544,12 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->saveDP_B);
+ I915_WRITE(DP_C, dev_priv->saveDP_C);
+ I915_WRITE(DP_D, dev_priv->saveDP_D);
+ }
/* FIXME: restore TV & SDVO state */
/* FBC info */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index cdd126d068a7..716409a57244 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -99,9 +99,11 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
{
struct bdb_lvds_options *lvds_options;
struct bdb_lvds_lfp_data *lvds_lfp_data;
+ struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
struct bdb_lvds_lfp_data_entry *entry;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
+ int lfp_data_size;
/* Defaults if we can't find VBT info */
dev_priv->lvds_dither = 0;
@@ -119,9 +121,17 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data)
return;
+ lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+ if (!lvds_lfp_data_ptrs)
+ return;
+
dev_priv->lvds_vbt = 1;
- entry = &lvds_lfp_data->data[lvds_options->panel_type];
+ lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ entry = (struct bdb_lvds_lfp_data_entry *)
+ ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
+ lvds_options->panel_type));
dvo_timing = &entry->dvo_timing;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
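
The effect of the intel_bios.c change above: the per-panel LVDS entry is no longer indexed as a fixed-size array element; the entry stride is derived from the VBT's own LFP data-pointer block, so BIOSes with differently sized entries still resolve to the right panel. A worked sketch with hypothetical offsets (0x0100 and 0x0134 are made up; the pointer arithmetic matches the code above):

/*
 * lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset = 0x0100
 * lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset = 0x0134
 *   => lfp_data_size = 0x34 bytes per entry
 *
 * entry = (struct bdb_lvds_lfp_data_entry *)
 *         ((uint8_t *)lvds_lfp_data->data + lfp_data_size * panel_type);
 *
 * so panel_type 2 starts 0x68 bytes into the data block, whatever the
 * entry layout of this particular BIOS happens to be.
 */
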
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3e1c78162119..73e7b9cecac8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -29,6 +29,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_dp.h"
#include "drm_crtc_helper.h"
@@ -127,19 +128,6 @@ struct intel_limit {
#define I9XX_P2_LVDS_FAST 7
#define I9XX_P2_LVDS_SLOW_LIMIT 112000
-#define INTEL_LIMIT_I8XX_DVO_DAC 0
-#define INTEL_LIMIT_I8XX_LVDS 1
-#define INTEL_LIMIT_I9XX_SDVO_DAC 2
-#define INTEL_LIMIT_I9XX_LVDS 3
-#define INTEL_LIMIT_G4X_SDVO 4
-#define INTEL_LIMIT_G4X_HDMI_DAC 5
-#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
-#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
-#define INTEL_LIMIT_IGD_SDVO_DAC 8
-#define INTEL_LIMIT_IGD_LVDS 9
-#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
-#define INTEL_LIMIT_IGDNG_LVDS 11
-
/*The parameter is for SDVO on G4x platform*/
#define G4X_DOT_SDVO_MIN 25000
#define G4X_DOT_SDVO_MAX 270000
@@ -218,6 +206,25 @@ struct intel_limit {
#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
+/*The parameter is for DISPLAY PORT on G4x platform*/
+#define G4X_DOT_DISPLAY_PORT_MIN 161670
+#define G4X_DOT_DISPLAY_PORT_MAX 227000
+#define G4X_N_DISPLAY_PORT_MIN 1
+#define G4X_N_DISPLAY_PORT_MAX 2
+#define G4X_M_DISPLAY_PORT_MIN 97
+#define G4X_M_DISPLAY_PORT_MAX 108
+#define G4X_M1_DISPLAY_PORT_MIN 0x10
+#define G4X_M1_DISPLAY_PORT_MAX 0x12
+#define G4X_M2_DISPLAY_PORT_MIN 0x05
+#define G4X_M2_DISPLAY_PORT_MAX 0x06
+#define G4X_P_DISPLAY_PORT_MIN 10
+#define G4X_P_DISPLAY_PORT_MAX 20
+#define G4X_P1_DISPLAY_PORT_MIN 1
+#define G4X_P1_DISPLAY_PORT_MAX 2
+#define G4X_P2_DISPLAY_PORT_SLOW 10
+#define G4X_P2_DISPLAY_PORT_FAST 10
+#define G4X_P2_DISPLAY_PORT_LIMIT 0
+
/* IGDNG */
/* as we calculate clock using (register_value + 2) for
N/M1/M2, so here the range value for them is (actual_value-2).
@@ -256,8 +263,11 @@ static bool
intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
-static const intel_limit_t intel_limits[] = {
- { /* INTEL_LIMIT_I8XX_DVO_DAC */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
+
+static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -269,8 +279,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I8XX_LVDS */
+};
+
+static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -282,8 +293,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+};
+
+static const intel_limit_t intel_limits_i9xx_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -295,8 +307,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I9XX_LVDS */
+};
+
+static const intel_limit_t intel_limits_i9xx_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -311,9 +324,10 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- },
+};
+
/* below parameter and function is for G4X Chipset Family*/
- { /* INTEL_LIMIT_G4X_SDVO */
+static const intel_limit_t intel_limits_g4x_sdvo = {
.dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
@@ -327,8 +341,9 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_SDVO_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_HDMI_DAC */
+};
+
+static const intel_limit_t intel_limits_g4x_hdmi = {
.dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
@@ -342,8 +357,9 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_HDMI_DAC_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
+};
+
+static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
@@ -365,8 +381,9 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
+};
+
+static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
@@ -388,8 +405,32 @@ static const intel_limit_t intel_limits[] = {
.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGD_SDVO */
+};
+
+static const intel_limit_t intel_limits_g4x_display_port = {
+ .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
+ .max = G4X_DOT_DISPLAY_PORT_MAX },
+ .vco = { .min = G4X_VCO_MIN,
+ .max = G4X_VCO_MAX},
+ .n = { .min = G4X_N_DISPLAY_PORT_MIN,
+ .max = G4X_N_DISPLAY_PORT_MAX },
+ .m = { .min = G4X_M_DISPLAY_PORT_MIN,
+ .max = G4X_M_DISPLAY_PORT_MAX },
+ .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
+ .max = G4X_M1_DISPLAY_PORT_MAX },
+ .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
+ .max = G4X_M2_DISPLAY_PORT_MAX },
+ .p = { .min = G4X_P_DISPLAY_PORT_MIN,
+ .max = G4X_P_DISPLAY_PORT_MAX },
+ .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
+ .max = G4X_P1_DISPLAY_PORT_MAX},
+ .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
+ .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
+ .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
+ .find_pll = intel_find_pll_g4x_dp,
+};
+
+static const intel_limit_t intel_limits_igd_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -401,8 +442,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGD_LVDS */
+};
+
+static const intel_limit_t intel_limits_igd_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -415,8 +457,9 @@ static const intel_limit_t intel_limits[] = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGDNG_SDVO_DAC */
+};
+
+static const intel_limit_t intel_limits_igdng_sdvo = {
.dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
.vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
.n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -429,8 +472,9 @@ static const intel_limit_t intel_limits[] = {
.p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
.p2_fast = IGDNG_P2_SDVO_DAC_FAST },
.find_pll = intel_igdng_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGDNG_LVDS */
+};
+
+static const intel_limit_t intel_limits_igdng_lvds = {
.dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
.vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
.n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -443,16 +487,15 @@ static const intel_limit_t intel_limits[] = {
.p2_slow = IGDNG_P2_LVDS_SLOW,
.p2_fast = IGDNG_P2_LVDS_FAST },
.find_pll = intel_igdng_find_best_PLL,
- },
};
static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
{
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS];
+ limit = &intel_limits_igdng_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC];
+ limit = &intel_limits_igdng_sdvo;
return limit;
}
@@ -467,19 +510,19 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
/* LVDS with dual channel */
- limit = &intel_limits
- [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
+ limit = &intel_limits_g4x_dual_channel_lvds;
else
/* LVDS with single channel */
- limit = &intel_limits
- [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
+ limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
+ limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
+ limit = &intel_limits_g4x_sdvo;
+ } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
- limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ limit = &intel_limits_i9xx_sdvo;
return limit;
}
@@ -495,19 +538,19 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
limit = intel_g4x_limit(crtc);
} else if (IS_I9XX(dev) && !IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
+ limit = &intel_limits_i9xx_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ limit = &intel_limits_i9xx_sdvo;
} else if (IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
+ limit = &intel_limits_igd_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
+ limit = &intel_limits_igd_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
+ limit = &intel_limits_i8xx_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
+ limit = &intel_limits_i8xx_dvo;
}
return limit;
}
@@ -764,6 +807,35 @@ out:
return found;
}
+/* DisplayPort has only two frequencies, 162MHz and 270MHz */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+{
+ intel_clock_t clock;
+ if (target < 200000) {
+ clock.dot = 161670;
+ clock.p = 20;
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 0x01;
+ clock.m = 97;
+ clock.m1 = 0x10;
+ clock.m2 = 0x05;
+ } else {
+ clock.dot = 270000;
+ clock.p = 10;
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 0x02;
+ clock.m = 108;
+ clock.m1 = 0x12;
+ clock.m2 = 0x06;
+ }
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
+}
+
void
intel_wait_for_vblank(struct drm_device *dev)
{
@@ -1541,7 +1613,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
const intel_limit_t *limit;
@@ -1585,6 +1657,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_ANALOG:
is_crt = true;
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
}
num_outputs++;
@@ -1600,6 +1675,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
} else {
refclk = 48000;
}
+
/*
* Returns a set of divisors for the desired target clock with the given
@@ -1662,6 +1738,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
else if (IS_IGDNG(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
+ if (is_dp)
+ dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
if (IS_IGD(dev))
@@ -1809,6 +1887,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
+ if (is_dp)
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
@@ -2475,6 +2555,8 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_B);
}
/* Before G4X SDVOC doesn't have its own detect register */
@@ -2487,7 +2569,11 @@ static void intel_setup_outputs(struct drm_device *dev)
found = intel_sdvo_init(dev, SDVOC);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_C);
}
+ if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+ intel_dp_init(dev, DP_D);
} else
intel_dvo_init(dev);
@@ -2530,6 +2616,11 @@ static void intel_setup_outputs(struct drm_device *dev)
(1 << 1));
clone_mask = (1 << INTEL_OUTPUT_TVOUT);
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ crtc_mask = ((1 << 0) |
+ (1 << 1));
+ clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ break;
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones = intel_connector_clones(dev, clone_mask);
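
The new intel_dp.c added below sizes the DisplayPort link by comparing the mode's byte rate against the aggregate lane bandwidth, using only the two fixed link clocks (162 MHz and 270 MHz) that the G4X DP PLL table above encodes. A standalone sketch of that check, assuming the driver's 3-bytes-per-pixel figure and kHz units (dp_mode_fits is illustrative, not part of the patch):

static int dp_mode_fits(int pixel_clock_khz, int link_clock_khz, int lanes)
{
	/* 3 bytes per pixel post-LUT: 8 bits each of R, G and B */
	int required = pixel_clock_khz * 3;
	int available = link_clock_khz * lanes;

	return required <= available;
}

/*
 * e.g. a 148500 kHz (1080p60) mode needs 445500: it fits two lanes at
 * 270000 (540000 available) but not two lanes at 162000 (324000).
 */
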
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
new file mode 100644
index 000000000000..8f8d37d5663a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -0,0 +1,1153 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_dp.h"
+
+#define DP_LINK_STATUS_SIZE 6
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp_priv {
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ uint32_t save_DP;
+ uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ int dpms_mode;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[4];
+ struct intel_output *intel_output;
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+};
+
+static void
+intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
+
+static void
+intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
+
+static int
+intel_dp_max_lane_count(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int max_lane_count = 4;
+
+ if (dp_priv->dpcd[0] >= 0x11) {
+ max_lane_count = dp_priv->dpcd[2] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
+ }
+ return max_lane_count;
+}
+
+static int
+intel_dp_max_link_bw(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int max_link_bw = dp_priv->dpcd[1];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ default:
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
+
+static int
+intel_dp_link_clock(uint8_t link_bw)
+{
+ if (link_bw == DP_LINK_BW_2_7)
+ return 270000;
+ else
+ return 162000;
+}
+
+/* I think this is a fiction */
+static int
+intel_dp_link_required(int pixel_clock)
+{
+ return pixel_clock * 3;
+}
+
+static int
+intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
+ int max_lanes = intel_dp_max_lane_count(intel_output);
+
+ if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
+
+/* hrawclock is 1/4 the FSB frequency */
+static int
+intel_hrawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t clkcfg;
+
+ clkcfg = I915_READ(CLKCFG);
+ switch (clkcfg & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_400:
+ return 100;
+ case CLKCFG_FSB_533:
+ return 133;
+ case CLKCFG_FSB_667:
+ return 166;
+ case CLKCFG_FSB_800:
+ return 200;
+ case CLKCFG_FSB_1067:
+ return 266;
+ case CLKCFG_FSB_1333:
+ return 333;
+ /* these two are just a guess; one of them might be right */
+ case CLKCFG_FSB_1600:
+ case CLKCFG_FSB_1600_ALT:
+ return 400;
+ default:
+ return 133;
+ }
+}
+
+static int
+intel_dp_aux_ch(struct intel_output *intel_output,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint32_t output_reg = dp_priv->output_reg;
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+ int i;
+ int recv_bytes;
+ uint32_t ctl;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try;
+
+ /* The clock divider is based off the hrawclk,
+ * and would like to run at 2MHz. So, take the
+ * hrawclk value and divide by 2 and use that
+ */
+ aux_clock_divider = intel_hrawclk(dev) / 2;
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4) {
+ uint32_t d = pack_aux(send + i, send_bytes - i);
+
+ I915_WRITE(ch_data + i, d);
+ }
+
+ ctl = (DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl, ctl);
+ (void) I915_READ(ch_ctl);
+ for (;;) {
+ udelay(100);
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ }
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl, (ctl |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR));
+ (void) I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ printk(KERN_ERR "dp_aux_ch not done status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ printk(KERN_ERR "dp_aux_ch receive error status 0x%08x\n", status);
+ return -EIO;
+ }
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ printk(KERN_ERR "dp_aux_ch timeout status 0x%08x\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4) {
+ uint32_t d = I915_READ(ch_data + i);
+
+ unpack_aux(d, recv + i, recv_bytes - i);
+ }
+
+ return recv_bytes;
+}
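/*
 * Worked example of the divider above (not part of the original patch,
 * and the exact AUX bit-clock semantics are an assumption): with an
 * 800 MT/s FSB the CLKCFG register added to i915_reg.h reads back
 * CLKCFG_FSB_800, intel_hrawclk() returns 200 (MHz), and
 * aux_clock_divider = 200 / 2 = 100, which is the value the comment
 * above expects to yield roughly a 2 MHz AUX channel clock.
 */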
+
+/* Write data to the aux channel in native mode */
+static int
+intel_dp_aux_native_write(struct intel_output *intel_output,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address;
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+intel_dp_aux_native_write_1(struct intel_output *intel_output,
+ uint16_t address, uint8_t byte)
+{
+ return intel_dp_aux_native_write(intel_output, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+intel_dp_aux_native_read(struct intel_output *intel_output,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ msg[0] = AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EPROTO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0];
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+}
+
+static int
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_bytes)
+{
+ struct intel_dp_priv *dp_priv = container_of(adapter,
+ struct intel_dp_priv,
+ adapter);
+ struct intel_output *intel_output = dp_priv->intel_output;
+
+ return intel_dp_aux_ch(intel_output,
+ send, send_bytes, recv, recv_bytes);
+}
+
+static int
+intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ DRM_ERROR("i2c_init %s\n", name);
+ dp_priv->algo.running = false;
+ dp_priv->algo.address = 0;
+ dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+ memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
+ dp_priv->adapter.owner = THIS_MODULE;
+ dp_priv->adapter.class = I2C_CLASS_DDC;
+ strncpy (dp_priv->adapter.name, name, sizeof dp_priv->adapter.name - 1);
+ dp_priv->adapter.name[sizeof dp_priv->adapter.name - 1] = '\0';
+ dp_priv->adapter.algo_data = &dp_priv->algo;
+ dp_priv->adapter.dev.parent = &intel_output->base.kdev;
+
+ return i2c_dp_aux_add_bus(&dp_priv->adapter);
+}
+
+static bool
+intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int lane_count, clock;
+ int max_lane_count = intel_dp_max_lane_count(intel_output);
+ int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
+
+ if (intel_dp_link_required(mode->clock) <= link_avail) {
+ dp_priv->link_bw = bws[clock];
+ dp_priv->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+ printk(KERN_ERR "link bw %02x lane count %d clock %d\n",
+ dp_priv->link_bw, dp_priv->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+struct intel_dp_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+static void
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+ while (*num > 0xffffff || *den > 0xffffff) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
+
+static void
+intel_dp_compute_m_n(int bytes_per_pixel,
+ int nlanes,
+ int pixel_clock,
+ int link_clock,
+ struct intel_dp_m_n *m_n)
+{
+ m_n->tu = 64;
+ m_n->gmch_m = pixel_clock * bytes_per_pixel;
+ m_n->gmch_n = link_clock * nlanes;
+ intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
+
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int lane_count = 4;
+ struct intel_dp_m_n m_n;
+
+ /*
+ * Find the lane count in the intel_output private
+ */
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (!connector->encoder || connector->encoder->crtc != crtc)
+ continue;
+
+ if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = dp_priv->lane_count;
+ break;
+ }
+ }
+
+ /*
+ * Compute the GMCH and Link ratios. The '3' here is
+ * the number of bytes_per_pixel post-LUT, which we always
+ * set up for 8-bits of R/G/B, or 3 bytes total.
+ */
+ intel_dp_compute_m_n(3, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
+
+ if (intel_crtc->pipe == 0) {
+ I915_WRITE(PIPEA_GMCH_DATA_M,
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPEA_GMCH_DATA_N,
+ m_n.gmch_n);
+ I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
+ I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
+ } else {
+ I915_WRITE(PIPEB_GMCH_DATA_M,
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPEB_GMCH_DATA_N,
+ m_n.gmch_n);
+ I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
+ I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
+ }
+}
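/*
 * Worked example of the M/N programming above (a trace of
 * intel_dp_compute_m_n(), not part of the original patch): a 148500 kHz
 * (1080p60) mode on a 270000 kHz link with 4 lanes gives
 *   gmch_m = 148500 * 3 = 445500,  gmch_n = 270000 * 4 = 1080000,
 *   link_m = 148500,               link_n = 270000,
 * none of which exceed 0xffffff, so intel_reduce_ratio() leaves them
 * untouched; (tu - 1) = 63 is packed into the high bits of the
 * PIPE*_GMCH_DATA_M register and the rest go into GMCH_DATA_N and
 * DP_LINK_M/N as written above.
 */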
+
+static void
+intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_crtc *crtc = intel_output->enc.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ dp_priv->DP = (DP_LINK_TRAIN_OFF |
+ DP_VOLTAGE_0_4 |
+ DP_PRE_EMPHASIS_0 |
+ DP_SYNC_VS_HIGH |
+ DP_SYNC_HS_HIGH);
+
+ switch (dp_priv->lane_count) {
+ case 1:
+ dp_priv->DP |= DP_PORT_WIDTH_1;
+ break;
+ case 2:
+ dp_priv->DP |= DP_PORT_WIDTH_2;
+ break;
+ case 4:
+ dp_priv->DP |= DP_PORT_WIDTH_4;
+ break;
+ }
+ if (dp_priv->has_audio)
+ dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+ memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ dp_priv->link_configuration[0] = dp_priv->link_bw;
+ dp_priv->link_configuration[1] = dp_priv->lane_count;
+
+ /*
+ * Check for DPCD version > 1.1,
+ * enable enhanced framing in that case
+ */
+ if (dp_priv->dpcd[0] >= 0x11) {
+ dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ dp_priv->DP |= DP_ENHANCED_FRAMING;
+ }
+
+ if (intel_crtc->pipe == 1)
+ dp_priv->DP |= DP_PIPEB_SELECT;
+}
+
+
+static void
+intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(dp_priv->output_reg);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ if (dp_reg & DP_PORT_EN)
+ intel_dp_link_down(intel_output, dp_priv->DP);
+ } else {
+ if (!(dp_reg & DP_PORT_EN))
+ intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+ }
+ dp_priv->dpms_mode = mode;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+intel_dp_get_link_status(struct intel_output *intel_output,
+ uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ int ret;
+
+ ret = intel_dp_aux_native_read(intel_output,
+ DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE);
+ if (ret != DP_LINK_STATUS_SIZE)
+ return false;
+ return true;
+}
+
+static uint8_t
+intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static void
+intel_dp_save(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ dp_priv->save_DP = I915_READ(dp_priv->output_reg);
+ intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
+ dp_priv->save_link_configuration,
+ sizeof (dp_priv->save_link_configuration));
+}
+
+static uint8_t
+intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+/*
+ * These are source-specific values; current Intel hardware supports
+ * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
+ */
+#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
+
+static uint8_t
+intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
+
+static void
+intel_get_adjust_train(struct intel_output *intel_output,
+ uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane_count,
+ uint8_t train_set[4])
+{
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= I830_DP_VOLTAGE_MAX)
+ v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p >= intel_dp_pre_emphasis_max(v))
+ p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ train_set[lane] = v | p;
+}
+
+static uint32_t
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
+{
+ uint32_t signal_levels = 0;
+
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ signal_levels |= DP_VOLTAGE_0_6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ signal_levels |= DP_VOLTAGE_0_8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ signal_levels |= DP_PRE_EMPHASIS_3_5;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ signal_levels |= DP_PRE_EMPHASIS_6;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ signal_levels |= DP_PRE_EMPHASIS_9_5;
+ break;
+ }
+ return signal_levels;
+}
+
+static uint8_t
+intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return (l >> s) & 0xf;
+}
+
+/* Check for clock recovery is done on all channels */
+static bool
+intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ int lane;
+ uint8_t lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+static bool
+intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+ lane_align = intel_dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
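+/*
+ * Program the port register, then tell the sink (over AUX) which training
+ * pattern to expect and which per-lane drive settings to use.
+ */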
+static bool
+intel_dp_set_link_train(struct intel_output *intel_output,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat,
+ uint8_t train_set[4],
+ bool first)
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int ret;
+
+ I915_WRITE(dp_priv->output_reg, dp_reg_value);
+ POSTING_READ(dp_priv->output_reg);
+ if (first)
+ intel_wait_for_vblank(dev);
+
+ intel_dp_aux_native_write_1(intel_output,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ ret = intel_dp_aux_native_write(intel_output,
+ DP_TRAINING_LANE0_SET, train_set, 4);
+ if (ret != 4)
+ return false;
+
+ return true;
+}
+
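+/*
+ * Train the link in two phases: loop on training pattern 1 until clock
+ * recovery succeeds, then on training pattern 2 until channel equalization
+ * succeeds, adjusting the drive settings as the sink requests each time.
+ */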
+static void
+intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+ bool channel_eq = false;
+ bool first = true;
+ int tries;
+
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_output, 0x100,
+ link_configuration, DP_LINK_CONFIGURATION_SIZE);
+
+ DP |= DP_PORT_EN;
+ DP &= ~DP_LINK_TRAIN_MASK;
+ memset(train_set, 0, 4);
+ voltage = 0xff;
+ tries = 0;
+ clock_recovery = false;
+ for (;;) {
+ /* Use train_set[0] to set the voltage and pre-emphasis values */
+ uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+
+ if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
+ DP_TRAINING_PATTERN_1, train_set, first))
+ break;
+ first = false;
+ /* Set training pattern 1 */
+
+ udelay(100);
+ if (!intel_dp_get_link_status(intel_output, link_status))
+ break;
+
+ if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < dp_priv->lane_count; i++)
+ if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == dp_priv->lane_count)
+ break;
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ }
+
+ /* channel equalization */
+ tries = 0;
+ channel_eq = false;
+ for (;;) {
+ /* Use train_set[0] to set the voltage and pre-emphasis values */
+ uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+
+ /* channel eq pattern */
+ if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
+ DP_TRAINING_PATTERN_2, train_set,
+ false))
+ break;
+
+ udelay(400);
+ if (!intel_dp_get_link_status(intel_output, link_status))
+ break;
+
+ if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times */
+ if (tries > 5)
+ break;
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ ++tries;
+ }
+
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+ POSTING_READ(dp_priv->output_reg);
+ intel_dp_aux_native_write_1(intel_output,
+ DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
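+/* Turn the DP port off */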
+static void
+intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(dp_priv->output_reg);
+}
+
+static void
+intel_dp_restore(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (dp_priv->save_DP & DP_PORT_EN)
+ intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
+ else
+ intel_dp_link_down(intel_output, dp_priv->save_DP);
+}
+
+/*
+ * According to DP spec
+ * 5.1.2:
+ * 1. Read DPCD
+ * 2. Configure link according to Receiver Capabilities
+ * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ * 4. Check link status on receipt of hot-plug interrupt
+ */
+
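+/*
+ * Hot-plug path: drop the link if its status can't be read, retrain it if
+ * channel equalization has been lost.
+ */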
+static void
+intel_dp_check_link_status(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_output->enc.crtc)
+ return;
+
+ if (!intel_dp_get_link_status(intel_output, link_status)) {
+ intel_dp_link_down(intel_output, dp_priv->DP);
+ return;
+ }
+
+ if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
+ intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+}
+
+/**
+ * Uses PORT_HOTPLUG_EN and PORT_HOTPLUG_STAT to detect DP connection.
+ *
+ * \return true if DP port is connected.
+ * \return false if DP port is disconnected.
+ */
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint32_t temp, bit;
+ enum drm_connector_status status;
+
+ dp_priv->has_audio = false;
+
+ temp = I915_READ(PORT_HOTPLUG_EN);
+
+ I915_WRITE(PORT_HOTPLUG_EN,
+ temp |
+ DPB_HOTPLUG_INT_EN |
+ DPC_HOTPLUG_INT_EN |
+ DPD_HOTPLUG_INT_EN);
+
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ switch (dp_priv->output_reg) {
+ case DP_B:
+ bit = DPB_HOTPLUG_INT_STATUS;
+ break;
+ case DP_C:
+ bit = DPC_HOTPLUG_INT_STATUS;
+ break;
+ case DP_D:
+ bit = DPD_HOTPLUG_INT_STATUS;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+
+ temp = I915_READ(PORT_HOTPLUG_STAT);
+
+ if ((temp & bit) == 0)
+ return connector_status_disconnected;
+
+ status = connector_status_disconnected;
+ if (intel_dp_aux_native_read(intel_output,
+ 0x000, dp_priv->dpcd,
+ sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
+ {
+ if (dp_priv->dpcd[0] != 0)
+ status = connector_status_connected;
+ }
+ return status;
+}
+
+static int intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ /* We should parse the EDID data and find out whether it has an audio sink */
+
+ return intel_ddc_get_modes(intel_output);
+}
+
+static void
+intel_dp_destroy(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->i2c_bus)
+ intel_i2c_destroy(intel_output->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(intel_output);
+}
+
+static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+ .dpms = intel_dp_dpms,
+ .mode_fixup = intel_dp_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_dp_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = intel_dp_save,
+ .restore = intel_dp_restore,
+ .detect = intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .get_modes = intel_dp_get_modes,
+ .mode_valid = intel_dp_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_dp_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .destroy = intel_dp_enc_destroy,
+};
+
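+/* Hot-plug callback registered on the output; only re-check the link while it is on */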
+void
+intel_dp_hot_plug(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
+ intel_dp_check_link_status(intel_output);
+}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+ struct intel_dp_priv *dp_priv;
+
+ intel_output = kcalloc(sizeof(struct intel_output) +
+ sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
+ if (!intel_output)
+ return;
+
+ dp_priv = (struct intel_dp_priv *)(intel_output + 1);
+
+ connector = &intel_output->base;
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+ intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
+
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+ dp_priv->intel_output = intel_output;
+ dp_priv->output_reg = output_reg;
+ dp_priv->has_audio = false;
+ dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
+ intel_output->dev_priv = dp_priv;
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&intel_output->base,
+ &intel_output->enc);
+ drm_sysfs_connector_add(connector);
+
+ /* Set up the DDC bus. */
+ intel_dp_i2c_init(intel_output,
+ (output_reg == DP_B) ? "DPDDC-B" :
+ (output_reg == DP_C) ? "DPDDC-C" : "DPDDC-D");
+ intel_output->ddc_bus = &dp_priv->adapter;
+ intel_output->hot_plug = intel_dp_hot_plug;
+
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
new file mode 100644
index 000000000000..2b38054d3b6d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DP_H_
+#define _INTEL_DP_H_
+
+/* From the VESA DisplayPort spec */
+
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+#define AUX_I2C_WRITE 0x0
+#define AUX_I2C_READ 0x1
+#define AUX_I2C_STATUS 0x2
+#define AUX_I2C_MOT 0x4
+
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+
+#define AUX_I2C_REPLY_ACK (0x0 << 6)
+#define AUX_I2C_REPLY_NACK (0x1 << 6)
+#define AUX_I2C_REPLY_DEFER (0x2 << 6)
+#define AUX_I2C_REPLY_MASK (0x3 << 6)
+
+/* AUX CH addresses */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+
+#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+struct i2c_algo_dp_aux_data {
+ bool running;
+ u16 address;
+ int (*aux_ch) (struct i2c_adapter *adapter,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_bytes);
+};
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
+
+#endif /* _INTEL_DP_H_ */
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c
new file mode 100644
index 000000000000..4e60f14b1a6d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_i2c.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
+#include "intel_dp.h"
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
+
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (;;) {
+ ret = (*algo_data->aux_ch)(adapter,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ printk(KERN_ERR "aux_ch failed %d\n", ret);
+ return ret;
+ }
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ printk(KERN_ERR "aux_ch nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ printk(KERN_ERR "aux_ch defer\n");
+ udelay(100);
+ break;
+ default:
+ printk(KERN_ERR "aux_ch invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address; this is used for doing
+ * a write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
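+/*
+ * i2c_algorithm master_xfer hook: each message becomes an address cycle
+ * followed by single-byte AUX reads or writes, with a stop at the end.
+ */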
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ int ret = 0;
+ bool reading = false;
+ int m;
+ int b;
+
+ for (m = 0; m < num; m++) {
+ u16 len = msgs[m].len;
+ u8 *buf = msgs[m].buf;
+ reading = (msgs[m].flags & I2C_M_RD) != 0;
+ ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+ if (ret < 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ }
+ if (ret >= 0)
+ ret = num;
+ i2c_algo_dp_aux_stop(adapter, reading);
+ printk(KERN_ERR "dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+ .master_xfer = i2c_algo_dp_aux_xfer,
+ .functionality = i2c_algo_dp_aux_functionality,
+};
+
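+/* Return the AUX link to a known idle state */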
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ (void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
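+/* Install the AUX algorithm on the adapter and reset the link state */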
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+ error = i2c_add_adapter(adapter);
+ return error;
+}
+EXPORT_SYMBOL(i2c_dp_aux_add_bus);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd4b9c5f715e..004541c935a8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -54,6 +54,7 @@
#define INTEL_OUTPUT_LVDS 4
#define INTEL_OUTPUT_TVOUT 5
#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -65,7 +66,6 @@ struct intel_i2c_chan {
u32 reg; /* GPIO reg */
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
- u8 slave_addr;
};
struct intel_framebuffer {
@@ -79,11 +79,12 @@ struct intel_output {
struct drm_encoder enc;
int type;
- struct intel_i2c_chan *i2c_bus; /* for control functions */
- struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
+ struct i2c_adapter *i2c_bus;
+ struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
void *dev_priv;
+ void (*hot_plug)(struct intel_output *);
};
struct intel_crtc {
@@ -104,9 +105,9 @@ struct intel_crtc {
#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
-struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name);
-void intel_i2c_destroy(struct intel_i2c_chan *chan);
+struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name);
+void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct intel_output *intel_output);
extern bool intel_ddc_probe(struct intel_output *intel_output);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
@@ -116,6 +117,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
+extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 1ee3007d6ec0..13bff20930e8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -384,10 +384,9 @@ void intel_dvo_init(struct drm_device *dev)
{
struct intel_output *intel_output;
struct intel_dvo_device *dvo;
- struct intel_i2c_chan *i2cbus = NULL;
+ struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
- int gpio_inited = 0;
int encoder_type = DRM_MODE_ENCODER_NONE;
intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output)
@@ -420,14 +419,11 @@ void intel_dvo_init(struct drm_device *dev)
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- if (gpio_inited != gpio) {
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
- if (!(i2cbus = intel_i2c_create(dev, gpio,
- gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
- continue;
- }
- gpio_inited = gpio;
+ if (i2cbus != NULL)
+ intel_i2c_destroy(i2cbus);
+ if (!(i2cbus = intel_i2c_create(dev, gpio,
+ gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
+ continue;
}
if (dvo->dev_ops!= NULL)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4ea2a651b92c..9e30daae37dc 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -31,6 +31,7 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -56,8 +57,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox = SDVO_ENCODING_HDMI |
SDVO_BORDER_ENABLE |
SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH |
- SDVO_NULL_PACKETS_DURING_VSYNC;
+ SDVO_HSYNC_ACTIVE_HIGH;
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -129,20 +129,26 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static void
-intel_hdmi_sink_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_hdmi_edid_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
struct edid *edid = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
- if (edid != NULL) {
- hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
- kfree(edid);
+ intel_output->ddc_bus);
+ hdmi_priv->has_hdmi_sink = false;
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ }
intel_output->base.display_info.raw_edid = NULL;
+ kfree(edid);
}
+ return status;
}
static enum drm_connector_status
@@ -154,11 +160,7 @@ igdng_hdmi_detect(struct drm_connector *connector)
/* FIXME hotplug detect */
hdmi_priv->has_hdmi_sink = false;
- intel_hdmi_sink_detect(connector);
- if (hdmi_priv->has_hdmi_sink)
- return connector_status_connected;
- else
- return connector_status_disconnected;
+ return intel_hdmi_edid_detect(connector);
}
static enum drm_connector_status
@@ -201,10 +203,9 @@ intel_hdmi_detect(struct drm_connector *connector)
return connector_status_unknown;
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
- intel_hdmi_sink_detect(connector);
- return connector_status_connected;
- } else
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
+ return intel_hdmi_edid_detect(connector);
+ else
return connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index f7061f68d050..62b8bead7652 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,6 +124,7 @@ static void set_data(void *data, int state_high)
* @output: driver specific output device
* @reg: GPIO reg to use
* @name: name for this bus
+ * @slave_addr: slave address (if fixed)
*
* Creates and registers a new i2c bus with the Linux i2c layer, for use
* in output probing and control (e.g. DDC or SDVO control functions).
@@ -139,8 +140,8 @@ static void set_data(void *data, int state_high)
* %GPIOH
* see PRM for details on how these different busses are used.
*/
-struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name)
+struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name)
{
struct intel_i2c_chan *chan;
@@ -174,7 +175,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
intel_i2c_quirk_set(dev, false);
udelay(20);
- return chan;
+ return &chan->adapter;
out_free:
kfree(chan);
@@ -187,11 +188,16 @@ out_free:
*
* Unregister the adapter from the i2c layer, then free the structure.
*/
-void intel_i2c_destroy(struct intel_i2c_chan *chan)
+void intel_i2c_destroy(struct i2c_adapter *adapter)
{
- if (!chan)
+ struct intel_i2c_chan *chan;
+
+ if (!adapter)
return;
+ chan = container_of(adapter,
+ struct intel_i2c_chan,
+ adapter);
i2c_del_adapter(&chan->adapter);
kfree(chan);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f073ed8432e8..9564ca44a977 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -39,6 +39,21 @@
#define I915_LVDS "i915_lvds"
+/*
+ * the following four scaling options are defined.
+ * #define DRM_MODE_SCALE_NON_GPU 0
+ * #define DRM_MODE_SCALE_FULLSCREEN 1
+ * #define DRM_MODE_SCALE_NO_SCALE 2
+ * #define DRM_MODE_SCALE_ASPECT 3
+ */
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds_priv {
+ int fitting_mode;
+ u32 pfit_control;
+ u32 pfit_pgm_ratios;
+};
+
/**
* Sets the backlight level.
*
@@ -213,10 +228,27 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ /*
+ * Floating point operations are not supported, so PANEL_RATIO_FACTOR
+ * is defined to avoid floating point computation when calculating
+ * the panel ratio.
+ */
+#define PANEL_RATIO_FACTOR 8192
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0;
+ int left_border = 0, right_border = 0, top_border = 0;
+ int bottom_border = 0;
+ bool border = 0;
+ int panel_ratio, desired_ratio, vert_scale, horiz_scale;
+ int horiz_ratio, vert_ratio;
+ u32 hsync_width, vsync_width;
+ u32 hblank_width, vblank_width;
+ u32 hsync_pos, vsync_pos;
/* Should never happen!! */
if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
@@ -232,7 +264,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
}
-
+ /* If we don't have a panel mode, there is nothing we can do */
+ if (dev_priv->panel_fixed_mode == NULL)
+ return true;
/*
* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -256,6 +290,243 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
}
+ /* Make sure pre-965s set dither correctly */
+ if (!IS_I965G(dev)) {
+ if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+ }
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay) {
+ pfit_pgm_ratios = 0;
+ border = 0;
+ goto out;
+ }
+
+ /* 965+ wants fuzzy fitting */
+ if (IS_I965G(dev))
+ pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY;
+
+ hsync_width = adjusted_mode->crtc_hsync_end -
+ adjusted_mode->crtc_hsync_start;
+ vsync_width = adjusted_mode->crtc_vsync_end -
+ adjusted_mode->crtc_vsync_start;
+ hblank_width = adjusted_mode->crtc_hblank_end -
+ adjusted_mode->crtc_hblank_start;
+ vblank_width = adjusted_mode->crtc_vblank_end -
+ adjusted_mode->crtc_vblank_start;
+ /*
+ * Deal with panel fitting options. Figure out how to stretch the
+ * image based on its aspect ratio & the current panel fitting mode.
+ */
+ panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
+ adjusted_mode->vdisplay;
+ desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
+ mode->vdisplay;
+ /*
+ * Enable automatic panel scaling for non-native modes so that they fill
+ * the screen. Should be enabled before the pipe is enabled, according
+ * to register description and PRM.
+ * Change the value here to see the borders for debugging
+ */
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
+
+ switch (lvds_priv->fitting_mode) {
+ case DRM_MODE_SCALE_NO_SCALE:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2;
+ right_border = left_border;
+ if (mode->hdisplay & 1)
+ right_border++;
+ top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
+ bottom_border = top_border;
+ if (mode->vdisplay & 1)
+ bottom_border++;
+ /* Set active & border values */
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ /* Keep the border even */
+ if (right_border & 1)
+ right_border++;
+ /* use the border directly instead of border minus one */
+ adjusted_mode->crtc_hblank_start = mode->hdisplay +
+ right_border;
+ /* keep the blank width constant */
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode->crtc_hblank_start + hblank_width;
+ /* get the hsync pos relative to hblank start */
+ hsync_pos = (hblank_width - hsync_width) / 2;
+ /* keep the hsync pos even */
+ if (hsync_pos & 1)
+ hsync_pos++;
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode->crtc_hblank_start + hsync_pos;
+ /* keep the hsync width constant */
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode->crtc_hsync_start + hsync_width;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
+ /* use the border instead of border minus one */
+ adjusted_mode->crtc_vblank_start = mode->vdisplay +
+ bottom_border;
+ /* keep the vblank width constant */
+ adjusted_mode->crtc_vblank_end =
+ adjusted_mode->crtc_vblank_start + vblank_width;
+ /* get the vsync start position relative to vblank start */
+ vsync_pos = (vblank_width - vsync_width) / 2;
+ adjusted_mode->crtc_vsync_start =
+ adjusted_mode->crtc_vblank_start + vsync_pos;
+ /* keep the vsync width constant */
+ adjusted_mode->crtc_vsync_end =
+ adjusted_mode->crtc_vsync_start + vsync_width;
+ border = 1;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ pfit_control |= PFIT_ENABLE;
+ if (IS_I965G(dev)) {
+ /* 965+ is easy, it does everything in hw */
+ if (panel_ratio > desired_ratio)
+ pfit_control |= PFIT_SCALING_PILLAR;
+ else if (panel_ratio < desired_ratio)
+ pfit_control |= PFIT_SCALING_LETTER;
+ else
+ pfit_control |= PFIT_SCALING_AUTO;
+ } else {
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ u32 horiz_bits, vert_bits, bits = 12;
+ horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/
+ adjusted_mode->hdisplay;
+ vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/
+ adjusted_mode->vdisplay;
+ horiz_scale = adjusted_mode->hdisplay *
+ PANEL_RATIO_FACTOR / mode->hdisplay;
+ vert_scale = adjusted_mode->vdisplay *
+ PANEL_RATIO_FACTOR / mode->vdisplay;
+
+ /* retain aspect ratio */
+ if (panel_ratio > desired_ratio) { /* Pillar */
+ u32 scaled_width;
+ scaled_width = mode->hdisplay * vert_scale /
+ PANEL_RATIO_FACTOR;
+ horiz_ratio = vert_ratio;
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ /* Pillar will have left/right borders */
+ left_border = (adjusted_mode->hdisplay -
+ scaled_width) / 2;
+ right_border = left_border;
+ if (mode->hdisplay & 1) /* odd resolutions */
+ right_border++;
+ /* keep the border even */
+ if (right_border & 1)
+ right_border++;
+ adjusted_mode->crtc_hdisplay = scaled_width;
+ /* use border instead of border minus one */
+ adjusted_mode->crtc_hblank_start =
+ scaled_width + right_border;
+ /* keep the hblank width constant */
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode->crtc_hblank_start +
+ hblank_width;
+ /*
+ * get the hsync start pos relative to
+ * hblank start
+ */
+ hsync_pos = (hblank_width - hsync_width) / 2;
+ /* keep hsync_pos even */
+ if (hsync_pos & 1)
+ hsync_pos++;
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode->crtc_hblank_start +
+ hsync_pos;
+ /* keep the hsync width constant */
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode->crtc_hsync_start +
+ hsync_width;
+ border = 1;
+ } else if (panel_ratio < desired_ratio) { /* letter */
+ u32 scaled_height = mode->vdisplay *
+ horiz_scale / PANEL_RATIO_FACTOR;
+ vert_ratio = horiz_ratio;
+ pfit_control |= (HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ /* Letterbox will have top/bottom border */
+ top_border = (adjusted_mode->vdisplay -
+ scaled_height) / 2;
+ bottom_border = top_border;
+ if (mode->vdisplay & 1)
+ bottom_border++;
+ adjusted_mode->crtc_vdisplay = scaled_height;
+ /* use border instead of border minus one */
+ adjusted_mode->crtc_vblank_start =
+ scaled_height + bottom_border;
+ /* keep the vblank width constant */
+ adjusted_mode->crtc_vblank_end =
+ adjusted_mode->crtc_vblank_start +
+ vblank_width;
+ /*
+ * get the vsync start pos relative to
+ * vblank start
+ */
+ vsync_pos = (vblank_width - vsync_width) / 2;
+ adjusted_mode->crtc_vsync_start =
+ adjusted_mode->crtc_vblank_start +
+ vsync_pos;
+ /* keep the vsync width constant */
+ adjusted_mode->crtc_vsync_end =
+ adjusted_mode->crtc_vsync_start +
+ vsync_width;
+ border = 1;
+ } else {
+ /* Aspect ratios match; let hw scale in both directions */
+ pfit_control |= (VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ horiz_bits = (1 << bits) * horiz_ratio /
+ PANEL_RATIO_FACTOR;
+ vert_bits = (1 << bits) * vert_ratio /
+ PANEL_RATIO_FACTOR;
+ pfit_pgm_ratios =
+ ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
+ PFIT_VERT_SCALE_MASK) |
+ ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
+ PFIT_HORIZ_SCALE_MASK);
+ }
+ break;
+
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ pfit_control |= PFIT_ENABLE;
+ if (IS_I965G(dev))
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ break;
+ default:
+ break;
+ }
+
+out:
+ lvds_priv->pfit_control = pfit_control;
+ lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
@@ -301,8 +572,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 pfit_control;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
/*
* The LVDS pin pair will already have been turned on in the
@@ -319,22 +590,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
- if (mode->hdisplay != adjusted_mode->hdisplay ||
- mode->vdisplay != adjusted_mode->vdisplay)
- pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
- HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- else
- pfit_control = 0;
-
- if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- }
- else
- pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
-
- I915_WRITE(PFIT_CONTROL, pfit_control);
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
/**
@@ -406,6 +663,34 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
+ struct drm_device *dev = connector->dev;
+ struct intel_output *intel_output =
+ to_intel_output(connector);
+
+ if (property == dev->mode_config.scaling_mode_property &&
+ connector->encoder) {
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ if (value == DRM_MODE_SCALE_NON_GPU) {
+ DRM_DEBUG_KMS(I915_LVDS,
+ "non_GPU property is unsupported\n");
+ return 0;
+ }
+ if (lvds_priv->fitting_mode == value) {
+ /* the LVDS scaling property is not changed */
+ return 0;
+ }
+ lvds_priv->fitting_mode = value;
+ if (crtc && crtc->enabled) {
+ /*
+ * If the CRTC is enabled, the display will be changed
+ * according to the new panel fitting mode.
+ */
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+ }
+
return 0;
}
@@ -456,7 +741,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core series)",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
},
},
@@ -464,7 +749,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core 2 series)",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
},
},
@@ -518,6 +803,7 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
+ struct intel_lvds_priv *lvds_priv;
u32 lvds;
int pipe, gpio = GPIOC;
@@ -531,7 +817,8 @@ void intel_lvds_init(struct drm_device *dev)
gpio = PCH_GPIOC;
}
- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+ intel_output = kzalloc(sizeof(struct intel_output) +
+ sizeof(struct intel_lvds_priv), GFP_KERNEL);
if (!intel_output) {
return;
}
@@ -553,7 +840,18 @@ void intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
+ intel_output->dev_priv = lvds_priv;
+ /* create the scaling mode property */
+ drm_mode_create_scaling_mode_property(dev);
+ /*
+ * the initial panel fitting mode will be FULL_SCREEN.
+ */
+ drm_connector_attach_property(&intel_output->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -649,5 +947,5 @@ failed:
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
- kfree(connector);
+ kfree(intel_output);
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index e0910fefce87..67e2f4632a24 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -53,10 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
}
};
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true);
- ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
-
+ intel_i2c_quirk_set(intel_output->base.dev, true);
+ ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
+ intel_i2c_quirk_set(intel_output->base.dev, false);
if (ret == 2)
return true;
@@ -74,10 +73,9 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true);
- edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
+ intel_i2c_quirk_set(intel_output->base.dev, true);
+ edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
+ intel_i2c_quirk_set(intel_output->base.dev, false);
if (edid) {
drm_mode_connector_update_edid_property(&intel_output->base,
edid);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9a00adb3a508..f03473779feb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -38,8 +38,7 @@
#undef SDVO_DEBUG
#define I915_SDVO "i915_sdvo"
struct intel_sdvo_priv {
- struct intel_i2c_chan *i2c_bus;
- int slaveaddr;
+ u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
int output_device;
@@ -146,13 +145,13 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = sdvo_priv->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
@@ -162,7 +161,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
out_buf[0] = addr;
out_buf[1] = 0;
- if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
+ if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -175,10 +174,11 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
u8 ch)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
u8 out_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = intel_output->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -188,7 +188,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
+ if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
{
return true;
}
@@ -1369,9 +1369,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
struct edid *edid = NULL;
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
+ intel_output->ddc_bus);
if (edid != NULL) {
sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
kfree(edid);
@@ -1549,7 +1548,6 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
/*
@@ -1557,8 +1555,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- /* set the bus switch and get the modes */
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
intel_ddc_get_modes(intel_output);
if (list_empty(&connector->probed_modes) == false)
return;
@@ -1709,7 +1705,7 @@ intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
- if (to_intel_output(connector)->ddc_bus == chan) {
+ if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
intel_output = to_intel_output(connector);
break;
}
@@ -1723,7 +1719,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
struct i2c_algo_bit_data *algo_data;
- struct i2c_algorithm *algo;
+ const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
intel_output =
@@ -1733,7 +1729,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
return -EINVAL;
sdvo_priv = intel_output->dev_priv;
- algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo;
+ algo = intel_output->i2c_bus->algo;
intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
@@ -1785,13 +1781,11 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
- struct intel_i2c_chan *i2cbus = NULL;
- struct intel_i2c_chan *ddcbus = NULL;
+
int connector_type;
u8 ch[0x40];
int i;
- int encoder_type, output_id;
- u8 slave_addr;
+ int encoder_type;
intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
if (!intel_output) {
@@ -1799,29 +1793,24 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
}
sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
+ sdvo_priv->output_device = output_device;
+
+ intel_output->dev_priv = sdvo_priv;
intel_output->type = INTEL_OUTPUT_SDVO;
/* setup the DDC bus. */
if (output_device == SDVOB)
- i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
else
- i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
- if (!i2cbus)
+ if (!intel_output->i2c_bus)
goto err_inteloutput;
- slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
- sdvo_priv->i2c_bus = i2cbus;
+ sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
- if (output_device == SDVOB) {
- output_id = 1;
- } else {
- output_id = 2;
- }
- sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
- sdvo_priv->output_device = output_device;
- intel_output->i2c_bus = i2cbus;
- intel_output->dev_priv = sdvo_priv;
+ /* Save the bit-banging i2c functionality for use by the DDC wrapper */
+ intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
@@ -1835,17 +1824,15 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
/* setup the DDC bus. */
if (output_device == SDVOB)
- ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
else
- ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- if (ddcbus == NULL)
+ if (intel_output->ddc_bus == NULL)
goto err_i2c;
- intel_sdvo_i2c_bit_algo.functionality =
- intel_output->i2c_bus->adapter.algo->functionality;
- ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
- intel_output->ddc_bus = ddcbus;
+ /* Wrap with our custom algo which switches to DDC mode */
+ intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+ /* By default, SDVO LVDS is false */
sdvo_priv->is_lvds = false;
@@ -1965,9 +1952,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
return true;
err_i2c:
- if (ddcbus != NULL)
+ if (intel_output->ddc_bus != NULL)
intel_i2c_destroy(intel_output->ddc_bus);
- intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_output->i2c_bus != NULL)
+ intel_i2c_destroy(intel_output->i2c_bus);
err_inteloutput:
kfree(intel_output);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea68992e4416..a43c98e3f077 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1383,34 +1383,31 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
/*
* Detect TV by polling
*/
- if (intel_output->load_detect_temp) {
- /* TV not currently running, prod it with destructive detect */
- save_tv_dac = tv_dac;
- tv_ctl = I915_READ(TV_CTL);
- save_tv_ctl = tv_ctl;
- tv_ctl &= ~TV_ENC_ENABLE;
- tv_ctl &= ~TV_TEST_MODE_MASK;
- tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- tv_dac &= ~TVDAC_SENSE_MASK;
- tv_dac &= ~DAC_A_MASK;
- tv_dac &= ~DAC_B_MASK;
- tv_dac &= ~DAC_C_MASK;
- tv_dac |= (TVDAC_STATE_CHG_EN |
- TVDAC_A_SENSE_CTL |
- TVDAC_B_SENSE_CTL |
- TVDAC_C_SENSE_CTL |
- DAC_CTL_OVERRIDE |
- DAC_A_0_7_V |
- DAC_B_0_7_V |
- DAC_C_0_7_V);
- I915_WRITE(TV_CTL, tv_ctl);
- I915_WRITE(TV_DAC, tv_dac);
- intel_wait_for_vblank(dev);
- tv_dac = I915_READ(TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
- I915_WRITE(TV_CTL, save_tv_ctl);
- intel_wait_for_vblank(dev);
- }
+ save_tv_dac = tv_dac;
+ tv_ctl = I915_READ(TV_CTL);
+ save_tv_ctl = tv_ctl;
+ tv_ctl &= ~TV_ENC_ENABLE;
+ tv_ctl &= ~TV_TEST_MODE_MASK;
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ tv_dac &= ~TVDAC_SENSE_MASK;
+ tv_dac &= ~DAC_A_MASK;
+ tv_dac &= ~DAC_B_MASK;
+ tv_dac &= ~DAC_C_MASK;
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+ TVDAC_C_SENSE_CTL |
+ DAC_CTL_OVERRIDE |
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
+ I915_WRITE(TV_CTL, tv_ctl);
+ I915_WRITE(TV_DAC, tv_dac);
+ intel_wait_for_vblank(dev);
+ tv_dac = I915_READ(TV_DAC);
+ I915_WRITE(TV_DAC, save_tv_dac);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+ intel_wait_for_vblank(dev);
/*
* A B C
* 0 1 1 Composite
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f30aa7274a54..f97563db4e59 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -35,6 +35,23 @@
#include "atom.h"
/*
+ * Clear GPU surface registers.
+ */
+static void radeon_surface_init(struct radeon_device *rdev)
+{
+ /* FIXME: check this out */
+ if (rdev->family < CHIP_R600) {
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ WREG32(RADEON_SURFACE0_INFO +
+ i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
+ 0);
+ }
+ }
+}
+
+/*
* GPU scratch registers helpers function.
*/
static void radeon_scratch_init(struct radeon_device *rdev)
@@ -496,6 +513,8 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_errata(rdev);
/* Initialize scratch registers */
radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
@@ -604,9 +623,6 @@ int radeon_device_init(struct radeon_device *rdev,
if (r) {
return r;
}
- if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
- rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
- }
if (!ret) {
DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 09c9fb9f6210..84ba69f48784 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -345,7 +345,7 @@ static void __exit radeon_exit(void)
drm_exit(driver);
}
-late_initcall(radeon_init);
+module_init(radeon_init);
module_exit(radeon_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fa86d398945e..9e8f191eb64a 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -478,14 +478,16 @@ int radeonfb_create(struct radeon_device *rdev,
{
struct fb_info *info;
struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL;
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_object *robj = NULL;
struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
+ u64 fb_gpuaddr;
void *fbptr = NULL;
+ unsigned long tmp;
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
@@ -498,11 +500,12 @@ int radeonfb_create(struct radeon_device *rdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
- false, &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ false, &gobj);
if (ret) {
- printk(KERN_ERR "failed to allocate framebuffer\n");
+ printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
+ surface_width, surface_height);
ret = -ENOMEM;
goto out;
}
@@ -515,12 +518,19 @@ int radeonfb_create(struct radeon_device *rdev,
ret = -ENOMEM;
goto out_unref;
}
+ ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ if (ret) {
+ printk(KERN_ERR "failed to pin framebuffer\n");
+ ret = -ENOMEM;
+ goto out_unref;
+ }
list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
rfb = to_radeon_framebuffer(fb);
*rfb_p = rfb;
rdev->fbdev_rfb = rfb;
+ rdev->fbdev_robj = robj;
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
if (info == NULL) {
@@ -541,13 +551,13 @@ int radeonfb_create(struct radeon_device *rdev,
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_I830;
+ info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
info->fix.line_length = fb->pitch;
- info->screen_base = fbptr;
- info->fix.smem_start = (unsigned long)fbptr;
+ tmp = fb_gpuaddr - rdev->mc.vram_location;
+ info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = size;
info->screen_base = fbptr;
info->screen_size = size;
@@ -562,8 +572,8 @@ int radeonfb_create(struct radeon_device *rdev,
info->var.width = -1;
info->var.xres = fb_width;
info->var.yres = fb_height;
- info->fix.mmio_start = pci_resource_start(rdev->pdev, 2);
- info->fix.mmio_len = pci_resource_len(rdev->pdev, 2);
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
@@ -644,7 +654,7 @@ out_unref:
if (robj) {
radeon_object_kunmap(robj);
}
- if (ret) {
+ if (fb && ret) {
list_del(&fb->filp_head);
drm_gem_object_unreference(gobj);
drm_framebuffer_cleanup(fb);
@@ -813,6 +823,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
robj = rfb->obj->driver_private;
unregister_framebuffer(info);
radeon_object_kunmap(robj);
+ radeon_object_unpin(robj);
framebuffer_release(info);
}
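The smem_start change above maps the pinned framebuffer to its CPU-visible PCI address: the object's GPU address minus the MC's VRAM base gives the offset into VRAM, which is then added to the PCI aperture base. A hedged sketch of that computation (helper name, parameter types and example values are made up, not taken from the driver):

	/* Hypothetical helper mirroring the computation in radeonfb_create():
	 * e.g. fb_gpuaddr 0x00100000, vram_location 0x0, aper_base 0xd0000000
	 * yields smem_start 0xd0100000. */
	static unsigned long radeon_fb_smem_start(u64 fb_gpuaddr, u64 vram_location,
						  u64 aper_base)
	{
		return (unsigned long)(aper_base + (fb_gpuaddr - vram_location));
	}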
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 983e8df5e000..bac0d06c52ac 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -223,7 +223,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
{
uint32_t flags;
uint32_t tmp;
- void *fbptr;
int r;
flags = radeon_object_flags_from_domain(domain);
@@ -242,10 +241,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
return r;
}
- if (robj->rdev->fbdev_robj == robj) {
- mutex_lock(&robj->rdev->fbdev_info->lock);
- radeon_object_kunmap(robj);
- }
tmp = robj->tobj.mem.placement;
ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
@@ -261,23 +256,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
DRM_ERROR("radeon: failed to pin object.\n");
}
radeon_object_unreserve(robj);
- if (robj->rdev->fbdev_robj == robj) {
- if (!r) {
- r = radeon_object_kmap(robj, &fbptr);
- }
- if (!r) {
- robj->rdev->fbdev_info->screen_base = fbptr;
- robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
- }
- mutex_unlock(&robj->rdev->fbdev_info->lock);
- }
return r;
}
void radeon_object_unpin(struct radeon_object *robj)
{
uint32_t flags;
- void *fbptr;
int r;
spin_lock(&robj->tobj.lock);
@@ -297,10 +281,6 @@ void radeon_object_unpin(struct radeon_object *robj)
DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
return;
}
- if (robj->rdev->fbdev_robj == robj) {
- mutex_lock(&robj->rdev->fbdev_info->lock);
- radeon_object_kunmap(robj);
- }
flags = robj->tobj.mem.placement;
robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
r = ttm_buffer_object_validate(&robj->tobj,
@@ -310,16 +290,6 @@ void radeon_object_unpin(struct radeon_object *robj)
DRM_ERROR("radeon: failed to unpin buffer.\n");
}
radeon_object_unreserve(robj);
- if (robj->rdev->fbdev_robj == robj) {
- if (!r) {
- r = radeon_object_kmap(robj, &fbptr);
- }
- if (!r) {
- robj->rdev->fbdev_info->screen_base = fbptr;
- robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
- }
- mutex_unlock(&robj->rdev->fbdev_info->lock);
- }
}
int radeon_object_wait(struct radeon_object *robj)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 517c84559633..bdec583901eb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -34,7 +34,6 @@
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 27b146c54fbc..40b75032ea47 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -32,7 +32,6 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
-#include <linux/version.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0331fa74cd3f..75dc8bd24592 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,7 +28,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 7831a0318d3c..051112b46d74 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -31,21 +31,6 @@ config HID
If unsure, say Y.
-config HID_DEBUG
- bool "HID debugging support"
- default y
- depends on HID
- ---help---
- This option lets the HID layer output diagnostics about its internal
- state, resolve HID usages, dump HID fields, etc. Individual HID drivers
- use this debugging facility to output information about individual HID
- devices, etc.
-
- This feature is useful for those who are either debugging the HID parser
- or any HID hardware device.
-
- If unsure, say Y.
-
config HIDRAW
bool "/dev/hidraw raw HID device support"
depends on HID
@@ -176,6 +161,7 @@ config LOGITECH_FF
- Logitech WingMan Cordless RumblePad 2
- Logitech WingMan Force 3D
- Logitech Formula Force EX
+ - Logitech WingMan Formula Force GP
- Logitech MOMO Force wheel
and if you want to enable force feedback for them.
@@ -314,9 +300,9 @@ config THRUSTMASTER_FF
depends on HID_THRUSTMASTER
select INPUT_FF_MEMLESS
---help---
- Say Y here if you have a THRUSTMASTER FireStorm Dual Power 2 or
- a THRUSTMASTER Ferrari GT Rumble Force or Force Feedback Wheel and
- want to enable force feedback support for it.
+ Say Y here if you have a THRUSTMASTER FireStorm Dual Power 2 or 3,
+ a THRUSTMASTER Dual Trigger 3-in-1 or a THRUSTMASTER Ferrari GT
+ Rumble Force or Force Feedback Wheel.
config HID_WACOM
tristate "Wacom Bluetooth devices support" if EMBEDDED
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index db35151673b1..a06525d6b0f9 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -3,9 +3,12 @@
#
hid-objs := hid-core.o hid-input.o
+ifdef CONFIG_DEBUG_FS
+ hid-objs += hid-debug.o
+endif
+
obj-$(CONFIG_HID) += hid.o
-hid-$(CONFIG_HID_DEBUG) += hid-debug.o
hid-$(CONFIG_HIDRAW) += hidraw.o
hid-logitech-objs := hid-lg.o
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 42ea359e94cf..df474c699fb8 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -145,12 +145,12 @@ static struct hid_driver a4_driver = {
.remove = a4_remove,
};
-static int a4_init(void)
+static int __init a4_init(void)
{
return hid_register_driver(&a4_driver);
}
-static void a4_exit(void)
+static void __exit a4_exit(void)
{
hid_unregister_driver(&a4_driver);
}
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 303ccce05bb3..4b96e7a898cf 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -451,7 +451,7 @@ static struct hid_driver apple_driver = {
.input_mapped = apple_input_mapped,
};
-static int apple_init(void)
+static int __init apple_init(void)
{
int ret;
@@ -462,7 +462,7 @@ static int apple_init(void)
return ret;
}
-static void apple_exit(void)
+static void __exit apple_exit(void)
{
hid_unregister_driver(&apple_driver);
}
diff --git a/drivers/hid/hid-belkin.c b/drivers/hid/hid-belkin.c
index 2f6723133a4b..4ce7aa3a519f 100644
--- a/drivers/hid/hid-belkin.c
+++ b/drivers/hid/hid-belkin.c
@@ -88,12 +88,12 @@ static struct hid_driver belkin_driver = {
.probe = belkin_probe,
};
-static int belkin_init(void)
+static int __init belkin_init(void)
{
return hid_register_driver(&belkin_driver);
}
-static void belkin_exit(void)
+static void __exit belkin_exit(void)
{
hid_unregister_driver(&belkin_driver);
}
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index ab8209e7e45c..7e597d7f770f 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -70,12 +70,12 @@ static struct hid_driver ch_driver = {
.input_mapping = ch_input_mapping,
};
-static int ch_init(void)
+static int __init ch_init(void)
{
return hid_register_driver(&ch_driver);
}
-static void ch_exit(void)
+static void __exit ch_exit(void)
{
hid_unregister_driver(&ch_driver);
}
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index 7f91076d8493..8965ad93d510 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -63,12 +63,12 @@ static struct hid_driver ch_driver = {
.input_mapping = ch_input_mapping,
};
-static int ch_init(void)
+static int __init ch_init(void)
{
return hid_register_driver(&ch_driver);
}
-static void ch_exit(void)
+static void __exit ch_exit(void)
{
hid_unregister_driver(&ch_driver);
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f2c21d5d24e8..f0fbac1f249d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -44,12 +44,10 @@
#define DRIVER_DESC "HID core driver"
#define DRIVER_LICENSE "GPL"
-#ifdef CONFIG_HID_DEBUG
int hid_debug = 0;
module_param_named(debug, hid_debug, int, 0600);
-MODULE_PARM_DESC(debug, "HID debugging (0=off, 1=probing info, 2=continuous data dumping)");
+MODULE_PARM_DESC(debug, "toggle HID debugging messages");
EXPORT_SYMBOL_GPL(hid_debug);
-#endif
/*
* Register a new report for a device.
@@ -861,7 +859,7 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
struct hid_driver *hdrv = hid->driver;
int ret;
- hid_dump_input(usage, value);
+ hid_dump_input(hid, usage, value);
if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
ret = hdrv->event(hid, field, usage, value);
@@ -983,11 +981,10 @@ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
{
unsigned size = field->report_size;
- hid_dump_input(field->usage + offset, value);
+ hid_dump_input(field->report->device, field->usage + offset, value);
if (offset >= field->report_count) {
dbg_hid("offset (%d) exceeds report_count (%d)\n", offset, field->report_count);
- hid_dump_field(field, 8);
return -1;
}
if (field->logical_minimum < 0) {
@@ -1078,6 +1075,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_driver *hdrv = hid->driver;
struct hid_report *report;
+ char *buf;
unsigned int i;
int ret;
@@ -1089,18 +1087,38 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
return -1;
}
- dbg_hid("report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
+ buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE,
+ interrupt ? GFP_ATOMIC : GFP_KERNEL);
+
+ if (!buf) {
+ report = hid_get_report(report_enum, data);
+ goto nomem;
+ }
+
+ snprintf(buf, HID_DEBUG_BUFSIZE - 1,
+ "\nreport (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
+ hid_debug_event(hid, buf);
report = hid_get_report(report_enum, data);
- if (!report)
+ if (!report) {
+ kfree(buf);
return -1;
+ }
/* dump the report */
- dbg_hid("report %d (size %u) = ", report->id, size);
- for (i = 0; i < size; i++)
- dbg_hid_line(" %02x", data[i]);
- dbg_hid_line("\n");
+ snprintf(buf, HID_DEBUG_BUFSIZE - 1,
+ "report %d (size %u) = ", report->id, size);
+ hid_debug_event(hid, buf);
+ for (i = 0; i < size; i++) {
+ snprintf(buf, HID_DEBUG_BUFSIZE - 1,
+ " %02x", data[i]);
+ hid_debug_event(hid, buf);
+ }
+ hid_debug_event(hid, "\n");
+ kfree(buf);
+
+nomem:
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
if (ret != 0)
@@ -1290,6 +1308,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
@@ -1309,6 +1328,8 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
@@ -1723,6 +1744,8 @@ int hid_add_device(struct hid_device *hdev)
if (!ret)
hdev->status |= HID_STAT_ADDED;
+ hid_debug_register(hdev, dev_name(&hdev->dev));
+
return ret;
}
EXPORT_SYMBOL_GPL(hid_add_device);
@@ -1759,6 +1782,9 @@ struct hid_device *hid_allocate_device(void)
for (i = 0; i < HID_REPORT_TYPES; i++)
INIT_LIST_HEAD(&hdev->report_enum[i].report_list);
+ init_waitqueue_head(&hdev->debug_wait);
+ INIT_LIST_HEAD(&hdev->debug_list);
+
return hdev;
err:
put_device(&hdev->dev);
@@ -1770,6 +1796,7 @@ static void hid_remove_device(struct hid_device *hdev)
{
if (hdev->status & HID_STAT_ADDED) {
device_del(&hdev->dev);
+ hid_debug_unregister(hdev);
hdev->status &= ~HID_STAT_ADDED;
}
}
@@ -1845,6 +1872,10 @@ static int __init hid_init(void)
{
int ret;
+ if (hid_debug)
+ printk(KERN_WARNING "HID: hid_debug is now used solely for parser and driver debugging.\n"
+ "HID: debugfs is now used for inspecting the device (report descriptor, reports)\n");
+
ret = bus_register(&hid_bus_type);
if (ret) {
printk(KERN_ERR "HID: can't register hid bus\n");
@@ -1855,6 +1886,8 @@ static int __init hid_init(void)
if (ret)
goto err_bus;
+ hid_debug_init();
+
return 0;
err_bus:
bus_unregister(&hid_bus_type);
@@ -1864,6 +1897,7 @@ err:
static void __exit hid_exit(void)
{
+ hid_debug_exit();
hidraw_exit();
bus_unregister(&hid_bus_type);
}
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 9d6d3b91773b..62e9cb10e88c 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -141,12 +141,12 @@ static struct hid_driver cp_driver = {
.probe = cp_probe,
};
-static int cp_init(void)
+static int __init cp_init(void)
{
return hid_register_driver(&cp_driver);
}
-static void cp_exit(void)
+static void __exit cp_exit(void)
{
hid_unregister_driver(&cp_driver);
}
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 04359ed64b87..3e425f78ba3a 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1,9 +1,9 @@
/*
* (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de>
* (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz>
- * (c) 2007 Jiri Kosina
+ * (c) 2007-2009 Jiri Kosina
*
- * Some debug stuff for the HID parser.
+ * HID debugging support
*/
/*
@@ -26,9 +26,17 @@
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+
#include <linux/hid.h>
#include <linux/hid-debug.h>
+static struct dentry *hid_debug_root;
+
struct hid_usage_entry {
unsigned page;
unsigned usage;
@@ -339,72 +347,120 @@ static const struct hid_usage_entry hid_usage_table[] = {
{ 0, 0, NULL }
};
-static void resolv_usage_page(unsigned page) {
+/* Either output directly into simple seq_file, or (if f == NULL)
+ * allocate a separate buffer that will then be passed to the 'events'
+ * ringbuffer.
+ *
+ * This is because these functions can be called both for "one-shot"
+ * "rdesc" while resolving, or for blocking "events".
+ *
+ * This holds both for resolv_usage_page() and hid_resolv_usage().
+ */
+static char *resolv_usage_page(unsigned page, struct seq_file *f) {
const struct hid_usage_entry *p;
+ char *buf = NULL;
+
+ if (!f) {
+ buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ }
for (p = hid_usage_table; p->description; p++)
if (p->page == page) {
- printk("%s", p->description);
- return;
+ if (!f) {
+ snprintf(buf, HID_DEBUG_BUFSIZE, "%s",
+ p->description);
+ return buf;
+ }
+ else {
+ seq_printf(f, "%s", p->description);
+ return NULL;
+ }
}
- printk("%04x", page);
+ if (!f)
+ snprintf(buf, HID_DEBUG_BUFSIZE, "%04x", page);
+ else
+ seq_printf(f, "%04x", page);
+ return buf;
}
-void hid_resolv_usage(unsigned usage) {
+char *hid_resolv_usage(unsigned usage, struct seq_file *f) {
const struct hid_usage_entry *p;
+ char *buf = NULL;
+ int len = 0;
+
+ buf = resolv_usage_page(usage >> 16, f);
+ if (IS_ERR(buf)) {
+ printk(KERN_ERR "error allocating HID debug buffer\n");
+ return NULL;
+ }
- if (!hid_debug)
- return;
- resolv_usage_page(usage >> 16);
- printk(".");
+ if (!f) {
+ len = strlen(buf);
+ snprintf(buf+len, max(0, HID_DEBUG_BUFSIZE - len), ".");
+ len++;
+ }
+ else {
+ seq_printf(f, ".");
+ }
for (p = hid_usage_table; p->description; p++)
if (p->page == (usage >> 16)) {
for(++p; p->description && p->usage != 0; p++)
if (p->usage == (usage & 0xffff)) {
- printk("%s", p->description);
- return;
+ if (!f)
+ snprintf(buf + len,
+ max(0,HID_DEBUG_BUFSIZE - len - 1),
+ "%s", p->description);
+ else
+ seq_printf(f,
+ "%s",
+ p->description);
+ return buf;
}
break;
}
- printk("%04x", usage & 0xffff);
+ if (!f)
+ snprintf(buf + len, max(0, HID_DEBUG_BUFSIZE - len - 1),
+ "%04x", usage & 0xffff);
+ else
+ seq_printf(f, "%04x", usage & 0xffff);
+ return buf;
}
EXPORT_SYMBOL_GPL(hid_resolv_usage);
-static void tab(int n) {
- printk(KERN_DEBUG "%*s", n, "");
+static void tab(int n, struct seq_file *f) {
+ seq_printf(f, "%*s", n, "");
}
-void hid_dump_field(struct hid_field *field, int n) {
+void hid_dump_field(struct hid_field *field, int n, struct seq_file *f) {
int j;
- if (!hid_debug)
- return;
-
if (field->physical) {
- tab(n);
- printk("Physical(");
- hid_resolv_usage(field->physical); printk(")\n");
+ tab(n, f);
+ seq_printf(f, "Physical(");
+ hid_resolv_usage(field->physical, f); seq_printf(f, ")\n");
}
if (field->logical) {
- tab(n);
- printk("Logical(");
- hid_resolv_usage(field->logical); printk(")\n");
+ tab(n, f);
+ seq_printf(f, "Logical(");
+ hid_resolv_usage(field->logical, f); seq_printf(f, ")\n");
}
- tab(n); printk("Usage(%d)\n", field->maxusage);
+ tab(n, f); seq_printf(f, "Usage(%d)\n", field->maxusage);
for (j = 0; j < field->maxusage; j++) {
- tab(n+2); hid_resolv_usage(field->usage[j].hid); printk("\n");
+ tab(n+2, f); hid_resolv_usage(field->usage[j].hid, f); seq_printf(f, "\n");
}
if (field->logical_minimum != field->logical_maximum) {
- tab(n); printk("Logical Minimum(%d)\n", field->logical_minimum);
- tab(n); printk("Logical Maximum(%d)\n", field->logical_maximum);
+ tab(n, f); seq_printf(f, "Logical Minimum(%d)\n", field->logical_minimum);
+ tab(n, f); seq_printf(f, "Logical Maximum(%d)\n", field->logical_maximum);
}
if (field->physical_minimum != field->physical_maximum) {
- tab(n); printk("Physical Minimum(%d)\n", field->physical_minimum);
- tab(n); printk("Physical Maximum(%d)\n", field->physical_maximum);
+ tab(n, f); seq_printf(f, "Physical Minimum(%d)\n", field->physical_minimum);
+ tab(n, f); seq_printf(f, "Physical Maximum(%d)\n", field->physical_maximum);
}
if (field->unit_exponent) {
- tab(n); printk("Unit Exponent(%d)\n", field->unit_exponent);
+ tab(n, f); seq_printf(f, "Unit Exponent(%d)\n", field->unit_exponent);
}
if (field->unit) {
static const char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" };
@@ -425,77 +481,75 @@ void hid_dump_field(struct hid_field *field, int n) {
data >>= 4;
if(sys > 4) {
- tab(n); printk("Unit(Invalid)\n");
+ tab(n, f); seq_printf(f, "Unit(Invalid)\n");
}
else {
int earlier_unit = 0;
- tab(n); printk("Unit(%s : ", systems[sys]);
+ tab(n, f); seq_printf(f, "Unit(%s : ", systems[sys]);
for (i=1 ; i<sizeof(__u32)*2 ; i++) {
char nibble = data & 0xf;
data >>= 4;
if (nibble != 0) {
if(earlier_unit++ > 0)
- printk("*");
- printk("%s", units[sys][i]);
+ seq_printf(f, "*");
+ seq_printf(f, "%s", units[sys][i]);
if(nibble != 1) {
/* This is a _signed_ nibble(!) */
int val = nibble & 0x7;
if(nibble & 0x08)
val = -((0x7 & ~val) +1);
- printk("^%d", val);
+ seq_printf(f, "^%d", val);
}
}
}
- printk(")\n");
+ seq_printf(f, ")\n");
}
}
- tab(n); printk("Report Size(%u)\n", field->report_size);
- tab(n); printk("Report Count(%u)\n", field->report_count);
- tab(n); printk("Report Offset(%u)\n", field->report_offset);
+ tab(n, f); seq_printf(f, "Report Size(%u)\n", field->report_size);
+ tab(n, f); seq_printf(f, "Report Count(%u)\n", field->report_count);
+ tab(n, f); seq_printf(f, "Report Offset(%u)\n", field->report_offset);
- tab(n); printk("Flags( ");
+ tab(n, f); seq_printf(f, "Flags( ");
j = field->flags;
- printk("%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : "");
- printk("%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array ");
- printk("%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute ");
- printk("%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : "");
- printk("%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : "");
- printk("%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPreferredState " : "");
- printk("%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : "");
- printk("%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : "");
- printk("%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? "BufferedByte " : "");
- printk(")\n");
+ seq_printf(f, "%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : "");
+ seq_printf(f, "%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array ");
+ seq_printf(f, "%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute ");
+ seq_printf(f, "%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : "");
+ seq_printf(f, "%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : "");
+ seq_printf(f, "%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPreferredState " : "");
+ seq_printf(f, "%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : "");
+ seq_printf(f, "%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : "");
+ seq_printf(f, "%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? "BufferedByte " : "");
+ seq_printf(f, ")\n");
}
EXPORT_SYMBOL_GPL(hid_dump_field);
-void hid_dump_device(struct hid_device *device) {
+void hid_dump_device(struct hid_device *device, struct seq_file *f)
+{
struct hid_report_enum *report_enum;
struct hid_report *report;
struct list_head *list;
unsigned i,k;
static const char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
- if (!hid_debug)
- return;
-
for (i = 0; i < HID_REPORT_TYPES; i++) {
report_enum = device->report_enum + i;
list = report_enum->report_list.next;
while (list != &report_enum->report_list) {
report = (struct hid_report *) list;
- tab(2);
- printk("%s", table[i]);
+ tab(2, f);
+ seq_printf(f, "%s", table[i]);
if (report->id)
- printk("(%d)", report->id);
- printk("[%s]", table[report->type]);
- printk("\n");
+ seq_printf(f, "(%d)", report->id);
+ seq_printf(f, "[%s]", table[report->type]);
+ seq_printf(f, "\n");
for (k = 0; k < report->maxfield; k++) {
- tab(4);
- printk("Field(%d)\n", k);
- hid_dump_field(report->field[k], 6);
+ tab(4, f);
+ seq_printf(f, "Field(%d)\n", k);
+ hid_dump_field(report->field[k], 6, f);
}
list = list->next;
}
@@ -503,13 +557,37 @@ void hid_dump_device(struct hid_device *device) {
}
EXPORT_SYMBOL_GPL(hid_dump_device);
-void hid_dump_input(struct hid_usage *usage, __s32 value) {
- if (hid_debug < 2)
+/* enqueue string to 'events' ring buffer */
+void hid_debug_event(struct hid_device *hdev, char *buf)
+{
+ int i;
+ struct hid_debug_list *list;
+
+ list_for_each_entry(list, &hdev->debug_list, node) {
+ for (i = 0; i <= strlen(buf); i++)
+ list->hid_debug_buf[(list->tail + i) % (HID_DEBUG_BUFSIZE - 1)] =
+ buf[i];
+ list->tail = (list->tail + i) % (HID_DEBUG_BUFSIZE - 1);
+ }
+}
+EXPORT_SYMBOL_GPL(hid_debug_event);
+
+void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 value)
+{
+ char *buf;
+ int len;
+
+ buf = hid_resolv_usage(usage->hid, NULL);
+ if (!buf)
return;
+ len = strlen(buf);
+ snprintf(buf + len, HID_DEBUG_BUFSIZE - len - 1, " = %d\n", value);
+
+ hid_debug_event(hdev, buf);
+
+ kfree(buf);
+ wake_up_interruptible(&hdev->debug_wait);
- printk(KERN_DEBUG "hid-debug: input ");
- hid_resolv_usage(usage->hid);
- printk(" = %d\n", value);
}
EXPORT_SYMBOL_GPL(hid_dump_input);
@@ -786,12 +864,220 @@ static const char **names[EV_MAX + 1] = {
[EV_SND] = sounds, [EV_REP] = repeats,
};
-void hid_resolv_event(__u8 type, __u16 code) {
+void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) {
- if (!hid_debug)
- return;
-
- printk("%s.%s", events[type] ? events[type] : "?",
+ seq_printf(f, "%s.%s", events[type] ? events[type] : "?",
names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
}
-EXPORT_SYMBOL_GPL(hid_resolv_event);
+
+void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f)
+{
+ int i, j, k;
+ struct hid_report *report;
+ struct hid_usage *usage;
+
+ for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) {
+ list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
+ for (i = 0; i < report->maxfield; i++) {
+ for ( j = 0; j < report->field[i]->maxusage; j++) {
+ usage = report->field[i]->usage + j;
+ hid_resolv_usage(usage->hid, f);
+ seq_printf(f, " ---> ");
+ hid_resolv_event(usage->type, usage->code, f);
+ seq_printf(f, "\n");
+ }
+ }
+ }
+ }
+
+}
+
+
+static int hid_debug_rdesc_show(struct seq_file *f, void *p)
+{
+ struct hid_device *hdev = f->private;
+ int i;
+
+ /* dump HID report descriptor */
+ for (i = 0; i < hdev->rsize; i++)
+ seq_printf(f, "%02x ", hdev->rdesc[i]);
+ seq_printf(f, "\n\n");
+
+ /* dump parsed data and input mappings */
+ hid_dump_device(hdev, f);
+ seq_printf(f, "\n");
+ hid_dump_input_mapping(hdev, f);
+
+ return 0;
+}
+
+static int hid_debug_rdesc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hid_debug_rdesc_show, inode->i_private);
+}
+
+static int hid_debug_events_open(struct inode *inode, struct file *file)
+{
+ int err = 0;
+ struct hid_debug_list *list;
+
+ if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (!(list->hid_debug_buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
+ err = -ENOMEM;
+ goto out;
+ }
+ list->hdev = (struct hid_device *) inode->i_private;
+ file->private_data = list;
+ mutex_init(&list->read_mutex);
+
+ list_add_tail(&list->node, &list->hdev->debug_list);
+
+out:
+ return err;
+}
+
+static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct hid_debug_list *list = file->private_data;
+ int ret = 0, len;
+ DECLARE_WAITQUEUE(wait, current);
+
+ while (ret == 0) {
+ mutex_lock(&list->read_mutex);
+ if (list->head == list->tail) {
+ add_wait_queue(&list->hdev->debug_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (list->head == list->tail) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ if (!list->hdev || !list->hdev->debug) {
+ ret = -EIO;
+ break;
+ }
+
+ /* allow O_NONBLOCK from other threads */
+ mutex_unlock(&list->read_mutex);
+ schedule();
+ mutex_lock(&list->read_mutex);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&list->hdev->debug_wait, &wait);
+ }
+
+ if (ret)
+ goto out;
+
+ /* pass the ringbuffer contents to userspace */
+copy_rest:
+ if (list->tail == list->head)
+ goto out;
+ if (list->tail > list->head) {
+ len = list->tail - list->head;
+
+ if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret += len;
+ list->head += len;
+ } else {
+ len = HID_DEBUG_BUFSIZE - list->head;
+
+ if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ list->head = 0;
+ ret += len;
+ goto copy_rest;
+ }
+
+ }
+out:
+ mutex_unlock(&list->read_mutex);
+ return ret;
+}
+
+static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
+{
+ struct hid_debug_list *list = file->private_data;
+
+ poll_wait(file, &list->hdev->debug_wait, wait);
+ if (list->head != list->tail)
+ return POLLIN | POLLRDNORM;
+ if (!list->hdev->debug)
+ return POLLERR | POLLHUP;
+ return 0;
+}
+
+static int hid_debug_events_release(struct inode *inode, struct file *file)
+{
+ struct hid_debug_list *list = file->private_data;
+
+ list_del(&list->node);
+ kfree(list->hid_debug_buf);
+ kfree(list);
+
+ return 0;
+}
+
+static const struct file_operations hid_debug_rdesc_fops = {
+ .open = hid_debug_rdesc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations hid_debug_events_fops = {
+ .owner = THIS_MODULE,
+ .open = hid_debug_events_open,
+ .read = hid_debug_events_read,
+ .poll = hid_debug_events_poll,
+ .release = hid_debug_events_release,
+};
+
+
+void hid_debug_register(struct hid_device *hdev, const char *name)
+{
+ hdev->debug_dir = debugfs_create_dir(name, hid_debug_root);
+ hdev->debug_rdesc = debugfs_create_file("rdesc", 0400,
+ hdev->debug_dir, hdev, &hid_debug_rdesc_fops);
+ hdev->debug_events = debugfs_create_file("events", 0400,
+ hdev->debug_dir, hdev, &hid_debug_events_fops);
+ hdev->debug = 1;
+}
+
+void hid_debug_unregister(struct hid_device *hdev)
+{
+ hdev->debug = 0;
+ wake_up_interruptible(&hdev->debug_wait);
+ debugfs_remove(hdev->debug_rdesc);
+ debugfs_remove(hdev->debug_events);
+ debugfs_remove(hdev->debug_dir);
+}
+
+void hid_debug_init(void)
+{
+ hid_debug_root = debugfs_create_dir("hid", NULL);
+}
+
+void hid_debug_exit(void)
+{
+ debugfs_remove_recursive(hid_debug_root);
+}
+
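With this rework the per-device debugfs directory exposes a one-shot 'rdesc' dump and a blocking 'events' stream. A minimal userspace reader for the events node might look like the sketch below; the debugfs mount point and the device directory name are assumptions (debugfs on /sys/kernel/debug, directory named by hid_debug_register()), and the program is illustrative rather than part of the patch:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		char buf[512];
		ssize_t n;
		int fd;

		if (argc < 2)
			return 1;
		/* e.g. /sys/kernel/debug/hid/0003:046D:C294.0001/events */
		fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)	/* blocks until new events arrive */
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}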
diff --git a/drivers/hid/hid-ezkey.c b/drivers/hid/hid-ezkey.c
index 0a1fe054799b..ca1163e9d42d 100644
--- a/drivers/hid/hid-ezkey.c
+++ b/drivers/hid/hid-ezkey.c
@@ -78,12 +78,12 @@ static struct hid_driver ez_driver = {
.event = ez_event,
};
-static int ez_init(void)
+static int __init ez_init(void)
{
return hid_register_driver(&ez_driver);
}
-static void ez_exit(void)
+static void __exit ez_exit(void)
{
hid_unregister_driver(&ez_driver);
}
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index d42d222097a8..cab13e8c7d29 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -81,12 +81,12 @@ static struct hid_driver gyration_driver = {
.event = gyration_event,
};
-static int gyration_init(void)
+static int __init gyration_init(void)
{
return hid_register_driver(&gyration_driver);
}
-static void gyration_exit(void)
+static void __exit gyration_exit(void)
{
hid_unregister_driver(&gyration_driver);
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 630101037921..398f731fd699 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -296,6 +296,7 @@
#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
#define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286
#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
+#define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 7f183b7147e1..5862b0f3b55d 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -159,17 +159,12 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
field->hidinput = hidinput;
- dbg_hid("Mapping: ");
- hid_resolv_usage(usage->hid);
- dbg_hid_line(" ---> ");
-
if (field->flags & HID_MAIN_ITEM_CONSTANT)
goto ignore;
/* only LED usages are supported in output fields */
if (field->report_type == HID_OUTPUT_REPORT &&
(usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
- dbg_hid_line(" [non-LED output field] ");
goto ignore;
}
@@ -561,15 +556,9 @@ mapped:
set_bit(MSC_SCAN, input->mscbit);
}
- hid_resolv_event(usage->type, usage->code);
-
- dbg_hid_line("\n");
-
- return;
-
ignore:
- dbg_hid_line("IGNORED\n");
return;
+
}
void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value)
diff --git a/drivers/hid/hid-kensington.c b/drivers/hid/hid-kensington.c
index 7353bd79cbe9..a5b4016e9bd7 100644
--- a/drivers/hid/hid-kensington.c
+++ b/drivers/hid/hid-kensington.c
@@ -48,12 +48,12 @@ static struct hid_driver ks_driver = {
.input_mapping = ks_input_mapping,
};
-static int ks_init(void)
+static int __init ks_init(void)
{
return hid_register_driver(&ks_driver);
}
-static void ks_exit(void)
+static void __exit ks_exit(void)
{
hid_unregister_driver(&ks_driver);
}
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index 72ee3fec56d9..f8871712b7b5 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -54,12 +54,12 @@ static struct hid_driver kye_driver = {
.report_fixup = kye_report_fixup,
};
-static int kye_init(void)
+static int __init kye_init(void)
{
return hid_register_driver(&kye_driver);
}
-static void kye_exit(void)
+static void __exit kye_exit(void)
{
hid_unregister_driver(&kye_driver);
}
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 7afbaa0efd18..0f870a3243ed 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -299,6 +299,8 @@ static const struct hid_device_id lg_devices[] = {
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
.driver_data = LG_FF },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
+ .driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
.driver_data = LG_FF2 },
{ }
@@ -315,12 +317,12 @@ static struct hid_driver lg_driver = {
.probe = lg_probe,
};
-static int lg_init(void)
+static int __init lg_init(void)
{
return hid_register_driver(&lg_driver);
}
-static void lg_exit(void)
+static void __exit lg_exit(void)
{
hid_unregister_driver(&lg_driver);
}
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 56099709581c..95835ea5690f 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -67,6 +67,7 @@ static const struct dev_type devices[] = {
{ 0x046d, 0xc219, ff_rumble },
{ 0x046d, 0xc283, ff_joystick },
{ 0x046d, 0xc286, ff_joystick_ac },
+ { 0x046d, 0xc293, ff_joystick },
{ 0x046d, 0xc294, ff_wheel },
{ 0x046d, 0xc295, ff_joystick },
{ 0x046d, 0xca03, ff_wheel },
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 5e9e37a0506d..359cc447c6c6 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -197,12 +197,12 @@ static struct hid_driver ms_driver = {
.probe = ms_probe,
};
-static int ms_init(void)
+static int __init ms_init(void)
{
return hid_register_driver(&ms_driver);
}
-static void ms_exit(void)
+static void __exit ms_exit(void)
{
hid_unregister_driver(&ms_driver);
}
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index 240f87618be6..2cd05aa244b9 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -65,12 +65,12 @@ static struct hid_driver mr_driver = {
.input_mapping = mr_input_mapping,
};
-static int mr_init(void)
+static int __init mr_init(void)
{
return hid_register_driver(&mr_driver);
}
-static void mr_exit(void)
+static void __exit mr_exit(void)
{
hid_unregister_driver(&mr_driver);
}
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 75ed9d2c1a36..49ce69d7bba7 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -27,6 +27,9 @@
struct ntrig_data {
__s32 x, y, id, w, h;
char reading_a_point, found_contact_id;
+ char pen_active;
+ char finger_active;
+ char inverted;
};
/*
@@ -63,10 +66,7 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_DIGITIZER:
switch (usage->hid) {
/* we do not want to map these for now */
- case HID_DG_INVERT: /* value is always 0 */
- case HID_DG_ERASER: /* value is always 0 */
case HID_DG_CONTACTID: /* value is useless */
- case HID_DG_BARRELSWITCH: /* doubtful */
case HID_DG_INPUTMODE:
case HID_DG_DEVICEINDEX:
case HID_DG_CONTACTCOUNT:
@@ -125,6 +125,18 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
if (hid->claimed & HID_CLAIMED_INPUT) {
switch (usage->hid) {
+
+ case HID_DG_INRANGE:
+ if (field->application & 0x3)
+ nd->pen_active = (value != 0);
+ else
+ nd->finger_active = (value != 0);
+ return 0;
+
+ case HID_DG_INVERT:
+ nd->inverted = value;
+ return 0;
+
case HID_GD_X:
nd->x = value;
nd->reading_a_point = 1;
@@ -147,7 +159,11 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
* report received in a finger event. We want
* to emit a normal (X, Y) position
*/
- if (! nd->found_contact_id) {
+ if (!nd->found_contact_id) {
+ if (nd->pen_active && nd->finger_active) {
+ input_report_key(input, BTN_TOOL_DOUBLETAP, 0);
+ input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
+ }
input_event(input, EV_ABS, ABS_X, nd->x);
input_event(input, EV_ABS, ABS_Y, nd->y);
}
@@ -159,6 +175,14 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
* to emit a normal (X, Y) position
*/
if (! nd->found_contact_id) {
+ if (nd->pen_active && nd->finger_active) {
+ input_report_key(input,
+ nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN
+ , 0);
+ input_report_key(input,
+ nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN
+ , 1);
+ }
input_event(input, EV_ABS, ABS_X, nd->x);
input_event(input, EV_ABS, ABS_Y, nd->y);
input_event(input, EV_ABS, ABS_PRESSURE, value);
@@ -233,6 +257,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
kfree (nd);
+
return ret;
}
@@ -265,12 +290,12 @@ static struct hid_driver ntrig_driver = {
.event = ntrig_event,
};
-static int ntrig_init(void)
+static int __init ntrig_init(void)
{
return hid_register_driver(&ntrig_driver);
}
-static void ntrig_exit(void)
+static void __exit ntrig_exit(void)
{
hid_unregister_driver(&ntrig_driver);
}
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index 2e83e8ff891a..500fbd0652dc 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -105,12 +105,12 @@ static struct hid_driver pl_driver = {
.probe = pl_probe,
};
-static int pl_init(void)
+static int __init pl_init(void)
{
return hid_register_driver(&pl_driver);
}
-static void pl_exit(void)
+static void __exit pl_exit(void)
{
hid_unregister_driver(&pl_driver);
}
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 4db9a3483760..c6d7dbc935b1 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -217,12 +217,12 @@ static struct hid_driver pl_driver = {
.probe = pl_probe,
};
-static int pl_init(void)
+static int __init pl_init(void)
{
return hid_register_driver(&pl_driver);
}
-static void pl_exit(void)
+static void __exit pl_exit(void)
{
hid_unregister_driver(&pl_driver);
}
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 07083aa6c19a..5b222eed0692 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -25,25 +25,48 @@
/*
* Samsung IrDA remote controller (reports as Cypress USB Mouse).
*
+ * There are several variants for 0419:0001:
+ *
+ * 1. 184 byte report descriptor
* Vendor specific report #4 has a size of 48 bit,
* and therefore is not accepted when inspecting the descriptors.
* As a workaround we reinterpret the report as:
* Variable type, count 6, size 8 bit, log. maximum 255
* The burden to reconstruct the data is moved into user space.
+ *
+ * 2. 203 byte report descriptor
+ * Report #4 has an array field with logical range 0..18 instead of 1..15.
+ *
+ * 3. 135 byte report descriptor
+ * Report #4 has an array field with logical range 0..17 instead of 1..14.
*/
static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int rsize)
{
- if (rsize >= 182 && rdesc[175] == 0x25 && rdesc[176] == 0x40 &&
+ if (rsize == 184 && rdesc[175] == 0x25 && rdesc[176] == 0x40 &&
rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
rdesc[182] == 0x40) {
- dev_info(&hdev->dev, "fixing up Samsung IrDA report "
- "descriptor\n");
+ dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
+ "descriptor\n", 184);
rdesc[176] = 0xff;
rdesc[178] = 0x08;
rdesc[180] = 0x06;
rdesc[182] = 0x42;
+ } else
+ if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
+ rdesc[194] == 0x25 && rdesc[195] == 0x12) {
+ dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
+ "descriptor\n", 203);
+ rdesc[193] = 0x1;
+ rdesc[195] = 0xf;
+ } else
+ if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
+ rdesc[126] == 0x25 && rdesc[127] == 0x11) {
+ dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
+ "descriptor\n", 135);
+ rdesc[125] = 0x1;
+ rdesc[127] = 0xe;
}
}
@@ -51,6 +74,7 @@ static int samsung_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int ret;
+ unsigned int cmask = HID_CONNECT_DEFAULT;
ret = hid_parse(hdev);
if (ret) {
@@ -58,8 +82,13 @@ static int samsung_probe(struct hid_device *hdev,
goto err_free;
}
- ret = hid_hw_start(hdev, (HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDINPUT) |
- HID_CONNECT_HIDDEV_FORCE);
+ if (hdev->rsize == 184) {
+ /* disable hidinput, force hiddev */
+ cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
+ HID_CONNECT_HIDDEV_FORCE;
+ }
+
+ ret = hid_hw_start(hdev, cmask);
if (ret) {
dev_err(&hdev->dev, "hw start failed\n");
goto err_free;
@@ -83,12 +112,12 @@ static struct hid_driver samsung_driver = {
.probe = samsung_probe,
};
-static int samsung_init(void)
+static int __init samsung_init(void)
{
return hid_register_driver(&samsung_driver);
}
-static void samsung_exit(void)
+static void __exit samsung_exit(void)
{
hid_unregister_driver(&samsung_driver);
}
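For the 184-byte descriptor case above, the patched offsets decode as standard HID short items; the Input tag 0x81 at rdesc[181] is an assumption (it is not visible in the hunk), the rest follows from the bytes being matched and rewritten:

	/*
	 * rdesc[175..176]  0x25 0x40 -> 0x25 0xff   Logical Maximum   64 -> 255
	 * rdesc[177..178]  0x75 0x30 -> 0x75 0x08   Report Size       48 -> 8 bits
	 * rdesc[179..180]  0x95 0x01 -> 0x95 0x06   Report Count       1 -> 6
	 * rdesc[181..182]  0x81 0x40 -> 0x81 0x42   Input: Null State -> Variable | Null State
	 */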
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index eab169e5c371..203c438b016f 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -163,12 +163,12 @@ static struct hid_driver sjoy_driver = {
.probe = sjoy_probe,
};
-static int sjoy_init(void)
+static int __init sjoy_init(void)
{
return hid_register_driver(&sjoy_driver);
}
-static void sjoy_exit(void)
+static void __exit sjoy_exit(void)
{
hid_unregister_driver(&sjoy_driver);
}
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index c2599388a350..4e8450228a24 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -135,12 +135,12 @@ static struct hid_driver sony_driver = {
.report_fixup = sony_report_fixup,
};
-static int sony_init(void)
+static int __init sony_init(void)
{
return hid_register_driver(&sony_driver);
}
-static void sony_exit(void)
+static void __exit sony_exit(void)
{
hid_unregister_driver(&sony_driver);
}
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index e0a8fd36a85b..438107d9f1b2 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -65,12 +65,12 @@ static struct hid_driver sp_driver = {
.input_mapping = sp_input_mapping,
};
-static int sp_init(void)
+static int __init sp_init(void)
{
return hid_register_driver(&sp_driver);
}
-static void sp_exit(void)
+static void __exit sp_exit(void)
{
hid_unregister_driver(&sp_driver);
}
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index fcd6ccd02fee..167ea746fb9c 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -243,7 +243,11 @@ err:
static const struct hid_device_id tm_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300),
.driver_data = (unsigned long)ff_rumble },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304),
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
+ .driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
+ .driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651), /* FGT Rumble Force Wheel */
.driver_data = (unsigned long)ff_rumble },
@@ -259,12 +263,12 @@ static struct hid_driver tm_driver = {
.probe = tm_probe,
};
-static int tm_init(void)
+static int __init tm_init(void)
{
return hid_register_driver(&tm_driver);
}
-static void tm_exit(void)
+static void __exit tm_exit(void)
{
hid_unregister_driver(&tm_driver);
}
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 152ccfabeba5..6925eda1081a 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -60,12 +60,12 @@ static struct hid_driver ts_driver = {
.input_mapping = ts_input_mapping,
};
-static int ts_init(void)
+static int __init ts_init(void)
{
return hid_register_driver(&ts_driver);
}
-static void ts_exit(void)
+static void __exit ts_exit(void)
{
hid_unregister_driver(&ts_driver);
}
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 1f9237f511e3..747542172242 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -237,7 +237,7 @@ static struct hid_driver wacom_driver = {
.raw_event = wacom_raw_event,
};
-static int wacom_init(void)
+static int __init wacom_init(void)
{
int ret;
@@ -248,7 +248,7 @@ static int wacom_init(void)
return ret;
}
-static void wacom_exit(void)
+static void __exit wacom_exit(void)
{
hid_unregister_driver(&wacom_driver);
}
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index 57f710757bf4..a79f0d78c6be 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -152,12 +152,12 @@ static struct hid_driver zp_driver = {
.probe = zp_probe,
};
-static int zp_init(void)
+static int __init zp_init(void)
{
return hid_register_driver(&zp_driver);
}
-static void zp_exit(void)
+static void __exit zp_exit(void)
{
hid_unregister_driver(&zp_driver);
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 76c4bbe9dccb..a72e3149810f 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -4,8 +4,8 @@
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- * Copyright (c) 2006-2008 Jiri Kosina
* Copyright (c) 2007-2008 Oliver Neukum
+ * Copyright (c) 2006-2009 Jiri Kosina
*/
/*
@@ -886,11 +886,6 @@ static int usbhid_parse(struct hid_device *hid)
goto err;
}
- dbg_hid("report descriptor (size %u, read %d) = ", rsize, n);
- for (n = 0; n < rsize; n++)
- dbg_hid_line(" %02x", (unsigned char) rdesc[n]);
- dbg_hid_line("\n");
-
ret = hid_parse_report(hid, rdesc, rsize);
kfree(rdesc);
if (ret) {
@@ -987,7 +982,6 @@ static int usbhid_start(struct hid_device *hid)
setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&usbhid->lock);
- spin_lock_init(&usbhid->lock);
usbhid->intf = intf;
usbhid->ifnum = interface->desc.bInterfaceNumber;
@@ -1005,7 +999,6 @@ static int usbhid_start(struct hid_device *hid)
usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
usbhid_init_reports(hid);
- hid_dump_device(hid);
set_bit(HID_STARTED, &usbhid->iofl);
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 9e9421525fb9..215b2addddbb 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -527,8 +527,10 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
goto goodreturn;
case HIDIOCGCOLLECTIONINDEX:
+ i = field->usage[uref->usage_index].collection_index;
+ unlock_kernel();
kfree(uref_multi);
- return field->usage[uref->usage_index].collection_index;
+ return i;
case HIDIOCGUSAGES:
for (i = 0; i < uref_multi->num_values; i++)
uref_multi->values[i] =
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 2d5016691d40..a897732af4a9 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -797,6 +797,16 @@ config SENSORS_TMP401
This driver can also be built as a module. If so, the module
will be called tmp401.
+config SENSORS_TMP421
+ tristate "Texas Instruments TMP421 and compatible"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for Texas Instruments TMP421,
+ TMP422 and TMP423 temperature sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp421.
+
config SENSORS_VIA686A
tristate "VIA686A"
depends on PCI
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b793dce6bed5..537daac4b35a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
+obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
obj-$(CONFIG_SENSORS_VT8231) += vt8231.o
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index ad2b3431b725..7d3f15d32fdf 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -357,7 +357,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX5 Fan", 39, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0014, NULL /* Abit AB9 Pro, need DMI string */, {
+ { 0x0014, "AB9", /* + AB9 Pro */ {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 10, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -455,7 +455,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 FAN", 37, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0018, NULL /* Unknown, need DMI string */, {
+ { 0x0018, "AB9 QuadGT", {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -564,7 +564,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "AUX3 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x001C, NULL /* Unknown, need DMI string */, {
+ { 0x001C, "IX38 QuadGT", {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR2", 1, 0, 20, 1, 0 },
{ "DDR2 VTT", 2, 0, 10, 1, 0 },
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index bfc296145bba..bf0862a803c0 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -179,8 +179,14 @@ struct vrm_model {
static struct vrm_model vrm_models[] = {
{X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */
{X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */
- {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */
+ /* In theory, all NPT family 0Fh processors have 6 VID pins and should
+ thus use vrm 25, however in practice not all mainboards route the
+ 6th VID pin because it is never needed. So we use the 5 VID pin
+ variant (vrm 24) for the models which exist today. */
+ {X86_VENDOR_AMD, 0xF, 0x7F, ANY, 24}, /* NPT family 0Fh */
+ {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* future fam. 0Fh */
{X86_VENDOR_AMD, 0x10, ANY, ANY, 25}, /* NPT family 10h */
+
{X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */
{X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */
{X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */
@@ -191,12 +197,14 @@ static struct vrm_model vrm_models[] = {
{X86_VENDOR_INTEL, 0xF, 0x1, ANY, 90}, /* P4 Willamette */
{X86_VENDOR_INTEL, 0xF, 0x2, ANY, 90}, /* P4 Northwood */
{X86_VENDOR_INTEL, 0xF, ANY, ANY, 100}, /* Prescott and above assume VRD 10 */
+
{X86_VENDOR_CENTAUR, 0x6, 0x7, ANY, 85}, /* Eden ESP/Ezra */
{X86_VENDOR_CENTAUR, 0x6, 0x8, 0x7, 85}, /* Ezra T */
{X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85}, /* Nemiah */
{X86_VENDOR_CENTAUR, 0x6, 0x9, ANY, 17}, /* C3-M, Eden-N */
{X86_VENDOR_CENTAUR, 0x6, 0xA, 0x7, 0}, /* No information */
{X86_VENDOR_CENTAUR, 0x6, 0xA, ANY, 13}, /* C7, Esther */
+
{X86_VENDOR_UNKNOWN, ANY, ANY, ANY, 0} /* stop here */
};
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 86142a858238..58f66be61b1f 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -418,6 +418,7 @@ static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
data->count = 3;
break;
default:
+ mutex_unlock(&data->update_lock);
dev_err(&client->dev,
"illegal value for fan divider (%d)\n", div);
return -EINVAL;
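The one-line max6650 fix drops update_lock before the early return so the illegal-divider path no longer leaves the mutex held. The general shape of the pattern, as a sketch with hypothetical structure and field names rather than the driver's actual function, is:

	static ssize_t set_div_sketch(struct sketch_data *data, long div, size_t count)
	{
		mutex_lock(&data->update_lock);
		switch (div) {
		case 1:
			data->count = 0;
			break;
		default:
			/* every exit path must release the lock */
			mutex_unlock(&data->update_lock);
			return -EINVAL;
		}
		/* ... program the divider register here ... */
		mutex_unlock(&data->update_lock);
		return count;
	}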
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 56cd6004da36..6290a259456e 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -257,7 +257,7 @@ static inline int sht15_update_single_val(struct sht15_data *data,
(data->flag == SHT15_READING_NOTHING),
msecs_to_jiffies(timeout_msecs));
if (ret == 0) {/* timeout occurred */
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));;
+ disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
sht15_connection_reset(data);
return -ETIME;
}
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
new file mode 100644
index 000000000000..20924343431b
--- /dev/null
+++ b/drivers/hwmon/tmp421.c
@@ -0,0 +1,347 @@
+/* tmp421.c
+ *
+ * Copyright (C) 2009 Andre Prendel <andre.prendel@gmx.de>
+ * Preliminary support by:
+ * Melvin Rook, Raymond Ng
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Driver for the Texas Instruments TMP421 SMBus temperature sensor IC.
+ * Supported models: TMP421, TMP422, TMP423
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f,
+ I2C_CLIENT_END };
+
+/* Insmod parameters */
+I2C_CLIENT_INSMOD_3(tmp421, tmp422, tmp423);
+
+/* The TMP421 registers */
+#define TMP421_CONFIG_REG_1 0x09
+#define TMP421_CONVERSION_RATE_REG 0x0B
+#define TMP421_MANUFACTURER_ID_REG 0xFE
+#define TMP421_DEVICE_ID_REG 0xFF
+
+static const u8 TMP421_TEMP_MSB[4] = { 0x00, 0x01, 0x02, 0x03 };
+static const u8 TMP421_TEMP_LSB[4] = { 0x10, 0x11, 0x12, 0x13 };
+
+/* Flags */
+#define TMP421_CONFIG_SHUTDOWN 0x40
+#define TMP421_CONFIG_RANGE 0x04
+
+/* Manufacturer / Device ID's */
+#define TMP421_MANUFACTURER_ID 0x55
+#define TMP421_DEVICE_ID 0x21
+#define TMP422_DEVICE_ID 0x22
+#define TMP423_DEVICE_ID 0x23
+
+static const struct i2c_device_id tmp421_id[] = {
+ { "tmp421", tmp421 },
+ { "tmp422", tmp422 },
+ { "tmp423", tmp423 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp421_id);
+
+struct tmp421_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ char valid;
+ unsigned long last_updated;
+ int kind;
+ u8 config;
+ s16 temp[4];
+};
+
+static int temp_from_s16(s16 reg)
+{
+ int temp = reg;
+
+ return (temp * 1000 + 128) / 256;
+}
+
+static int temp_from_u16(u16 reg)
+{
+ int temp = reg;
+
+ /* Add offset for extended temperature range. */
+ temp -= 64 * 256;
+
+ return (temp * 1000 + 128) / 256;
+}
+
+static struct tmp421_data *tmp421_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tmp421_data *data = i2c_get_clientdata(client);
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
+ data->config = i2c_smbus_read_byte_data(client,
+ TMP421_CONFIG_REG_1);
+
+ for (i = 0; i <= data->kind; i++) {
+ data->temp[i] = i2c_smbus_read_byte_data(client,
+ TMP421_TEMP_MSB[i]) << 8;
+ data->temp[i] |= i2c_smbus_read_byte_data(client,
+ TMP421_TEMP_LSB[i]);
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+static ssize_t show_temp_value(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp421_data *data = tmp421_update_device(dev);
+ int temp;
+
+ mutex_lock(&data->update_lock);
+ if (data->config & TMP421_CONFIG_RANGE)
+ temp = temp_from_u16(data->temp[index]);
+ else
+ temp = temp_from_s16(data->temp[index]);
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t show_fault(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct tmp421_data *data = tmp421_update_device(dev);
+
+ /*
+ * The OPEN bit signals a fault. This is bit 0 of the temperature
+ * register (low byte).
+ */
+ if (data->temp[index] & 0x01)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static mode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct tmp421_data *data = dev_get_drvdata(dev);
+ struct device_attribute *devattr;
+ unsigned int index;
+
+ devattr = container_of(a, struct device_attribute, attr);
+ index = to_sensor_dev_attr(devattr)->index;
+
+ if (data->kind > index)
+ return a->mode;
+
+ return 0;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_value, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3);
+
+static struct attribute *tmp421_attr[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tmp421_group = {
+ .attrs = tmp421_attr,
+ .is_visible = tmp421_is_visible,
+};
+
+static int tmp421_init_client(struct i2c_client *client)
+{
+ int config, config_orig;
+
+ /* Set the conversion rate to 2 Hz */
+ i2c_smbus_write_byte_data(client, TMP421_CONVERSION_RATE_REG, 0x05);
+
+ /* Start conversions (disable shutdown if necessary) */
+ config = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
+ if (config < 0) {
+ dev_err(&client->dev, "Could not read configuration"
+ " register (%d)\n", config);
+ return -ENODEV;
+ }
+
+ config_orig = config;
+ config &= ~TMP421_CONFIG_SHUTDOWN;
+
+ if (config != config_orig) {
+ dev_info(&client->dev, "Enable monitoring chip\n");
+ i2c_smbus_write_byte_data(client, TMP421_CONFIG_REG_1, config);
+ }
+
+ return 0;
+}
+
+static int tmp421_detect(struct i2c_client *client, int kind,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ const char *names[] = { "TMP421", "TMP422", "TMP423" };
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ if (kind <= 0) {
+ u8 reg;
+
+ reg = i2c_smbus_read_byte_data(client,
+ TMP421_MANUFACTURER_ID_REG);
+ if (reg != TMP421_MANUFACTURER_ID)
+ return -ENODEV;
+
+ reg = i2c_smbus_read_byte_data(client,
+ TMP421_DEVICE_ID_REG);
+ switch (reg) {
+ case TMP421_DEVICE_ID:
+ kind = tmp421;
+ break;
+ case TMP422_DEVICE_ID:
+ kind = tmp422;
+ break;
+ case TMP423_DEVICE_ID:
+ kind = tmp423;
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+ strlcpy(info->type, tmp421_id[kind - 1].name, I2C_NAME_SIZE);
+ dev_info(&adapter->dev, "Detected TI %s chip at 0x%02x\n",
+ names[kind - 1], client->addr);
+
+ return 0;
+}
+
+static int tmp421_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tmp421_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct tmp421_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+ data->kind = id->driver_data;
+
+ err = tmp421_init_client(client);
+ if (err)
+ goto exit_free;
+
+ err = sysfs_create_group(&client->dev.kobj, &tmp421_group);
+ if (err)
+ goto exit_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ data->hwmon_dev = NULL;
+ goto exit_remove;
+ }
+ return 0;
+
+exit_remove:
+ sysfs_remove_group(&client->dev.kobj, &tmp421_group);
+
+exit_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return err;
+}
+
+static int tmp421_remove(struct i2c_client *client)
+{
+ struct tmp421_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &tmp421_group);
+
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return 0;
+}
+
+static struct i2c_driver tmp421_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tmp421",
+ },
+ .probe = tmp421_probe,
+ .remove = tmp421_remove,
+ .id_table = tmp421_id,
+ .detect = tmp421_detect,
+ .address_data = &addr_data,
+};
+
+static int __init tmp421_init(void)
+{
+ return i2c_add_driver(&tmp421_driver);
+}
+
+static void __exit tmp421_exit(void)
+{
+ i2c_del_driver(&tmp421_driver);
+}
+
+MODULE_AUTHOR("Andre Prendel <andre.prendel@gmx.de>");
+MODULE_DESCRIPTION("Texas Instruments TMP421/422/423 temperature sensor"
+ " driver");
+MODULE_LICENSE("GPL");
+
+module_init(tmp421_init);
+module_exit(tmp421_exit);
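
For reference, the raw TMP421 register value is the temperature in units of 1/256 degree C (high byte = whole degrees, upper nibble of the low byte = the fraction), so the driver's (raw * 1000 + 128) / 256 yields millidegrees. A minimal user-space sanity check of that arithmetic, not part of the patch, using made-up sample values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same scaling as temp_from_s16() in the driver: raw is degrees * 256. */
static int demo_temp_from_s16(int16_t raw)
{
	return ((int)raw * 1000 + 128) / 256;
}

int main(void)
{
	assert(demo_temp_from_s16(0x1900) == 25000);	/* 25.0 C -> 25000 millidegrees */
	assert(demo_temp_from_s16(0x0C80) == 12500);	/* 12.5 C -> 12500 millidegrees */
	printf("0x1900 -> %d millidegrees\n", demo_temp_from_s16(0x1900));
	return 0;
}
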
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index aa87b6a3bbef..8206442fbabd 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -328,6 +328,7 @@ config I2C_DAVINCI
config I2C_DESIGNWARE
tristate "Synopsys DesignWare"
+ depends on HAVE_CLK
help
If you say yes to this option, support will be included for the
Synopsys DesignWare I2C adapter. Only master mode is supported.
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 762e1e530882..049555777f67 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1134,35 +1134,44 @@ static int __exit i2c_pxa_remove(struct platform_device *dev)
}
#ifdef CONFIG_PM
-static int i2c_pxa_suspend_late(struct platform_device *dev, pm_message_t state)
+static int i2c_pxa_suspend_noirq(struct device *dev)
{
- struct pxa_i2c *i2c = platform_get_drvdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pxa_i2c *i2c = platform_get_drvdata(pdev);
+
clk_disable(i2c->clk);
+
return 0;
}
-static int i2c_pxa_resume_early(struct platform_device *dev)
+static int i2c_pxa_resume_noirq(struct device *dev)
{
- struct pxa_i2c *i2c = platform_get_drvdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pxa_i2c *i2c = platform_get_drvdata(pdev);
clk_enable(i2c->clk);
i2c_pxa_reset(i2c);
return 0;
}
+
+static struct dev_pm_ops i2c_pxa_dev_pm_ops = {
+ .suspend_noirq = i2c_pxa_suspend_noirq,
+ .resume_noirq = i2c_pxa_resume_noirq,
+};
+
+#define I2C_PXA_DEV_PM_OPS (&i2c_pxa_dev_pm_ops)
#else
-#define i2c_pxa_suspend_late NULL
-#define i2c_pxa_resume_early NULL
+#define I2C_PXA_DEV_PM_OPS NULL
#endif
static struct platform_driver i2c_pxa_driver = {
.probe = i2c_pxa_probe,
.remove = __exit_p(i2c_pxa_remove),
- .suspend_late = i2c_pxa_suspend_late,
- .resume_early = i2c_pxa_resume_early,
.driver = {
.name = "pxa2xx-i2c",
.owner = THIS_MODULE,
+ .pm = I2C_PXA_DEV_PM_OPS,
},
.id_table = i2c_pxa_id_table,
};
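
The i2c-pxa hunk above and the i2c-s3c2410 hunk below follow the same recipe: drop the legacy suspend_late/resume_early platform_driver hooks and route power management through a struct dev_pm_ops hung off .driver.pm. A condensed sketch of the resulting shape, not taken from either driver; the "foo" names and the clock handling are placeholders:

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct foo { struct clk *clk; };

#ifdef CONFIG_PM
static int foo_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct foo *foo = platform_get_drvdata(pdev);

	clk_disable(foo->clk);		/* runs late, with device IRQs off */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct foo *foo = platform_get_drvdata(pdev);

	clk_enable(foo->clk);
	return 0;
}

static struct dev_pm_ops foo_dev_pm_ops = {
	.suspend_noirq	= foo_suspend_noirq,
	.resume_noirq	= foo_resume_noirq,
};
#define FOO_DEV_PM_OPS	(&foo_dev_pm_ops)
#else
#define FOO_DEV_PM_OPS	NULL
#endif

static struct platform_driver foo_driver = {
	/* no .suspend_late / .resume_early; PM now goes through .driver.pm */
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= FOO_DEV_PM_OPS,
	},
};
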
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 8f42a4536cdf..3be0d6a4d630 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -951,17 +951,20 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int s3c24xx_i2c_suspend_late(struct platform_device *dev,
- pm_message_t msg)
+static int s3c24xx_i2c_suspend_noirq(struct device *dev)
{
- struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+
i2c->suspended = 1;
+
return 0;
}
-static int s3c24xx_i2c_resume(struct platform_device *dev)
+static int s3c24xx_i2c_resume(struct device *dev)
{
- struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
i2c->suspended = 0;
s3c24xx_i2c_init(i2c);
@@ -969,9 +972,14 @@ static int s3c24xx_i2c_resume(struct platform_device *dev)
return 0;
}
+static struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
+ .suspend_noirq = s3c24xx_i2c_suspend_noirq,
+ .resume = s3c24xx_i2c_resume,
+};
+
+#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
#else
-#define s3c24xx_i2c_suspend_late NULL
-#define s3c24xx_i2c_resume NULL
+#define S3C24XX_DEV_PM_OPS NULL
#endif
/* device driver for platform bus bits */
@@ -990,12 +998,11 @@ MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
- .suspend_late = s3c24xx_i2c_suspend_late,
- .resume = s3c24xx_i2c_resume,
.id_table = s3c24xx_driver_ids,
.driver = {
.owner = THIS_MODULE,
.name = "s3c-i2c",
+ .pm = S3C24XX_DEV_PM_OPS,
},
};
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index bd066bb9d611..09f98ed0731f 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -135,6 +135,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
hw[0].irq = 14;
+ hw[1].irq = 15;
return ide_host_add(d, hws, 2, NULL);
}
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 77f79d26b264..c509c9916464 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -92,6 +92,11 @@ int ide_acpi_init(void)
return 0;
}
+bool ide_port_acpi(ide_hwif_t *hwif)
+{
+ return ide_noacpi == 0 && hwif->acpidata;
+}
+
/**
* ide_get_dev_handle - finds acpi_handle and PCI device.function
* @dev: device to locate
@@ -352,9 +357,6 @@ int ide_acpi_exec_tfs(ide_drive_t *drive)
unsigned long gtf_address;
unsigned long obj_loc;
- if (ide_noacpi)
- return 0;
-
DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
@@ -389,16 +391,6 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
struct acpi_buffer output;
union acpi_object *out_obj;
- if (ide_noacpi)
- return;
-
- DEBPRINT("ENTER:\n");
-
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* Setting up output buffer for _GTM */
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
@@ -479,16 +471,6 @@ void ide_acpi_push_timing(ide_hwif_t *hwif)
struct ide_acpi_drive_link *master = &hwif->acpidata->master;
struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
- if (ide_noacpi)
- return;
-
- DEBPRINT("ENTER:\n");
-
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* Give the GTM buffer + drive Identify data to the channel via the
* _STM method: */
/* setup input parameters buffer for _STM */
@@ -527,16 +509,11 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
ide_drive_t *drive;
int i;
- if (ide_noacpi || ide_noacpi_psx)
+ if (ide_noacpi_psx)
return;
DEBPRINT("ENTER:\n");
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* channel first and then drives for power on and verse versa for power off */
if (on)
acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0);
@@ -616,7 +593,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
drive->name, err);
}
- if (!ide_acpionboot) {
+ if (ide_noacpi || ide_acpionboot == 0) {
DEBPRINT("ACPI methods disabled on boot\n");
return;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 4a19686fcfe9..6a9a769bffc1 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -592,9 +592,19 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
} else if (!blk_pc_request(rq)) {
ide_cd_request_sense_fixup(drive, cmd);
- /* complain if we still have data left to transfer */
+
uptodate = cmd->nleft ? 0 : 1;
- if (uptodate == 0)
+
+ /*
+ * suck out the remaining bytes from the drive in an
+ * attempt to complete the data xfer. (see BZ#13399)
+ */
+ if (!(stat & ATA_ERR) && !uptodate && thislen) {
+ ide_pio_bytes(drive, cmd, write, thislen);
+ uptodate = cmd->nleft ? 0 : 1;
+ }
+
+ if (!uptodate)
rq->cmd_flags |= REQ_FAILED;
}
goto out_end;
@@ -876,9 +886,12 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
return stat;
/*
- * Sanity check the given block size
+ * Sanity check the given block size, in so far as making
+ * sure the sectors_per_frame we give to the caller won't
+ * end up being bogus.
*/
blocklen = be32_to_cpu(capbuf.blocklen);
+ blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS;
switch (blocklen) {
case 512:
case 1024:
@@ -886,10 +899,9 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
case 4096:
break;
default:
- printk(KERN_ERR PFX "%s: weird block size %u\n",
+ printk_once(KERN_ERR PFX "%s: weird block size %u; "
+ "setting default block size to 2048\n",
drive->name, blocklen);
- printk(KERN_ERR PFX "%s: default to 2kb block size\n",
- drive->name);
blocklen = 2048;
break;
}
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 5bf958e5b1d5..1099bf7cf968 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -183,6 +183,6 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
err = setfunc(drive, *(int *)&rq->cmd[1]);
if (err)
rq->errors = err;
- ide_complete_rq(drive, err, ide_rq_bytes(rq));
+ ide_complete_rq(drive, err, blk_rq_bytes(rq));
return ide_stopped;
}
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 219e6fb78dc6..ee58c88dee5a 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -361,9 +361,6 @@ static int ide_tune_dma(ide_drive_t *drive)
if (__ide_dma_bad_drive(drive))
return 0;
- if (ide_id_dma_bug(drive))
- return 0;
-
if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
return config_drive_for_dma(drive);
@@ -394,24 +391,6 @@ static int ide_dma_check(ide_drive_t *drive)
return -1;
}
-int ide_id_dma_bug(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- if (id[ATA_ID_FIELD_VALID] & 4) {
- if ((id[ATA_ID_UDMA_MODES] >> 8) &&
- (id[ATA_ID_MWDMA_MODES] >> 8))
- goto err_out;
- } else if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
- (id[ATA_ID_SWDMA_MODES] >> 8))
- goto err_out;
-
- return 0;
-err_out:
- printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
- return 1;
-}
-
int ide_set_dma(ide_drive_t *drive)
{
int rc;
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 2b9141979613..e9abf2c3c335 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -149,7 +149,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
if (err <= 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, err ? err : 0, ide_rq_bytes(rq));
+ ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
}
}
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 8b3f204f7d73..fefbdfc8db06 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -293,7 +293,7 @@ out_end:
drive->failed_pc = NULL;
if (blk_fs_request(rq) == 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
return ide_stopped;
}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1059f809b809..d5f3c77beadd 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -112,16 +112,6 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
}
}
-/* obsolete, blk_rq_bytes() should be used instead */
-unsigned int ide_rq_bytes(struct request *rq)
-{
- if (blk_pc_request(rq))
- return blk_rq_bytes(rq);
- else
- return blk_rq_cur_sectors(rq) << 9;
-}
-EXPORT_SYMBOL_GPL(ide_rq_bytes);
-
int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
{
ide_hwif_t *hwif = drive->hwif;
@@ -152,14 +142,14 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
if ((media == ide_floppy || media == ide_tape) && drv_req) {
rq->errors = 0;
- ide_complete_rq(drive, 0, blk_rq_bytes(rq));
} else {
if (media == ide_tape)
rq->errors = IDE_DRV_ERROR_GENERAL;
else if (blk_fs_request(rq) == 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
}
+
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
}
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
@@ -476,10 +466,14 @@ void do_ide_request(struct request_queue *q)
if (!ide_lock_port(hwif)) {
ide_hwif_t *prev_port;
-
- WARN_ON_ONCE(hwif->rq);
repeat:
prev_port = hwif->host->cur_port;
+
+ if (drive->dev_flags & IDE_DFLAG_BLOCKED)
+ rq = hwif->rq;
+ else
+ WARN_ON_ONCE(hwif->rq);
+
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
ide_unlock_port(hwif);
@@ -506,43 +500,29 @@ repeat:
hwif->cur_dev = drive;
drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
- spin_unlock_irq(&hwif->lock);
- spin_lock_irq(q->queue_lock);
- /*
- * we know that the queue isn't empty, but this can happen
- * if the q->prep_rq_fn() decides to kill a request
- */
- if (!rq)
+ if (rq == NULL) {
+ spin_unlock_irq(&hwif->lock);
+ spin_lock_irq(q->queue_lock);
+ /*
+ * we know that the queue isn't empty, but this can
+ * happen if ->prep_rq_fn() decides to kill a request
+ */
rq = blk_fetch_request(drive->queue);
+ spin_unlock_irq(q->queue_lock);
+ spin_lock_irq(&hwif->lock);
- spin_unlock_irq(q->queue_lock);
- spin_lock_irq(&hwif->lock);
-
- if (!rq) {
- ide_unlock_port(hwif);
- goto out;
+ if (rq == NULL) {
+ ide_unlock_port(hwif);
+ goto out;
+ }
}
/*
* Sanity: don't accept a request that isn't a PM request
- * if we are currently power managed. This is very important as
- * blk_stop_queue() doesn't prevent the blk_fetch_request()
- * above to return us whatever is in the queue. Since we call
- * ide_do_request() ourselves, we end up taking requests while
- * the queue is blocked...
- *
- * We let requests forced at head of queue with ide-preempt
- * though. I hope that doesn't happen too much, hopefully not
- * unless the subdriver triggers such a thing in its own PM
- * state machine.
+ * if we are currently power managed.
*/
- if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
- blk_pm_request(rq) == 0 &&
- (rq->cmd_flags & REQ_PREEMPT) == 0) {
- /* there should be no pending command at this point */
- ide_unlock_port(hwif);
- goto plug_device;
- }
+ BUG_ON((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
+ blk_pm_request(rq) == 0);
hwif->rq = rq;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 82f252c3ee6e..e246d3d3fbcc 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -64,7 +64,8 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
goto out;
}
- id = kmalloc(size, GFP_KERNEL);
+ /* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. */
+ id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
if (id == NULL) {
rc = -ENOMEM;
goto out;
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index fa047150a1c6..2892b242bbe1 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -210,6 +210,7 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list);
*/
static const struct drive_list_entry ivb_list[] = {
{ "QUANTUM FIREBALLlct10 05" , "A03.0900" },
+ { "QUANTUM FIREBALLlct20 30" , "APL.0900" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB00" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB01" },
{ "TSSTcorp CDDVDW SH-S202N" , "SB00" },
@@ -329,9 +330,6 @@ int ide_driveid_update(ide_drive_t *drive)
kfree(id);
- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive))
- ide_dma_off(drive);
-
return 1;
out_err:
if (rc == 2)
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index c14ca144cffe..ad7be2669dcb 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -10,9 +10,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
struct request_pm_state rqpm;
int ret;
- /* call ACPI _GTM only once */
- if ((drive->dn & 1) == 0 || pair == NULL)
- ide_acpi_get_timing(hwif);
+ if (ide_port_acpi(hwif)) {
+ /* call ACPI _GTM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL)
+ ide_acpi_get_timing(hwif);
+ }
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
@@ -26,9 +28,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
ret = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
- /* call ACPI _PS3 only after both devices are suspended */
- if (ret == 0 && ((drive->dn & 1) || pair == NULL))
- ide_acpi_set_state(hwif, 0);
+ if (ret == 0 && ide_port_acpi(hwif)) {
+ /* call ACPI _PS3 only after both devices are suspended */
+ if ((drive->dn & 1) || pair == NULL)
+ ide_acpi_set_state(hwif, 0);
+ }
return ret;
}
@@ -42,13 +46,15 @@ int generic_ide_resume(struct device *dev)
struct request_pm_state rqpm;
int err;
- /* call ACPI _PS0 / _STM only once */
- if ((drive->dn & 1) == 0 || pair == NULL) {
- ide_acpi_set_state(hwif, 1);
- ide_acpi_push_timing(hwif);
- }
+ if (ide_port_acpi(hwif)) {
+ /* call ACPI _PS0 / _STM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL) {
+ ide_acpi_set_state(hwif, 1);
+ ide_acpi_push_timing(hwif);
+ }
- ide_acpi_exec_tfs(drive);
+ ide_acpi_exec_tfs(drive);
+ }
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 51af4eea0d36..1bb106f6221a 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -818,6 +818,24 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
return j;
}
+static void ide_host_enable_irqs(struct ide_host *host)
+{
+ ide_hwif_t *hwif;
+ int i;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ /* clear any pending IRQs */
+ hwif->tp_ops->read_status(hwif);
+
+ /* unmask IRQs */
+ if (hwif->io_ports.ctl_addr)
+ hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
+ }
+}
+
/*
* This routine sets up the IRQ for an IDE interface.
*/
@@ -831,9 +849,6 @@ static int init_irq (ide_hwif_t *hwif)
if (irq_handler == NULL)
irq_handler = ide_intr;
- if (io_ports->ctl_addr)
- hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
goto out_up;
@@ -1404,6 +1419,8 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
ide_port_tune_devices(hwif);
}
+ ide_host_enable_irqs(host);
+
ide_host_for_each_port(i, hwif, host) {
if (hwif == NULL)
continue;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 83b734aec923..52b25f8b111d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -880,6 +880,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
}
shost->hostdata[0] = (unsigned long)lu;
+ shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
if (!scsi_add_host(shost, &ud->device)) {
lu->shost = shost;
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index c5036f1cc5b0..64a3a66a8a39 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -25,6 +25,12 @@
#define SBP2_DEVICE_NAME "sbp2"
/*
+ * There is no transport protocol limit to the CDB length, but we implement
+ * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
+ */
+#define SBP2_MAX_CDB_SIZE 16
+
+/*
* SBP-2 specific definitions
*/
@@ -51,7 +57,7 @@ struct sbp2_command_orb {
u32 data_descriptor_hi;
u32 data_descriptor_lo;
u32 misc;
- u8 cdb[12];
+ u8 cdb[SBP2_MAX_CDB_SIZE];
} __attribute__((packed));
#define SBP2_LOGIN_REQUEST 0x0
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 9d8f796c6745..a6b989a9dc07 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -12,6 +12,42 @@ menuconfig INPUT_KEYBOARD
if INPUT_KEYBOARD
+config KEYBOARD_AAED2000
+ tristate "AAED-2000 keyboard"
+ depends on MACH_AAED2000
+ select INPUT_POLLDEV
+ default y
+ help
+ Say Y here to enable the keyboard on the Agilent AAED-2000
+ development board.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aaed2000_kbd.
+
+config KEYBOARD_AMIGA
+ tristate "Amiga keyboard"
+ depends on AMIGA
+ help
+ Say Y here if you are running Linux on any AMIGA and have a keyboard
+ attached.
+
+ To compile this driver as a module, choose M here: the
+ module will be called amikbd.
+
+config ATARI_KBD_CORE
+ bool
+
+config KEYBOARD_ATARI
+ tristate "Atari keyboard"
+ depends on ATARI
+ select ATARI_KBD_CORE
+ help
+ Say Y here if you are running Linux on any Atari and have a keyboard
+ attached.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atakbd.
+
config KEYBOARD_ATKBD
tristate "AT keyboard" if EMBEDDED || !X86
default y
@@ -68,69 +104,14 @@ config KEYBOARD_ATKBD_RDI_KEYCODES
right-hand column will be interpreted as the key shown in the
left-hand column.
-config KEYBOARD_SUNKBD
- tristate "Sun Type 4 and Type 5 keyboard"
- select SERIO
- help
- Say Y here if you want to use a Sun Type 4 or Type 5 keyboard,
- connected either to the Sun keyboard connector or to an serial
- (RS-232) port via a simple adapter.
-
- To compile this driver as a module, choose M here: the
- module will be called sunkbd.
-
-config KEYBOARD_LKKBD
- tristate "DECstation/VAXstation LK201/LK401 keyboard"
- select SERIO
- help
- Say Y here if you want to use a LK201 or LK401 style serial
- keyboard. This keyboard is also useable on PCs if you attach
- it with the inputattach program. The connector pinout is
- described within lkkbd.c.
-
- To compile this driver as a module, choose M here: the
- module will be called lkkbd.
-
-config KEYBOARD_LOCOMO
- tristate "LoCoMo Keyboard Support"
- depends on SHARP_LOCOMO && INPUT_KEYBOARD
- help
- Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA
-
- To compile this driver as a module, choose M here: the
- module will be called locomokbd.
-
-config KEYBOARD_XTKBD
- tristate "XT keyboard"
- select SERIO
- help
- Say Y here if you want to use the old IBM PC/XT keyboard (or
- compatible) on your system. This is only possible with a
- parallel port keyboard adapter, you cannot connect it to the
- keyboard port on a PC that runs Linux.
-
- To compile this driver as a module, choose M here: the
- module will be called xtkbd.
-
-config KEYBOARD_NEWTON
- tristate "Newton keyboard"
- select SERIO
- help
- Say Y here if you have a Newton keyboard on a serial port.
-
- To compile this driver as a module, choose M here: the
- module will be called newtonkbd.
-
-config KEYBOARD_STOWAWAY
- tristate "Stowaway keyboard"
- select SERIO
+config KEYBOARD_BFIN
+ tristate "Blackfin BF54x keypad support"
+ depends on (BF54x && !BF544)
help
- Say Y here if you have a Stowaway keyboard on a serial port.
- Stowaway compatible keyboards like Dicota Input-PDA keyboard
- are also supported by this driver.
+ Say Y here if you want to use the BF54x keypad.
To compile this driver as a module, choose M here: the
- module will be called stowaway.
+ module will be called bf54x-keys.
config KEYBOARD_CORGI
tristate "Corgi keyboard"
@@ -143,61 +124,50 @@ config KEYBOARD_CORGI
To compile this driver as a module, choose M here: the
module will be called corgikbd.
-config KEYBOARD_SPITZ
- tristate "Spitz keyboard"
- depends on PXA_SHARPSL
- default y
+config KEYBOARD_LKKBD
+ tristate "DECstation/VAXstation LK201/LK401 keyboard"
+ select SERIO
help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
- SL-C3000 and Sl-C3100 series of PDAs.
+ Say Y here if you want to use a LK201 or LK401 style serial
+ keyboard. This keyboard is also useable on PCs if you attach
+ it with the inputattach program. The connector pinout is
+ described within lkkbd.c.
To compile this driver as a module, choose M here: the
- module will be called spitzkbd.
+ module will be called lkkbd.
-config KEYBOARD_TOSA
- tristate "Tosa keyboard"
- depends on MACH_TOSA
- default y
+config KEYBOARD_EP93XX
+ tristate "EP93xx Matrix Keypad support"
+ depends on ARCH_EP93XX
help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
+ Say Y here to enable the matrix keypad on the Cirrus EP93XX.
To compile this driver as a module, choose M here: the
- module will be called tosakbd.
+ module will be called ep93xx_keypad.
-config KEYBOARD_TOSA_USE_EXT_KEYCODES
- bool "Tosa keyboard: use extended keycodes"
- depends on KEYBOARD_TOSA
- default n
+config KEYBOARD_GPIO
+ tristate "GPIO Buttons"
+ depends on GENERIC_GPIO
help
- Say Y here to enable the tosa keyboard driver to generate extended
- (>= 127) keycodes. Be aware, that they can't be correctly interpreted
- by either console keyboard driver or by Kdrive keybd driver.
-
- Say Y only if you know, what you are doing!
+ This driver implements support for buttons connected
+ to GPIO pins of various CPUs (and some other chips).
-config KEYBOARD_AMIGA
- tristate "Amiga keyboard"
- depends on AMIGA
- help
- Say Y here if you are running Linux on any AMIGA and have a keyboard
- attached.
+ Say Y here if your device has buttons connected
+ directly to such GPIO pins. Your board-specific
+ setup logic must also provide a platform device,
+ with configuration data saying which GPIOs are used.
To compile this driver as a module, choose M here: the
- module will be called amikbd.
+ module will be called gpio_keys.
-config ATARI_KBD_CORE
- bool
-
-config KEYBOARD_ATARI
- tristate "Atari keyboard"
- depends on ATARI
- select ATARI_KBD_CORE
+config KEYBOARD_MATRIX
+ tristate "GPIO driven matrix keypad support"
+ depends on GENERIC_GPIO
help
- Say Y here if you are running Linux on any Atari and have a keyboard
- attached.
+ Enable support for GPIO driven matrix keypad.
To compile this driver as a module, choose M here: the
- module will be called atakbd.
+ module will be called matrix_keypad.
config KEYBOARD_HIL_OLD
tristate "HP HIL keyboard support (simple driver)"
@@ -261,20 +231,39 @@ config KEYBOARD_LM8323
To compile this driver as a module, choose M here: the
module will be called lm8323.
-config KEYBOARD_OMAP
- tristate "TI OMAP keypad support"
- depends on (ARCH_OMAP1 || ARCH_OMAP2)
+config KEYBOARD_LOCOMO
+ tristate "LoCoMo Keyboard Support"
+ depends on SHARP_LOCOMO
help
- Say Y here if you want to use the OMAP keypad.
+ Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA
To compile this driver as a module, choose M here: the
- module will be called omap-keypad.
+ module will be called locomokbd.
+
+config KEYBOARD_MAPLE
+ tristate "Maple bus keyboard"
+ depends on SH_DREAMCAST && MAPLE
+ help
+ Say Y here if you have a Dreamcast console running Linux and have
+ a keyboard attached to its Maple bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called maple_keyb.
+
+config KEYBOARD_NEWTON
+ tristate "Newton keyboard"
+ select SERIO
+ help
+ Say Y here if you have a Newton keyboard on a serial port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called newtonkbd.
config KEYBOARD_PXA27x
tristate "PXA27x/PXA3xx keypad support"
depends on PXA27x || PXA3xx
help
- Enable support for PXA27x/PXA3xx keypad controller
+ Enable support for PXA27x/PXA3xx keypad controller.
To compile this driver as a module, choose M here: the
module will be called pxa27x_keypad.
@@ -288,51 +277,38 @@ config KEYBOARD_PXA930_ROTARY
To compile this driver as a module, choose M here: the
module will be called pxa930_rotary.
-config KEYBOARD_AAED2000
- tristate "AAED-2000 keyboard"
- depends on MACH_AAED2000
- select INPUT_POLLDEV
+config KEYBOARD_SPITZ
+ tristate "Spitz keyboard"
+ depends on PXA_SHARPSL
default y
help
- Say Y here to enable the keyboard on the Agilent AAED-2000
- development board.
-
- To compile this driver as a module, choose M here: the
- module will be called aaed2000_kbd.
-
-config KEYBOARD_GPIO
- tristate "GPIO Buttons"
- depends on GENERIC_GPIO
- help
- This driver implements support for buttons connected
- to GPIO pins of various CPUs (and some other chips).
-
- Say Y here if your device has buttons connected
- directly to such GPIO pins. Your board-specific
- setup logic must also provide a platform device,
- with configuration data saying which GPIOs are used.
+ Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
+ SL-C3000 and Sl-C3100 series of PDAs.
To compile this driver as a module, choose M here: the
- module will be called gpio-keys.
+ module will be called spitzkbd.
-config KEYBOARD_MAPLE
- tristate "Maple bus keyboard"
- depends on SH_DREAMCAST && MAPLE
+config KEYBOARD_STOWAWAY
+ tristate "Stowaway keyboard"
+ select SERIO
help
- Say Y here if you have a Dreamcast console running Linux and have
- a keyboard attached to its Maple bus.
+ Say Y here if you have a Stowaway keyboard on a serial port.
+ Stowaway compatible keyboards like Dicota Input-PDA keyboard
+ are also supported by this driver.
To compile this driver as a module, choose M here: the
- module will be called maple_keyb.
+ module will be called stowaway.
-config KEYBOARD_BFIN
- tristate "Blackfin BF54x keypad support"
- depends on (BF54x && !BF544)
+config KEYBOARD_SUNKBD
+ tristate "Sun Type 4 and Type 5 keyboard"
+ select SERIO
help
- Say Y here if you want to use the BF54x keypad.
+ Say Y here if you want to use a Sun Type 4 or Type 5 keyboard,
+ connected either to the Sun keyboard connector or to a serial
+ (RS-232) port via a simple adapter.
To compile this driver as a module, choose M here: the
- module will be called bf54x-keys.
+ module will be called sunkbd.
config KEYBOARD_SH_KEYSC
tristate "SuperH KEYSC keypad support"
@@ -344,13 +320,45 @@ config KEYBOARD_SH_KEYSC
To compile this driver as a module, choose M here: the
module will be called sh_keysc.
-config KEYBOARD_EP93XX
- tristate "EP93xx Matrix Keypad support"
- depends on ARCH_EP93XX
+config KEYBOARD_OMAP
+ tristate "TI OMAP keypad support"
+ depends on (ARCH_OMAP1 || ARCH_OMAP2)
help
- Say Y here to enable the matrix keypad on the Cirrus EP93XX.
+ Say Y here if you want to use the OMAP keypad.
To compile this driver as a module, choose M here: the
- module will be called ep93xx_keypad.
+ module will be called omap-keypad.
+
+config KEYBOARD_TOSA
+ tristate "Tosa keyboard"
+ depends on MACH_TOSA
+ default y
+ help
+ Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
+
+ To compile this driver as a module, choose M here: the
+ module will be called tosakbd.
+
+config KEYBOARD_TOSA_USE_EXT_KEYCODES
+ bool "Tosa keyboard: use extended keycodes"
+ depends on KEYBOARD_TOSA
+ help
+ Say Y here to enable the tosa keyboard driver to generate extended
+ (>= 127) keycodes. Be aware that they can't be correctly interpreted
+ by either the console keyboard driver or the Kdrive keyboard driver.
+
+ Say Y only if you know what you are doing!
+
+config KEYBOARD_XTKBD
+ tristate "XT keyboard"
+ select SERIO
+ help
+ Say Y here if you want to use the old IBM PC/XT keyboard (or
+ compatible) on your system. This is only possible with a
+ parallel port keyboard adapter, you cannot connect it to the
+ keyboard port on a PC that runs Linux.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xtkbd.
endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 156b647a259b..b5b5eae9724f 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -4,29 +4,30 @@
# Each configuration option enables a list of files.
-obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
-obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
-obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
-obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
+obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
-obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
-obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
-obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
+obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
+obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
-obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
-obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
+obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
+obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
+obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
+obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
+obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
obj-$(CONFIG_KEYBOARD_LM8323) += lm8323.o
+obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
+obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
+obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
+obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
-obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
-obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
-obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
-obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
-obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
-obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
-obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
+obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
+obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
+obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
+obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
+obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 2157cd7de00c..efed0c9e242e 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -29,7 +29,8 @@
struct gpio_button_data {
struct gpio_keys_button *button;
struct input_dev *input;
- struct delayed_work work;
+ struct timer_list timer;
+ struct work_struct work;
};
struct gpio_keys_drvdata {
@@ -40,7 +41,7 @@ struct gpio_keys_drvdata {
static void gpio_keys_report_event(struct work_struct *work)
{
struct gpio_button_data *bdata =
- container_of(work, struct gpio_button_data, work.work);
+ container_of(work, struct gpio_button_data, work);
struct gpio_keys_button *button = bdata->button;
struct input_dev *input = bdata->input;
unsigned int type = button->type ?: EV_KEY;
@@ -50,17 +51,25 @@ static void gpio_keys_report_event(struct work_struct *work)
input_sync(input);
}
+static void gpio_keys_timer(unsigned long _data)
+{
+ struct gpio_button_data *data = (struct gpio_button_data *)_data;
+
+ schedule_work(&data->work);
+}
+
static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
struct gpio_keys_button *button = bdata->button;
- unsigned long delay;
BUG_ON(irq != gpio_to_irq(button->gpio));
- delay = button->debounce_interval ?
- msecs_to_jiffies(button->debounce_interval) : 0;
- schedule_delayed_work(&bdata->work, delay);
+ if (button->debounce_interval)
+ mod_timer(&bdata->timer,
+ jiffies + msecs_to_jiffies(button->debounce_interval));
+ else
+ schedule_work(&bdata->work);
return IRQ_HANDLED;
}
@@ -107,7 +116,9 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
bdata->input = input;
bdata->button = button;
- INIT_DELAYED_WORK(&bdata->work, gpio_keys_report_event);
+ setup_timer(&bdata->timer,
+ gpio_keys_timer, (unsigned long)bdata);
+ INIT_WORK(&bdata->work, gpio_keys_report_event);
error = gpio_request(button->gpio, button->desc ?: "gpio_keys");
if (error < 0) {
@@ -166,7 +177,9 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
fail2:
while (--i >= 0) {
free_irq(gpio_to_irq(pdata->buttons[i].gpio), &ddata->data[i]);
- cancel_delayed_work_sync(&ddata->data[i].work);
+ if (pdata->buttons[i].debounce_interval)
+ del_timer_sync(&ddata->data[i].timer);
+ cancel_work_sync(&ddata->data[i].work);
gpio_free(pdata->buttons[i].gpio);
}
@@ -190,7 +203,9 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
for (i = 0; i < pdata->nbuttons; i++) {
int irq = gpio_to_irq(pdata->buttons[i].gpio);
free_irq(irq, &ddata->data[i]);
- cancel_delayed_work_sync(&ddata->data[i].work);
+ if (pdata->buttons[i].debounce_interval)
+ del_timer_sync(&ddata->data[i].timer);
+ cancel_work_sync(&ddata->data[i].work);
gpio_free(pdata->buttons[i].gpio);
}
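
The gpio_keys change above replaces the delayed work with a timer that only schedules a plain work item once the debounce interval has expired; the work item then reads the GPIO and reports the key. A stripped-down sketch of that shape (the demo_* names are placeholders, not the driver's own):

#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_button {
	int gpio;
	unsigned int code;
	unsigned int debounce_ms;
	struct input_dev *input;
	struct timer_list timer;
	struct work_struct work;
};

/* work context: reading the GPIO and reporting the key may sleep */
static void demo_report(struct work_struct *work)
{
	struct demo_button *b = container_of(work, struct demo_button, work);

	input_report_key(b->input, b->code, gpio_get_value(b->gpio));
	input_sync(b->input);
}

/* timer context: cannot sleep, so just kick the work item */
static void demo_timer(unsigned long data)
{
	schedule_work(&((struct demo_button *)data)->work);
}

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct demo_button *b = dev_id;

	if (b->debounce_ms)
		mod_timer(&b->timer,
			  jiffies + msecs_to_jiffies(b->debounce_ms));
	else
		schedule_work(&b->work);

	return IRQ_HANDLED;
}
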
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
new file mode 100644
index 000000000000..e9b2e7cb05be
--- /dev/null
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -0,0 +1,453 @@
+/*
+ * GPIO driven matrix keyboard driver
+ *
+ * Copyright (c) 2008 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Based on corgikbd.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/input/matrix_keypad.h>
+
+struct matrix_keypad {
+ const struct matrix_keypad_platform_data *pdata;
+ struct input_dev *input_dev;
+ unsigned short *keycodes;
+
+ uint32_t last_key_state[MATRIX_MAX_COLS];
+ struct delayed_work work;
+ bool scan_pending;
+ bool stopped;
+ spinlock_t lock;
+};
+
+/*
+ * NOTE: normally the GPIO has to be put into HiZ when de-activated to cause
+ * minimal side effect when scanning other columns; here it is configured as
+ * an input, which should work on most platforms.
+ */
+static void __activate_col(const struct matrix_keypad_platform_data *pdata,
+ int col, bool on)
+{
+ bool level_on = !pdata->active_low;
+
+ if (on) {
+ gpio_direction_output(pdata->col_gpios[col], level_on);
+ } else {
+ gpio_set_value_cansleep(pdata->col_gpios[col], !level_on);
+ gpio_direction_input(pdata->col_gpios[col]);
+ }
+}
+
+static void activate_col(const struct matrix_keypad_platform_data *pdata,
+ int col, bool on)
+{
+ __activate_col(pdata, col, on);
+
+ if (on && pdata->col_scan_delay_us)
+ udelay(pdata->col_scan_delay_us);
+}
+
+static void activate_all_cols(const struct matrix_keypad_platform_data *pdata,
+ bool on)
+{
+ int col;
+
+ for (col = 0; col < pdata->num_col_gpios; col++)
+ __activate_col(pdata, col, on);
+}
+
+static bool row_asserted(const struct matrix_keypad_platform_data *pdata,
+ int row)
+{
+ return gpio_get_value_cansleep(pdata->row_gpios[row]) ?
+ !pdata->active_low : pdata->active_low;
+}
+
+static void enable_row_irqs(struct matrix_keypad *keypad)
+{
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ enable_irq(gpio_to_irq(pdata->row_gpios[i]));
+}
+
+static void disable_row_irqs(struct matrix_keypad *keypad)
+{
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ disable_irq_nosync(gpio_to_irq(pdata->row_gpios[i]));
+}
+
+/*
+ * This gets the keys from the keyboard and reports them to the input subsystem
+ */
+static void matrix_keypad_scan(struct work_struct *work)
+{
+ struct matrix_keypad *keypad =
+ container_of(work, struct matrix_keypad, work.work);
+ struct input_dev *input_dev = keypad->input_dev;
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ uint32_t new_state[MATRIX_MAX_COLS];
+ int row, col, code;
+
+ /* de-activate all columns for scanning */
+ activate_all_cols(pdata, false);
+
+ memset(new_state, 0, sizeof(new_state));
+
+ /* assert each column and read the row status out */
+ for (col = 0; col < pdata->num_col_gpios; col++) {
+
+ activate_col(pdata, col, true);
+
+ for (row = 0; row < pdata->num_row_gpios; row++)
+ new_state[col] |=
+ row_asserted(pdata, row) ? (1 << row) : 0;
+
+ activate_col(pdata, col, false);
+ }
+
+ for (col = 0; col < pdata->num_col_gpios; col++) {
+ uint32_t bits_changed;
+
+ bits_changed = keypad->last_key_state[col] ^ new_state[col];
+ if (bits_changed == 0)
+ continue;
+
+ for (row = 0; row < pdata->num_row_gpios; row++) {
+ if ((bits_changed & (1 << row)) == 0)
+ continue;
+
+ code = (row << 4) + col;
+ input_event(input_dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(input_dev,
+ keypad->keycodes[code],
+ new_state[col] & (1 << row));
+ }
+ }
+ input_sync(input_dev);
+
+ memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+
+ activate_all_cols(pdata, true);
+
+ /* Enable IRQs again */
+ spin_lock_irq(&keypad->lock);
+ keypad->scan_pending = false;
+ enable_row_irqs(keypad);
+ spin_unlock_irq(&keypad->lock);
+}
+
+static irqreturn_t matrix_keypad_interrupt(int irq, void *id)
+{
+ struct matrix_keypad *keypad = id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&keypad->lock, flags);
+
+ /*
+ * See if another IRQ beat us to it and scheduled the
+ * scan already. In that case we should not try to
+ * disable IRQs again.
+ */
+ if (unlikely(keypad->scan_pending || keypad->stopped))
+ goto out;
+
+ disable_row_irqs(keypad);
+ keypad->scan_pending = true;
+ schedule_delayed_work(&keypad->work,
+ msecs_to_jiffies(keypad->pdata->debounce_ms));
+
+out:
+ spin_unlock_irqrestore(&keypad->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int matrix_keypad_start(struct input_dev *dev)
+{
+ struct matrix_keypad *keypad = input_get_drvdata(dev);
+
+ keypad->stopped = false;
+ mb();
+
+ /*
+ * Schedule an immediate key scan to capture current key state;
+ * columns will be activated and IRQs be enabled after the scan.
+ */
+ schedule_delayed_work(&keypad->work, 0);
+
+ return 0;
+}
+
+static void matrix_keypad_stop(struct input_dev *dev)
+{
+ struct matrix_keypad *keypad = input_get_drvdata(dev);
+
+ keypad->stopped = true;
+ mb();
+ flush_work(&keypad->work.work);
+ /*
+ * matrix_keypad_scan() will leave IRQs enabled;
+ * we should disable them now.
+ */
+ disable_row_irqs(keypad);
+}
+
+#ifdef CONFIG_PM
+static int matrix_keypad_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct matrix_keypad *keypad = platform_get_drvdata(pdev);
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ matrix_keypad_stop(keypad->input_dev);
+
+ if (device_may_wakeup(&pdev->dev))
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ enable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
+
+ return 0;
+}
+
+static int matrix_keypad_resume(struct platform_device *pdev)
+{
+ struct matrix_keypad *keypad = platform_get_drvdata(pdev);
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ if (device_may_wakeup(&pdev->dev))
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ disable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
+
+ matrix_keypad_start(keypad->input_dev);
+
+ return 0;
+}
+#else
+#define matrix_keypad_suspend NULL
+#define matrix_keypad_resume NULL
+#endif
+
+static int __devinit init_matrix_gpio(struct platform_device *pdev,
+ struct matrix_keypad *keypad)
+{
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i, err = -EINVAL;
+
+ /* initialize strobe lines as outputs, activated */
+ for (i = 0; i < pdata->num_col_gpios; i++) {
+ err = gpio_request(pdata->col_gpios[i], "matrix_kbd_col");
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to request GPIO%d for COL%d\n",
+ pdata->col_gpios[i], i);
+ goto err_free_cols;
+ }
+
+ gpio_direction_output(pdata->col_gpios[i], !pdata->active_low);
+ }
+
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ err = gpio_request(pdata->row_gpios[i], "matrix_kbd_row");
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to request GPIO%d for ROW%d\n",
+ pdata->row_gpios[i], i);
+ goto err_free_rows;
+ }
+
+ gpio_direction_input(pdata->row_gpios[i]);
+ }
+
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ err = request_irq(gpio_to_irq(pdata->row_gpios[i]),
+ matrix_keypad_interrupt,
+ IRQF_DISABLED |
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "matrix-keypad", keypad);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Unable to acquire interrupt for GPIO line %i\n",
+ pdata->row_gpios[i]);
+ goto err_free_irqs;
+ }
+ }
+
+ /* initially disabled - enabled by input->open */
+ disable_row_irqs(keypad);
+ return 0;
+
+err_free_irqs:
+ while (--i >= 0)
+ free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
+ i = pdata->num_row_gpios;
+err_free_rows:
+ while (--i >= 0)
+ gpio_free(pdata->row_gpios[i]);
+ i = pdata->num_col_gpios;
+err_free_cols:
+ while (--i >= 0)
+ gpio_free(pdata->col_gpios[i]);
+
+ return err;
+}
+
+static int __devinit matrix_keypad_probe(struct platform_device *pdev)
+{
+ const struct matrix_keypad_platform_data *pdata;
+ const struct matrix_keymap_data *keymap_data;
+ struct matrix_keypad *keypad;
+ struct input_dev *input_dev;
+ unsigned short *keycodes;
+ int i;
+ int err;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return -EINVAL;
+ }
+
+ keymap_data = pdata->keymap_data;
+ if (!keymap_data) {
+ dev_err(&pdev->dev, "no keymap data defined\n");
+ return -EINVAL;
+ }
+
+ if (!keymap_data->max_keymap_size) {
+ dev_err(&pdev->dev, "invalid keymap data supplied\n");
+ return -EINVAL;
+ }
+
+ keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL);
+ keycodes = kzalloc(keymap_data->max_keymap_size *
+ sizeof(keypad->keycodes),
+ GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!keypad || !keycodes || !input_dev) {
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ keypad->input_dev = input_dev;
+ keypad->pdata = pdata;
+ keypad->keycodes = keycodes;
+ keypad->stopped = true;
+ INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
+ spin_lock_init(&keypad->lock);
+
+ input_dev->name = pdev->name;
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->dev.parent = &pdev->dev;
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_dev->open = matrix_keypad_start;
+ input_dev->close = matrix_keypad_stop;
+
+ input_dev->keycode = keycodes;
+ input_dev->keycodesize = sizeof(*keycodes);
+ input_dev->keycodemax = keymap_data->max_keymap_size;
+
+ for (i = 0; i < keymap_data->keymap_size; i++) {
+ unsigned int key = keymap_data->keymap[i];
+ unsigned int row = KEY_ROW(key);
+ unsigned int col = KEY_COL(key);
+ unsigned short code = KEY_VAL(key);
+
+ keycodes[(row << 4) + col] = code;
+ __set_bit(code, input_dev->keybit);
+ }
+ __clear_bit(KEY_RESERVED, input_dev->keybit);
+
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input_dev, keypad);
+
+ err = init_matrix_gpio(pdev, keypad);
+ if (err)
+ goto err_free_mem;
+
+ err = input_register_device(keypad->input_dev);
+ if (err)
+ goto err_free_mem;
+
+ device_init_wakeup(&pdev->dev, pdata->wakeup);
+ platform_set_drvdata(pdev, keypad);
+
+ return 0;
+
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(keycodes);
+ kfree(keypad);
+ return err;
+}
+
+static int __devexit matrix_keypad_remove(struct platform_device *pdev)
+{
+ struct matrix_keypad *keypad = platform_get_drvdata(pdev);
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ device_init_wakeup(&pdev->dev, 0);
+
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
+ gpio_free(pdata->row_gpios[i]);
+ }
+
+ for (i = 0; i < pdata->num_col_gpios; i++)
+ gpio_free(pdata->col_gpios[i]);
+
+ input_unregister_device(keypad->input_dev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(keypad->keycodes);
+ kfree(keypad);
+
+ return 0;
+}
+
+static struct platform_driver matrix_keypad_driver = {
+ .probe = matrix_keypad_probe,
+ .remove = __devexit_p(matrix_keypad_remove),
+ .suspend = matrix_keypad_suspend,
+ .resume = matrix_keypad_resume,
+ .driver = {
+ .name = "matrix-keypad",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init matrix_keypad_init(void)
+{
+ return platform_driver_register(&matrix_keypad_driver);
+}
+
+static void __exit matrix_keypad_exit(void)
+{
+ platform_driver_unregister(&matrix_keypad_driver);
+}
+
+module_init(matrix_keypad_init);
+module_exit(matrix_keypad_exit);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("GPIO Driven Matrix Keypad Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:matrix-keypad");
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 2adf9cb265da..d114d3a9e1e9 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -1,7 +1,7 @@
/*
* Cobalt button interface driver.
*
- * Copyright (C) 2007-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -148,7 +148,7 @@ static int __devexit cobalt_buttons_remove(struct platform_device *pdev)
return 0;
}
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("Cobalt button interface driver");
MODULE_LICENSE("GPL");
/* work with hotplug and coldplug */
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index a63315ce4a6c..0918acae584a 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -23,30 +23,16 @@
* pressed, or its autorepeat kicks in, an event is sent. This driver
* read those events from the small (32 event) queue and reports them.
*
- * Because we communicate with the MSP430 using I2C, and all I2C calls
- * in Linux sleep, we need to cons up a kind of threaded IRQ handler
- * using a work_struct. The IRQ is active low, but we use it through
- * the GPIO controller so we can trigger on falling edges.
- *
* Note that physically there can only be one of these devices.
*
* This driver was tested with firmware revision A4.
*/
struct dm355evm_keys {
- struct work_struct work;
struct input_dev *input;
struct device *dev;
int irq;
};
-static irqreturn_t dm355evm_keys_irq(int irq, void *_keys)
-{
- struct dm355evm_keys *keys = _keys;
-
- schedule_work(&keys->work);
- return IRQ_HANDLED;
-}
-
/* These initial keycodes can be remapped by dm355evm_setkeycode(). */
static struct {
u16 event;
@@ -110,13 +96,12 @@ static struct {
{ 0x3169, KEY_PAUSE, },
};
-static void dm355evm_keys_work(struct work_struct *work)
+/* runs in an IRQ thread -- can (and will!) sleep */
+static irqreturn_t dm355evm_keys_irq(int irq, void *_keys)
{
- struct dm355evm_keys *keys;
+ struct dm355evm_keys *keys = _keys;
int status;
- keys = container_of(work, struct dm355evm_keys, work);
-
/* For simplicity we ignore INPUT_COUNT and just read
* events until we get the "queue empty" indicator.
* Reading INPUT_LOW decrements the count.
@@ -183,6 +168,19 @@ static void dm355evm_keys_work(struct work_struct *work)
input_report_key(keys->input, keycode, 0);
input_sync(keys->input);
}
+ return IRQ_HANDLED;
+}
+
+/*
+ * Because we communicate with the MSP430 using I2C, and all I2C calls
+ * in Linux sleep, we use a threaded IRQ handler. The IRQ itself is
+ * active low, but we go through the GPIO controller so we can trigger
+ * on falling edges and not worry about enabling/disabling the IRQ in
+ * the keypress handling path.
+ */
+static irqreturn_t dm355evm_keys_hardirq(int irq, void *_keys)
+{
+ return IRQ_WAKE_THREAD;
}
static int dm355evm_setkeycode(struct input_dev *dev, int index, int keycode)
@@ -233,7 +231,6 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
keys->dev = &pdev->dev;
keys->input = input;
- INIT_WORK(&keys->work, dm355evm_keys_work);
/* set up "threaded IRQ handler" */
status = platform_get_irq(pdev, 0);
@@ -260,9 +257,10 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
/* REVISIT: flush the event queue? */
- status = request_irq(keys->irq, dm355evm_keys_irq,
- IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), keys);
+ status = request_threaded_irq(keys->irq,
+ dm355evm_keys_hardirq, dm355evm_keys_irq,
+ IRQF_TRIGGER_FALLING,
+ dev_name(&pdev->dev), keys);
if (status < 0)
goto fail1;
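The hunk above drops the work_struct deferral in favour of request_threaded_irq(): the hard handler only wakes the IRQ thread, and the thread function is where sleeping I2C traffic happens. A minimal sketch of that pattern follows; the device name and handler names are invented for illustration, only the API calls come from the kernel.

#include <linux/interrupt.h>

/* hard-IRQ context: must not sleep, just hand off to the thread */
static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/* IRQ-thread context: sleeping bus I/O (I2C, SPI) is allowed here */
static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup(int irq, void *priv)
{
	return request_threaded_irq(irq, example_hardirq, example_thread_fn,
				    IRQF_TRIGGER_FALLING, "example-keys", priv);
}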
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index fb8a3cd3ffd0..924e8ed7f2cf 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -392,6 +392,34 @@ static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
},
},
+ {
+ .ident = "Acer Aspire One 150",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ },
+ },
+ {
+ .ident = "Advent 4211",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
+ },
+ },
+ {
+ .ident = "Medion Akoya Mini E1210",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
+ },
+ },
+ {
+ .ident = "Mivvy M310",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
+ },
+ },
{ }
};
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 38bf86384aeb..c896d6a21b7e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -384,6 +384,8 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
wacom_report_key(wcombo, BTN_STYLUS2, 0);
wacom_report_key(wcombo, BTN_TOUCH, 0);
wacom_report_abs(wcombo, ABS_WHEEL, 0);
+ if (wacom->features->type >= INTUOS3S)
+ wacom_report_abs(wcombo, ABS_Z, 0);
}
wacom_report_key(wcombo, wacom->tool[idx], 0);
wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
@@ -836,6 +838,7 @@ static struct wacom_features wacom_features[] = {
{ "Wacom DTU710", 8, 34080, 27660, 511, 0, PL },
{ "Wacom DTF521", 8, 6282, 4762, 511, 0, PL },
{ "Wacom DTF720", 8, 6858, 5506, 511, 0, PL },
+ { "Wacom DTF720a", 8, 6858, 5506, 511, 0, PL },
{ "Wacom Cintiq Partner",8, 20480, 15360, 511, 0, PTU },
{ "Wacom Intuos2 4x5", 10, 12700, 10600, 1023, 31, INTUOS },
{ "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
@@ -897,8 +900,9 @@ static struct usb_device_id wacom_ids[] = {
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x37) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x38) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x39) },
- { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC0) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC4) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC0) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xC2) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x03) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x41) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x42) },
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index ec5169604a6a..2d91049571a4 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -294,32 +294,33 @@ struct reply_t gigaset_tab_cid[] =
{RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}},
{RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
{RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
- {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */
+ {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"},
{RSP_OK, 607,607, -1, 608,-1},
- //{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE
{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}},
{RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}},
- {RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}},
- {RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}},
- {RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}},
{RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
{EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
- /* dialing */
- {RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}},
- {RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}},
- {RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */
-
- /* connection established */
- {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
- {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
-
- {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout
+ /* optional dialing responses */
+ {EV_BC_OPEN, 650,650, -1, 651,-1},
+ {RSP_ZVLS, 608,651, 17, -1,-1, {ACT_DEBUG}},
+ {RSP_ZCTP, 609,651, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ZCPN, 609,651, -1, -1,-1, {ACT_DEBUG}},
+ {RSP_ZSAU, 650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}},
+
+ /* connect */
+ {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}},
+ {RSP_ZSAU, 651,651,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT,
+ ACT_NOTIFY_BC_UP}},
+ {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}},
+ {RSP_ZSAU, 751,751,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT,
+ ACT_NOTIFY_BC_UP}},
+ {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}},
/* remote hangup */
- {RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
- {RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
+ {RSP_ZSAU, 650,651,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
+ {RSP_ZSAU, 750,751,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
{RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
/* hangup */
@@ -358,7 +359,8 @@ struct reply_t gigaset_tab_cid[] =
{RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}},
{RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}},
- {EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}},
+ {EV_BC_OPEN, 750,750, -1, 751,-1},
+ {EV_TIMEOUT, 750,751, -1, 0, 0, {ACT_CONNTIMEOUT}},
/* B channel closed (general case) */
{EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME
@@ -876,12 +878,6 @@ static void bchannel_down(struct bc_state *bcs)
static void bchannel_up(struct bc_state *bcs)
{
- if (!(bcs->chstate & CHS_D_UP)) {
- dev_notice(bcs->cs->dev, "%s: D channel not up\n", __func__);
- bcs->chstate |= CHS_D_UP;
- gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
- }
-
if (bcs->chstate & CHS_B_UP) {
dev_notice(bcs->cs->dev, "%s: B channel already up\n",
__func__);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index db3a1e4cd489..bed38fcc432b 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -174,12 +174,6 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
pr_err("invalid size %d\n", size);
return -EINVAL;
}
- src = iwb->read;
- if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
- (read < src && limit >= src))) {
- pr_err("isoc write buffer frame reservation violated\n");
- return -EFAULT;
- }
#endif
if (read < write) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9b60b6b684d9..7c8e7122aaa9 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -75,6 +75,7 @@ config LEDS_ALIX2
depends on LEDS_CLASS && X86 && EXPERIMENTAL
help
This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
+ You have to set leds-alix2.force=1 for boards with Award BIOS.
config LEDS_H1940
tristate "LED Support for iPAQ H1940 device"
@@ -145,15 +146,16 @@ config LEDS_GPIO_OF
of_platform devices. For instance, LEDs which are listed in a "dts"
file.
-config LEDS_LP5521
- tristate "LED Support for the LP5521 LEDs"
+config LEDS_LP3944
+ tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
depends on LEDS_CLASS && I2C
help
- If you say 'Y' here you get support for the National Semiconductor
- LP5521 LED driver used in n8x0 boards.
+	  This option enables support for LEDs connected to the National
+	  Semiconductor LP3944 Lighting Management Unit (LMU), also known as
+	  the Fun Light Chip.
- This driver can be built as a module by choosing 'M'. The module
- will be called leds-lp5521.
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp3944.
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2d41c4dcf92f..e8cdcf77a4c3 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index ddbd7730dfc8..731d4eef3425 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -14,7 +14,7 @@
static int force = 0;
module_param(force, bool, 0444);
-MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs");
+MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
struct alix_led {
struct led_classdev cdev;
@@ -155,6 +155,11 @@ static int __init alix_led_init(void)
goto out;
}
+ /* enable output on GPIO for LED 1,2,3 */
+ outl(1 << 6, 0x6104);
+ outl(1 << 9, 0x6184);
+ outl(1 << 11, 0x6184);
+
pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
if (!IS_ERR(pdev)) {
ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 4149ecb3a9b2..779d7f262c04 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -97,6 +97,10 @@ struct bd2802_led {
enum led_ids led_id;
enum led_colors color;
enum led_bits state;
+
+ /* General attributes of RGB LEDs */
+ int wave_pattern;
+ int rgb_current;
};
@@ -254,7 +258,7 @@ static void bd2802_set_on(struct bd2802_led *led, enum led_ids id,
bd2802_reset_cancel(led);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
@@ -275,9 +279,9 @@ static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id,
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
- bd2802_write_byte(led->client, reg, BD2802_PATTERN_HALF);
+ bd2802_write_byte(led->client, reg, led->wave_pattern);
bd2802_enable(led, id);
bd2802_update_state(led, id, color, BD2802_BLINK);
@@ -406,7 +410,7 @@ static void bd2802_enable_adv_conf(struct bd2802_led *led)
ret = device_create_file(&led->client->dev,
bd2802_addr_attributes[i]);
if (ret) {
- dev_err(&led->client->dev, "failed to sysfs file %s\n",
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
bd2802_addr_attributes[i]->attr.name);
goto failed_remove_files;
}
@@ -483,6 +487,52 @@ static struct device_attribute bd2802_adv_conf_attr = {
.store = bd2802_store_adv_conf,
};
+#define BD2802_CONTROL_ATTR(attr_name, name_str) \
+static ssize_t bd2802_show_##attr_name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ ssize_t ret; \
+ down_read(&led->rwsem); \
+ ret = sprintf(buf, "0x%02x\n", led->attr_name); \
+ up_read(&led->rwsem); \
+ return ret; \
+} \
+static ssize_t bd2802_store_##attr_name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ unsigned long val; \
+ int ret; \
+ if (!count) \
+ return -EINVAL; \
+ ret = strict_strtoul(buf, 16, &val); \
+ if (ret) \
+ return ret; \
+ down_write(&led->rwsem); \
+ led->attr_name = val; \
+ up_write(&led->rwsem); \
+ return count; \
+} \
+static struct device_attribute bd2802_##attr_name##_attr = { \
+ .attr = { \
+ .name = name_str, \
+ .mode = 0644, \
+ .owner = THIS_MODULE \
+ }, \
+ .show = bd2802_show_##attr_name, \
+ .store = bd2802_store_##attr_name, \
+};
+
+BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern");
+BD2802_CONTROL_ATTR(rgb_current, "rgb_current");
+
+static struct device_attribute *bd2802_attributes[] = {
+ &bd2802_adv_conf_attr,
+ &bd2802_wave_pattern_attr,
+ &bd2802_rgb_current_attr,
+};
+
static void bd2802_led_work(struct work_struct *work)
{
struct bd2802_led *led = container_of(work, struct bd2802_led, work);
@@ -538,7 +588,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1r.brightness = LED_OFF;
led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness;
led->cdev_led1r.blink_set = bd2802_set_led1r_blink;
- led->cdev_led1r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1r);
if (ret < 0) {
@@ -551,7 +600,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1g.brightness = LED_OFF;
led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness;
led->cdev_led1g.blink_set = bd2802_set_led1g_blink;
- led->cdev_led1g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1g);
if (ret < 0) {
@@ -564,7 +612,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1b.brightness = LED_OFF;
led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness;
led->cdev_led1b.blink_set = bd2802_set_led1b_blink;
- led->cdev_led1b.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1b);
if (ret < 0) {
@@ -577,7 +624,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led2r.brightness = LED_OFF;
led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness;
led->cdev_led2r.blink_set = bd2802_set_led2r_blink;
- led->cdev_led2r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2r);
if (ret < 0) {
@@ -590,7 +636,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led2g.brightness = LED_OFF;
led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness;
led->cdev_led2g.blink_set = bd2802_set_led2g_blink;
- led->cdev_led2g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2g);
if (ret < 0) {
@@ -640,7 +685,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
{
struct bd2802_led *led;
struct bd2802_led_platform_data *pdata;
- int ret;
+ int ret, i;
led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
if (!led) {
@@ -670,13 +715,20 @@ static int __devinit bd2802_probe(struct i2c_client *client,
/* To save the power, reset BD2802 after detecting */
gpio_set_value(led->pdata->reset_gpio, 0);
+ /* Default attributes */
+ led->wave_pattern = BD2802_PATTERN_HALF;
+ led->rgb_current = BD2802_CURRENT_032;
+
init_rwsem(&led->rwsem);
- ret = device_create_file(&client->dev, &bd2802_adv_conf_attr);
- if (ret) {
- dev_err(&client->dev, "failed to create sysfs file %s\n",
- bd2802_adv_conf_attr.attr.name);
- goto failed_free;
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
+ ret = device_create_file(&led->client->dev,
+ bd2802_attributes[i]);
+ if (ret) {
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
+ bd2802_attributes[i]->attr.name);
+ goto failed_unregister_dev_file;
+ }
}
ret = bd2802_register_led_classdev(led);
@@ -686,7 +738,8 @@ static int __devinit bd2802_probe(struct i2c_client *client,
return 0;
failed_unregister_dev_file:
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i--; i >= 0; i--)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
failed_free:
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -697,12 +750,14 @@ failed_free:
static int __exit bd2802_remove(struct i2c_client *client)
{
struct bd2802_led *led = i2c_get_clientdata(client);
+ int i;
- bd2802_unregister_led_classdev(led);
gpio_set_value(led->pdata->reset_gpio, 0);
+ bd2802_unregister_led_classdev(led);
if (led->adf_on)
bd2802_disable_adv_conf(led);
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -723,8 +778,7 @@ static int bd2802_resume(struct i2c_client *client)
struct bd2802_led *led = i2c_get_clientdata(client);
if (!bd2802_is_all_off(led) || led->adf_on) {
- gpio_set_value(led->pdata->reset_gpio, 1);
- udelay(100);
+ bd2802_reset_cancel(led);
bd2802_restore_state(led);
}
@@ -762,4 +816,4 @@ module_exit(bd2802_exit);
MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
MODULE_DESCRIPTION("BD2802 LED driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
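The bd2802 probe/remove hunks above replace the single adv_conf attribute with an array of sysfs attributes created in a loop and unwound on partial failure. A generic sketch of that create-with-rollback idiom, with placeholder names that are not part of the driver:

#include <linux/device.h>

static int example_create_files(struct device *dev,
				struct device_attribute **attrs, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = device_create_file(dev, attrs[i]);
		if (ret)
			goto undo;
	}
	return 0;

undo:
	/* remove only the files that were actually created */
	for (i--; i >= 0; i--)
		device_remove_file(dev, attrs[i]);
	return ret;
}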
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index ff0e8c3fbf9b..5f1ce810815f 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -1,7 +1,7 @@
/*
* LEDs driver for the Cobalt Raq series.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index d2109054de85..6b06638eb5b4 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -76,7 +76,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
int (*blink_set)(unsigned, unsigned long *, unsigned long *))
{
- int ret;
+ int ret, state;
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
@@ -99,11 +99,15 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
led_dat->cdev.blink_set = gpio_blink_set;
}
led_dat->cdev.brightness_set = gpio_led_set;
- led_dat->cdev.brightness = LED_OFF;
+ if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
+ state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low;
+ else
+ state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
+ led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, led_dat->active_low);
+ ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
goto err;
@@ -129,7 +133,7 @@ static void delete_gpio_led(struct gpio_led_data *led)
}
#ifdef CONFIG_LEDS_GPIO_PLATFORM
-static int gpio_led_probe(struct platform_device *pdev)
+static int __devinit gpio_led_probe(struct platform_device *pdev)
{
struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
struct gpio_led_data *leds_data;
@@ -223,12 +227,22 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
memset(&led, 0, sizeof(led));
for_each_child_of_node(np, child) {
enum of_gpio_flags flags;
+ const char *state;
led.gpio = of_get_gpio_flags(child, 0, &flags);
led.active_low = flags & OF_GPIO_ACTIVE_LOW;
led.name = of_get_property(child, "label", NULL) ? : child->name;
led.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
+ state = of_get_property(child, "default-state", NULL);
+ if (state) {
+ if (!strcmp(state, "keep"))
+ led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+ else if(!strcmp(state, "on"))
+ led.default_state = LEDS_GPIO_DEFSTATE_ON;
+ else
+ led.default_state = LEDS_GPIO_DEFSTATE_OFF;
+ }
ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++],
&ofdev->dev, NULL);
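The new default-state handling lets board code (or a device-tree "default-state" property) ask for an LED to start on, off, or keep whatever the bootloader programmed. A hypothetical platform-data sketch, assuming the gpio_led fields used in the hunk above; the GPIO numbers and LED names are made up:

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

static struct gpio_led example_leds[] = {
	{
		.name		= "board:green:status",
		.gpio		= 12,		/* made-up GPIO number */
		.default_state	= LEDS_GPIO_DEFSTATE_ON,
	},
	{
		.name		= "board:red:fault",
		.gpio		= 13,		/* made-up GPIO number */
		.active_low	= 1,
		/* keep whatever state the bootloader left */
		.default_state	= LEDS_GPIO_DEFSTATE_KEEP,
	},
};

static struct gpio_led_platform_data example_led_data = {
	.num_leds	= ARRAY_SIZE(example_leds),
	.leds		= example_leds,
};

static struct platform_device example_led_device = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= { .platform_data = &example_led_data },
};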
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
new file mode 100644
index 000000000000..5946208ba26e
--- /dev/null
+++ b/drivers/leds/leds-lp3944.c
@@ -0,0 +1,466 @@
+/*
+ * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * I2C driver for National Semiconductor LP3944 Funlight Chip
+ * http://www.national.com/pf/LP/LP3944.html
+ *
+ * This helper chip can drive up to 8 LEDs, with two programmable DIM modes;
+ * it could even be used as a GPIO expander, but this driver assumes it is
+ * used as an LED controller.
+ *
+ * The DIM modes are used to set _blink_ patterns for LEDs; a pattern is
+ * specified by supplying two parameters:
+ * - period: from 0s to 1.6s
+ * - duty cycle: percentage of the period the led is on, from 0 to 100
+ *
+ * The LP3944 can be found on the Motorola A910 smartphone, where it drives
+ * the RGB LEDs, the camera flash light and the display backlights.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/leds-lp3944.h>
+
+/* Read Only Registers */
+#define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */
+#define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */
+
+#define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */
+#define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */
+#define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */
+#define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */
+#define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */
+#define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */
+
+/* These registers are not used to control LEDs in the LP3944; they can
+ * store arbitrary values which the chip will ignore.
+ */
+#define LP3944_REG_REGISTER8 0x08
+#define LP3944_REG_REGISTER9 0x09
+
+#define LP3944_DIM0 0
+#define LP3944_DIM1 1
+
+/* period in ms */
+#define LP3944_PERIOD_MIN 0
+#define LP3944_PERIOD_MAX 1600
+
+/* duty cycle is a percentage */
+#define LP3944_DUTY_CYCLE_MIN 0
+#define LP3944_DUTY_CYCLE_MAX 100
+
+#define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev)
+
+/* Saved data */
+struct lp3944_led_data {
+ u8 id;
+ enum lp3944_type type;
+ enum lp3944_status status;
+ struct led_classdev ldev;
+ struct i2c_client *client;
+ struct work_struct work;
+};
+
+struct lp3944_data {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct lp3944_led_data leds[LP3944_LEDS_MAX];
+};
+
+static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
+{
+ int tmp;
+
+ tmp = i2c_smbus_read_byte_data(client, reg);
+ if (tmp < 0)
+ return -EINVAL;
+
+ *value = tmp;
+
+ return 0;
+}
+
+static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+/**
+ * Set the period for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @period: period of a blink, that is, one on/off cycle, expressed in ms.
+ */
+static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period)
+{
+ u8 psc_reg;
+ u8 psc_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ psc_reg = LP3944_REG_PSC0;
+ else if (dim == LP3944_DIM1)
+ psc_reg = LP3944_REG_PSC1;
+ else
+ return -EINVAL;
+
+ /* Convert period to Prescaler value */
+ if (period > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ psc_value = (period * 255) / LP3944_PERIOD_MAX;
+
+ err = lp3944_reg_write(client, psc_reg, psc_value);
+
+ return err;
+}
+
+/**
+ * Set the duty cycle for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @duty_cycle: percentage of a period during which an LED is ON
+ */
+static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim,
+ u8 duty_cycle)
+{
+ u8 pwm_reg;
+ u8 pwm_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ pwm_reg = LP3944_REG_PWM0;
+ else if (dim == LP3944_DIM1)
+ pwm_reg = LP3944_REG_PWM1;
+ else
+ return -EINVAL;
+
+ /* Convert duty cycle to PWM value */
+ if (duty_cycle > LP3944_DUTY_CYCLE_MAX)
+ return -EINVAL;
+
+ pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX;
+
+ err = lp3944_reg_write(client, pwm_reg, pwm_value);
+
+ return err;
+}
+
+/**
+ * Set the led status
+ *
+ * @led: a lp3944_led_data structure
+ * @status: one of LP3944_LED_STATUS_OFF
+ * LP3944_LED_STATUS_ON
+ * LP3944_LED_STATUS_DIM0
+ * LP3944_LED_STATUS_DIM1
+ */
+static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
+{
+ struct lp3944_data *data = i2c_get_clientdata(led->client);
+ u8 id = led->id;
+ u8 reg;
+ u8 val = 0;
+ int err;
+
+ dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n",
+ __func__, led->ldev.name, status);
+
+ switch (id) {
+ case LP3944_LED0:
+ case LP3944_LED1:
+ case LP3944_LED2:
+ case LP3944_LED3:
+ reg = LP3944_REG_LS0;
+ break;
+ case LP3944_LED4:
+ case LP3944_LED5:
+ case LP3944_LED6:
+ case LP3944_LED7:
+ id -= LP3944_LED4;
+ reg = LP3944_REG_LS1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (status > LP3944_LED_STATUS_DIM1)
+ return -EINVAL;
+
+	/* invert only 0 and 1 and leave the other values unchanged;
+	 * remember we are abusing status to set blink patterns
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
+ status = 1 - status;
+
+ mutex_lock(&data->lock);
+ lp3944_reg_read(led->client, reg, &val);
+
+ val &= ~(LP3944_LED_STATUS_MASK << (id << 1));
+ val |= (status << (id << 1));
+
+ dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n",
+ __func__, led->ldev.name, reg, id, status, val);
+
+ /* set led status */
+ err = lp3944_reg_write(led->client, reg, val);
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static int lp3944_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+ u16 period;
+ u8 duty_cycle;
+ int err;
+
+ /* units are in ms */
+ if (*delay_on + *delay_off > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+		/* Special case: the leds subsystem requires a default,
+		 * user-friendly blink pattern for the LED, so blink the LED
+		 * slowly (1 Hz).
+ */
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ period = (*delay_on) + (*delay_off);
+
+ /* duty_cycle is the percentage of period during which the led is ON */
+ duty_cycle = 100 * (*delay_on) / period;
+
+	/* invert the duty cycle for inverted LEDs; this has the same effect
+	 * as swapping delay_on and delay_off
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED)
+ duty_cycle = 100 - duty_cycle;
+
+	/* NOTE: always using the first DIM mode; this means that all LEDs
+	 * will share the same blinking pattern.
+	 *
+	 * We could later find a way to have two LEDs blink in hardware with
+	 * different patterns at the same time, falling back to software
+	 * control for the others.
+ */
+ err = lp3944_dim_set_period(led->client, LP3944_DIM0, period);
+ if (err)
+ return err;
+
+ err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle);
+ if (err)
+ return err;
+
+ dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n",
+ __func__);
+
+ led->status = LP3944_LED_STATUS_DIM0;
+ schedule_work(&led->work);
+
+ return 0;
+}
+
+static void lp3944_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+
+ dev_dbg(&led->client->dev, "%s: %s, %d\n",
+ __func__, led_cdev->name, brightness);
+
+ led->status = brightness;
+ schedule_work(&led->work);
+}
+
+static void lp3944_led_work(struct work_struct *work)
+{
+ struct lp3944_led_data *led;
+
+ led = container_of(work, struct lp3944_led_data, work);
+ lp3944_led_set(led, led->status);
+}
+
+static int lp3944_configure(struct i2c_client *client,
+ struct lp3944_data *data,
+ struct lp3944_platform_data *pdata)
+{
+ int i, err = 0;
+
+ for (i = 0; i < pdata->leds_size; i++) {
+ struct lp3944_led *pled = &pdata->leds[i];
+ struct lp3944_led_data *led = &data->leds[i];
+ led->client = client;
+ led->id = i;
+
+ switch (pled->type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led->type = pled->type;
+ led->status = pled->status;
+ led->ldev.name = pled->name;
+ led->ldev.max_brightness = 1;
+ led->ldev.brightness_set = lp3944_led_set_brightness;
+ led->ldev.blink_set = lp3944_led_set_blink;
+ led->ldev.flags = LED_CORE_SUSPENDRESUME;
+
+ INIT_WORK(&led->work, lp3944_led_work);
+ err = led_classdev_register(&client->dev, &led->ldev);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "couldn't register LED %s\n",
+ led->ldev.name);
+ goto exit;
+ }
+
+ /* to expose the default value to userspace */
+ led->ldev.brightness = led->status;
+
+ /* Set the default led status */
+ err = lp3944_led_set(led, led->status);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "%s couldn't set STATUS %d\n",
+ led->ldev.name, led->status);
+ goto exit;
+ }
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+
+ }
+ }
+ return 0;
+
+exit:
+ if (i > 0)
+ for (i = i - 1; i >= 0; i--)
+ switch (pdata->leds[i].type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int __devinit lp3944_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
+ struct lp3944_data *data;
+
+ if (lp3944_pdata == NULL) {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ /* Let's see whether this adapter can support what we need. */
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "insufficient functionality!\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+
+ mutex_init(&data->lock);
+
+ dev_info(&client->dev, "lp3944 enabled\n");
+
+ lp3944_configure(client, data, lp3944_pdata);
+ return 0;
+}
+
+static int __devexit lp3944_remove(struct i2c_client *client)
+{
+ struct lp3944_platform_data *pdata = client->dev.platform_data;
+ struct lp3944_data *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < pdata->leds_size; i++)
+ switch (data->leds[i].type) {
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ kfree(data);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+/* lp3944 i2c driver struct */
+static const struct i2c_device_id lp3944_id[] = {
+ {"lp3944", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lp3944_id);
+
+static struct i2c_driver lp3944_driver = {
+ .driver = {
+ .name = "lp3944",
+ },
+ .probe = lp3944_probe,
+ .remove = __devexit_p(lp3944_remove),
+ .id_table = lp3944_id,
+};
+
+static int __init lp3944_module_init(void)
+{
+ return i2c_add_driver(&lp3944_driver);
+}
+
+static void __exit lp3944_module_exit(void)
+{
+ i2c_del_driver(&lp3944_driver);
+}
+
+module_init(lp3944_module_init);
+module_exit(lp3944_module_exit);
+
+MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
+MODULE_DESCRIPTION("LP3944 Fun Light Chip");
+MODULE_LICENSE("GPL");
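The new driver expects its LED layout through platform data attached to the I2C board info. A hypothetical board-file sketch follows, assuming the lp3944_platform_data/lp3944_led layout the driver reads from <linux/leds-lp3944.h>; the bus number, the 0x60 slave address and the LED names are assumptions, not taken from any real board.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/leds-lp3944.h>

static struct lp3944_led example_lp3944_leds[LP3944_LEDS_MAX] = {
	[0] = {
		.name	= "example:red",
		.type	= LP3944_LED_TYPE_LED,
		.status	= LP3944_LED_STATUS_OFF,
	},
	[1] = {
		.name	= "example:green",
		.type	= LP3944_LED_TYPE_LED_INVERTED,	/* wired active low */
		.status	= LP3944_LED_STATUS_OFF,
	},
	/* remaining outputs stay LP3944_LED_TYPE_NONE and are skipped */
};

static struct lp3944_platform_data example_lp3944_pdata = {
	.leds		= example_lp3944_leds,
	.leds_size	= LP3944_LEDS_MAX,
};

static struct i2c_board_info example_i2c_board_info[] __initdata = {
	{
		I2C_BOARD_INFO("lp3944", 0x60),	/* assumed slave address */
		.platform_data = &example_lp3944_pdata,
	},
};

/* registered from board init code, e.g.:
 *	i2c_register_board_info(0, example_i2c_board_info,
 *				ARRAY_SIZE(example_i2c_board_info));
 */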
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 3937244fdcab..dba8921240f2 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -35,7 +35,7 @@ struct pca9532_data {
struct pca9532_led leds[16];
struct mutex update_lock;
struct input_dev *idev;
- struct work_struct work;
+ struct work_struct work;
u8 pwm[2];
u8 psc[2];
};
@@ -87,14 +87,14 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
if (b > 0xFF)
return -EINVAL;
data->pwm[pwm] = b;
- data->psc[pwm] = blink;
- return 0;
+ data->psc[pwm] = blink;
+ return 0;
}
static int pca9532_setpwm(struct i2c_client *client, int pwm)
{
- struct pca9532_data *data = i2c_get_clientdata(client);
- mutex_lock(&data->update_lock);
+ struct pca9532_data *data = i2c_get_clientdata(client);
+ mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
data->pwm[pwm]);
i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
@@ -132,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev,
led->state = PCA9532_ON;
else {
led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
- err = pca9532_calcpwm(led->client, 0, 0, value);
+ err = pca9532_calcpwm(led->client, 0, 0, value);
if (err)
return; /* XXX: led api doesn't allow error code? */
}
- schedule_work(&led->work);
+ schedule_work(&led->work);
}
static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -145,7 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
struct pca9532_led *led = ldev_to_led(led_cdev);
struct i2c_client *client = led->client;
int psc;
- int err = 0;
+ int err = 0;
if (*delay_on == 0 && *delay_off == 0) {
/* led subsystem ask us for a blink rate */
@@ -157,11 +157,11 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
/* Thecus specific: only use PSC/PWM 0 */
psc = (*delay_on * 152-1)/1000;
- err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
- if (err)
- return err;
- schedule_work(&led->work);
- return 0;
+ err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
+ if (err)
+ return err;
+ schedule_work(&led->work);
+ return 0;
}
static int pca9532_event(struct input_dev *dev, unsigned int type,
@@ -178,15 +178,15 @@ static int pca9532_event(struct input_dev *dev, unsigned int type,
else
data->pwm[1] = 0;
- schedule_work(&data->work);
+ schedule_work(&data->work);
- return 0;
+ return 0;
}
static void pca9532_input_work(struct work_struct *work)
{
- struct pca9532_data *data;
- data = container_of(work, struct pca9532_data, work);
+ struct pca9532_data *data;
+ data = container_of(work, struct pca9532_data, work);
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
data->pwm[1]);
@@ -195,11 +195,11 @@ static void pca9532_input_work(struct work_struct *work)
static void pca9532_led_work(struct work_struct *work)
{
- struct pca9532_led *led;
- led = container_of(work, struct pca9532_led, work);
- if (led->state == PCA9532_PWM0)
- pca9532_setpwm(led->client, 0);
- pca9532_setled(led);
+ struct pca9532_led *led;
+ led = container_of(work, struct pca9532_led, work);
+ if (led->state == PCA9532_PWM0)
+ pca9532_setpwm(led->client, 0);
+ pca9532_setled(led);
}
static int pca9532_configure(struct i2c_client *client,
@@ -232,7 +232,7 @@ static int pca9532_configure(struct i2c_client *client,
led->ldev.brightness = LED_OFF;
led->ldev.brightness_set = pca9532_set_brightness;
led->ldev.blink_set = pca9532_set_blink;
- INIT_WORK(&led->work, pca9532_led_work);
+ INIT_WORK(&led->work, pca9532_led_work);
err = led_classdev_register(&client->dev, &led->ldev);
if (err < 0) {
dev_err(&client->dev,
@@ -262,11 +262,11 @@ static int pca9532_configure(struct i2c_client *client,
BIT_MASK(SND_TONE);
data->idev->event = pca9532_event;
input_set_drvdata(data->idev, data);
- INIT_WORK(&data->work, pca9532_input_work);
+ INIT_WORK(&data->work, pca9532_input_work);
err = input_register_device(data->idev);
if (err) {
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
goto exit;
}
@@ -283,13 +283,13 @@ exit:
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
@@ -340,13 +340,13 @@ static int pca9532_remove(struct i2c_client *client)
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index d4e8979735cb..01c591923793 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -38,8 +38,6 @@ struct lguest_pages
#define CHANGED_GDT_TLS 4 /* Actually a subset of CHANGED_GDT */
#define CHANGED_ALL 3
-struct lguest;
-
struct lg_cpu {
unsigned int id;
struct lguest *lg;
@@ -82,7 +80,7 @@ struct lg_cpu {
struct lg_eventfd {
unsigned long addr;
- struct file *event;
+ struct eventfd_ctx *event;
};
struct lg_eventfd_map {
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 32e297121058..9f9a2953b383 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -50,7 +50,7 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
/* Now append new entry. */
new->map[new->num].addr = addr;
- new->map[new->num].event = eventfd_fget(fd);
+ new->map[new->num].event = eventfd_ctx_fdget(fd);
if (IS_ERR(new->map[new->num].event)) {
kfree(new);
return PTR_ERR(new->map[new->num].event);
@@ -357,7 +357,7 @@ static int close(struct inode *inode, struct file *file)
/* Release any eventfds they registered. */
for (i = 0; i < lg->eventfds->num; i++)
- fput(lg->eventfds->map[i].event);
+ eventfd_ctx_put(lg->eventfds->map[i].event);
kfree(lg->eventfds);
/* If lg->dead doesn't contain an error code it will be NULL or a
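The lguest change stops holding a struct file for each registered eventfd and keeps an eventfd_ctx instead. A minimal sketch of that eventfd_ctx life cycle; the example_* names are placeholders, only the eventfd_* calls come from the kernel API:

#include <linux/eventfd.h>
#include <linux/err.h>

static struct eventfd_ctx *example_ctx;

static int example_bind(int fd)
{
	/* resolve the user-supplied fd once and keep the context */
	example_ctx = eventfd_ctx_fdget(fd);
	if (IS_ERR(example_ctx))
		return PTR_ERR(example_ctx);
	return 0;
}

static void example_notify(void)
{
	/* add 1 to the eventfd counter and wake any readers */
	eventfd_signal(example_ctx, 1);
}

static void example_unbind(void)
{
	/* drop our reference; the owner may have closed the fd long ago */
	eventfd_ctx_put(example_ctx);
}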
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 6e149f4a1fff..a0f68386c12f 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -378,6 +378,17 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
+#ifdef CONFIG_PCI
+	/* Set the DMA ops to the ones from the PCI device; this could be
+	 * fishy if we didn't know that on PowerMac it's always direct ops
+	 * or iommu ops, which will work fine
+ */
+ dev->ofdev.dev.archdata.dma_ops =
+ chip->lbus.pdev->dev.archdata.dma_ops;
+ dev->ofdev.dev.archdata.dma_data =
+ chip->lbus.pdev->dev.archdata.dma_data;
+#endif /* CONFIG_PCI */
+
#ifdef DEBUG
printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj);
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index c3ae51584b12..3710ff88fc10 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -195,7 +195,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_exception_store **store)
{
int r = 0;
- struct dm_exception_store_type *type;
+ struct dm_exception_store_type *type = NULL;
struct dm_exception_store *tmp_store;
char persistent;
@@ -211,12 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
}
persistent = toupper(*argv[1]);
- if (persistent != 'P' && persistent != 'N') {
+ if (persistent == 'P')
+ type = get_type("P");
+ else if (persistent == 'N')
+ type = get_type("N");
+ else {
ti->error = "Persistent flag is not P or N";
return -EINVAL;
}
- type = get_type(&persistent);
if (!type) {
ti->error = "Exception store type not recognised";
r = -EINVAL;
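The hunk above fixes a lookup that passed &persistent to get_type(): the address of a single char is not a NUL-terminated string, so a string-based comparison could read past it. A small userspace illustration of the distinction (names here are invented and unrelated to device-mapper):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char flag = 'P';
	const char *literal = "P";

	/* a string literal carries its terminating NUL */
	printf("strlen(\"P\") = %zu\n", strlen(literal));

	/*
	 * &flag only points at one byte; handing it to code that does
	 * strcmp()/strlen() reads past the variable, which is why the
	 * patch passes get_type("P") / get_type("N") instead.
	 */
	printf("flag = %c (one char, no NUL guaranteed after it)\n", flag);
	return 0;
}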
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4899ebe767c8..2cba557d9e61 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -495,7 +495,7 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
- if (blk_stack_limits(limits, &q->limits, start) < 0)
+ if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
DMWARN("%s: target device %s is misaligned",
dm_device_name(ti->table->md), bdevname(bdev, b));
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3c6d4ee8921d..9acd54a5cffb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1017,7 +1017,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_flags |= 1 << BIO_CLONED;
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
bio_integrity_trim(clone,
bio_sector_offset(bio, idx, offset), len);
}
@@ -1045,7 +1045,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
clone->bi_flags &= ~(1 << BIO_SEG_VALID);
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
bio_integrity_trim(clone,
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 15c8b7b25a9b..5810fa906af0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
rdev->sectors = sectors * mddev->chunk_sectors;
}
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 09be637d52cb..0f4a70c43ffc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3573,7 +3573,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- if (mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
@@ -3601,7 +3602,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- if (mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
@@ -3844,11 +3846,9 @@ static int md_alloc(dev_t dev, char *name)
flush_scheduled_work();
mutex_lock(&disks_mutex);
- if (mddev->gendisk) {
- mutex_unlock(&disks_mutex);
- mddev_put(mddev);
- return -EEXIST;
- }
+ error = -EEXIST;
+ if (mddev->gendisk)
+ goto abort;
if (name) {
/* Need to ensure that 'name' is not a duplicate.
@@ -3860,17 +3860,15 @@ static int md_alloc(dev_t dev, char *name)
if (mddev2->gendisk &&
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
- return -EEXIST;
+ goto abort;
}
spin_unlock(&all_mddevs_lock);
}
+ error = -ENOMEM;
mddev->queue = blk_alloc_queue(GFP_KERNEL);
- if (!mddev->queue) {
- mutex_unlock(&disks_mutex);
- mddev_put(mddev);
- return -ENOMEM;
- }
+ if (!mddev->queue)
+ goto abort;
mddev->queue->queuedata = mddev;
/* Can be unlocked because the queue is new: no concurrency */
@@ -3880,11 +3878,9 @@ static int md_alloc(dev_t dev, char *name)
disk = alloc_disk(1 << shift);
if (!disk) {
- mutex_unlock(&disks_mutex);
blk_cleanup_queue(mddev->queue);
mddev->queue = NULL;
- mddev_put(mddev);
- return -ENOMEM;
+ goto abort;
}
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -3906,16 +3902,22 @@ static int md_alloc(dev_t dev, char *name)
mddev->gendisk = disk;
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
&disk_to_dev(disk)->kobj, "%s", "md");
- mutex_unlock(&disks_mutex);
- if (error)
+ if (error) {
+ /* This isn't possible, but as kobject_init_and_add is marked
+ * __must_check, we must do something with the result
+ */
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
- else {
+ error = 0;
+ }
+ abort:
+ mutex_unlock(&disks_mutex);
+ if (!error) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
- return 0;
+ return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
@@ -6334,10 +6336,16 @@ void md_do_sync(mddev_t *mddev)
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
- if (j >= mddev->resync_max)
- wait_event(mddev->recovery_wait,
- mddev->resync_max > j
- || kthread_should_stop());
+ while (j >= mddev->resync_max && !kthread_should_stop()) {
+ /* As this condition is controlled by user-space,
+ * we can block indefinitely, so use '_interruptible'
+ * to avoid triggering warnings.
+ */
+ flush_signals(current); /* just in case */
+ wait_event_interruptible(mddev->recovery_wait,
+ mddev->resync_max > j
+ || kthread_should_stop());
+ }
if (kthread_should_stop())
goto interrupted;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index cbe368fa6598..237fe3fd235c 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for (path = first; path <= last; path++)
if ((p=conf->multipaths+path)->rdev == NULL) {
q = rdev->bdev->bd_disk->queue;
- blk_queue_stack_limits(mddev->queue, q);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
@@ -463,9 +464,9 @@ static int multipath_run (mddev_t *mddev)
disk = conf->multipaths + disk_idx;
disk->rdev = rdev;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ab4a489d8695..335f490dcad6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev)
}
dev[j] = rdev1;
- blk_queue_stack_limits(mddev->queue,
- rdev1->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev1->bdev,
+ rdev1->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev)
mddev->chunk_sectors << 9);
goto abort;
}
+
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
printk(KERN_INFO "raid0: done.\n");
mddev->private = conf;
return 0;
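These md hunks switch to disk_stack_limits() and begin advertising I/O topology hints; note the recurring "<< 9", which converts 512-byte sectors into the byte units the block layer expects. A compact sketch of that arithmetic, with a placeholder queue pointer and disk count:

#include <linux/blkdev.h>

static void example_topology(struct request_queue *q,
			     unsigned int chunk_sectors, int data_disks)
{
	unsigned int chunk_bytes = chunk_sectors << 9;	/* sectors -> bytes */

	/* smallest I/O the array can do without read-modify-write */
	blk_queue_io_min(q, chunk_bytes);

	/* a full stripe (chunk * data disks) is the optimal request size */
	blk_queue_io_opt(q, chunk_bytes * data_disks);
}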
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 89939a7aef57..0569efba0c02 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for (mirror = first; mirror <= last; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -1988,9 +1988,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
-
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10c..7298a5e5a183 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for ( ; mirror <= last ; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
static int run(mddev_t *mddev)
{
conf_t *conf;
- int i, disk_idx;
+ int i, disk_idx, chunk_size;
mirror_info_t *disk;
mdk_rdev_t *rdev;
int nc, fc, fo;
@@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev)
spin_lock_init(&conf->device_lock);
mddev->queue->queue_lock = &conf->device_lock;
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ if (conf->raid_disks % conf->near_copies)
+ blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+ else
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks / conf->near_copies));
+
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
-
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f9f991e6e138..37835538b58e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3699,13 +3699,21 @@ static int make_request(struct request_queue *q, struct bio * bi)
goto retry;
}
}
- /* FIXME what if we get a false positive because these
- * are being updated.
- */
- if (logical_sector >= mddev->suspend_lo &&
+
+ if (bio_data_dir(bi) == WRITE &&
+ logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
release_stripe(sh);
- schedule();
+ /* As the suspend_* range is controlled by
+ * userspace, we want an interruptible
+ * wait.
+ */
+ flush_signals(current);
+ prepare_to_wait(&conf->wait_for_overlap,
+ &w, TASK_INTERRUPTIBLE);
+ if (logical_sector >= mddev->suspend_lo &&
+ logical_sector < mddev->suspend_hi)
+ schedule();
goto retry;
}
@@ -4452,7 +4460,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
static int run(mddev_t *mddev)
{
raid5_conf_t *conf;
- int working_disks = 0;
+ int working_disks = 0, chunk_size;
mdk_rdev_t *rdev;
if (mddev->recovery_cp != MaxSector)
@@ -4607,6 +4615,14 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks - conf->max_degraded));
+
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
return 0;
abort:
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index b6da9c3873fe..aa20ce8cc668 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -1096,8 +1096,19 @@ static int xc2028_set_params(struct dvb_frontend *fe,
}
/* All S-code tables need a 200kHz shift */
- if (priv->ctrl.demod)
+ if (priv->ctrl.demod) {
demod = priv->ctrl.demod + 200;
+ /*
+ * The DTV7 S-code table needs a 700 kHz shift.
+ * Thanks to Terry Wu <terrywu2009@gmail.com> for reporting this
+ *
+ * DTV7 is only used in Australia. Germany or Italy may also
+ * use this firmware after initialization, but a tune to a UHF
+ * channel should then cause DTV78 to be used.
+ */
+ if (type & DTV7)
+ demod += 500;
+ }
return generic_set_freq(fe, p->frequency,
T_DIGITAL_TV, type, 0, demod);
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index efb4a6c2b57a..bc370183ec97 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -410,7 +410,7 @@ static int skystar2_rev28_attach(struct flexcop_device *fc,
if (!fc->fe)
return 0;
- i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe);;
+ i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe);
if (!i2c_tuner)
return 0;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 818b2ab584bf..49fd78118a64 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -310,7 +310,7 @@ static int stk7700d_tuner_attach(struct dvb_usb_adapter *adap)
struct i2c_adapter *tun_i2c;
tun_i2c = dib7000p_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
return dvb_attach(mt2266_attach, adap->fe, tun_i2c,
- &stk7700d_mt2266_config[adap->id]) == NULL ? -ENODEV : 0;;
+ &stk7700d_mt2266_config[adap->id]) == NULL ? -ENODEV : 0;
}
/* STK7700-PH: Digital/Analog Hybrid Tuner, e.h. Cinergy HT USB HE */
diff --git a/drivers/media/dvb/frontends/cx24123.c b/drivers/media/dvb/frontends/cx24123.c
index 0592f043ea64..d8f921b6fafd 100644
--- a/drivers/media/dvb/frontends/cx24123.c
+++ b/drivers/media/dvb/frontends/cx24123.c
@@ -458,7 +458,7 @@ static int cx24123_set_symbolrate(struct cx24123_state *state, u32 srate)
/* check if symbol rate is within limits */
if ((srate > state->frontend.ops.info.symbol_rate_max) ||
(srate < state->frontend.ops.info.symbol_rate_min))
- return -EOPNOTSUPP;;
+ return -EOPNOTSUPP;
/* choose the sampling rate high enough for the required operation,
while optimizing the power consumed by the demodulator */
diff --git a/drivers/media/dvb/frontends/dib0070.c b/drivers/media/dvb/frontends/dib0070.c
index fe895bf7b18f..da92cbe1b8ea 100644
--- a/drivers/media/dvb/frontends/dib0070.c
+++ b/drivers/media/dvb/frontends/dib0070.c
@@ -167,7 +167,7 @@ static int dib0070_tune_digital(struct dvb_frontend *fe, struct dvb_frontend_par
break;
case BAND_SBAND:
LO4_SET_VCO_HFDIV(lo4, 0, 0);
- LO4_SET_CTRIM(lo4, 1);;
+ LO4_SET_CTRIM(lo4, 1);
c = 1;
break;
case BAND_UHF:
diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c
index a621f727935f..071328d7b859 100644
--- a/drivers/media/dvb/frontends/mt312.c
+++ b/drivers/media/dvb/frontends/mt312.c
@@ -85,7 +85,7 @@ static int mt312_read(struct mt312_state *state, const enum mt312_reg_addr reg,
int i;
dprintk("R(%d):", reg & 0x7f);
for (i = 0; i < count; i++)
- printk(" %02x", buf[i]);
+ printk(KERN_CONT " %02x", buf[i]);
printk("\n");
}
@@ -103,7 +103,7 @@ static int mt312_write(struct mt312_state *state, const enum mt312_reg_addr reg,
int i;
dprintk("W(%d):", reg & 0x7f);
for (i = 0; i < count; i++)
- printk(" %02x", src[i]);
+ printk(KERN_CONT " %02x", src[i]);
printk("\n");
}
@@ -744,7 +744,8 @@ static struct dvb_frontend_ops mt312_ops = {
.type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
- .frequency_stepsize = (MT312_PLL_CLK / 1000) / 128, /* FIXME: adjust freq to real used xtal */
+ /* FIXME: adjust freq to real used xtal */
+ .frequency_stepsize = (MT312_PLL_CLK / 1000) / 128,
.symbol_rate_min = MT312_SYS_CLK / 128, /* FIXME as above */
.symbol_rate_max = MT312_SYS_CLK / 2,
.caps =
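The mt312 hunks tag continuation printks with KERN_CONT so the hex dump stays on one logged line instead of being split into separate messages. A minimal sketch of that pattern; the register number and buffer are invented:

#include <linux/kernel.h>
#include <linux/types.h>

static void example_dump(const u8 *buf, int count)
{
	int i;

	printk(KERN_DEBUG "R(42):");
	for (i = 0; i < count; i++)
		printk(KERN_CONT " %02x", buf[i]);
	printk(KERN_CONT "\n");
}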
diff --git a/drivers/media/dvb/frontends/stv0900_sw.c b/drivers/media/dvb/frontends/stv0900_sw.c
index a5a31536cbcb..962fde1437ce 100644
--- a/drivers/media/dvb/frontends/stv0900_sw.c
+++ b/drivers/media/dvb/frontends/stv0900_sw.c
@@ -1721,7 +1721,7 @@ static enum fe_stv0900_signal_type stv0900_dvbs1_acq_workaround(struct dvb_front
s32 srate, demod_timeout,
fec_timeout, freq1, freq0;
- enum fe_stv0900_signal_type signal_type = STV0900_NODATA;;
+ enum fe_stv0900_signal_type signal_type = STV0900_NODATA;
switch (demod) {
case STV0900_DEMOD_1:
diff --git a/drivers/media/dvb/frontends/zl10036.c b/drivers/media/dvb/frontends/zl10036.c
index e22a0b381dc4..4e814ff22b23 100644
--- a/drivers/media/dvb/frontends/zl10036.c
+++ b/drivers/media/dvb/frontends/zl10036.c
@@ -29,7 +29,7 @@
#include <linux/module.h>
#include <linux/dvb/frontend.h>
-#include <asm/types.h>
+#include <linux/types.h>
#include "zl10036.h"
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 68eb4493f991..d8d4214fd65f 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -1,5 +1,6 @@
config TTPCI_EEPROM
tristate
+ depends on I2C
default n
config DVB_AV7110
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index ce64c6214cc4..8986d967d2f4 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -490,7 +490,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
if (!av7110->analog_tuner_flags)
return 0;
- if (input < 0 || input >= 4)
+ if (input >= 4)
return -EINVAL;
av7110->current_input = input;
diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c
index 640421ceb24a..0c4bf6d3bddf 100644
--- a/drivers/media/radio/radio-si470x.c
+++ b/drivers/media/radio/radio-si470x.c
@@ -106,20 +106,24 @@
* Tobias Lorenz <tobias.lorenz@gmx.net>
* - add LED status output
* - get HW/SW version from scratchpad
+ * 2009-06-16 Edouard Lafargue <edouard@lafargue.name>
+ * Version 1.0.10
+ * - add support for interrupt mode for RDS endpoint,
+ * instead of polling.
+ * Improves RDS reception significantly
*
* ToDo:
* - add firmware download/update support
- * - RDS support: interrupt mode, instead of polling
*/
/* driver definitions */
#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>"
#define DRIVER_NAME "radio-si470x"
-#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 9)
+#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 10)
#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers"
-#define DRIVER_VERSION "1.0.9"
+#define DRIVER_VERSION "1.0.10"
/* kernel includes */
@@ -217,16 +221,6 @@ static unsigned short max_rds_errors = 1;
module_param(max_rds_errors, ushort, 0644);
MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
-/* RDS poll frequency */
-static unsigned int rds_poll_time = 40;
-/* 40 is used by the original USBRadio.exe */
-/* 50 is used by radio-cadet */
-/* 75 should be okay */
-/* 80 is the usual RDS receive interval */
-module_param(rds_poll_time, uint, 0644);
-MODULE_PARM_DESC(rds_poll_time, "RDS poll time (ms): *40*");
-
-
/**************************************************************************
* Register Definitions
@@ -449,6 +443,12 @@ struct si470x_device {
struct usb_interface *intf;
struct video_device *videodev;
+ /* Interrupt endpoint handling */
+ char *int_in_buffer;
+ struct usb_endpoint_descriptor *int_in_endpoint;
+ struct urb *int_in_urb;
+ int int_in_running;
+
/* driver management */
unsigned int users;
unsigned char disconnected;
@@ -458,7 +458,6 @@ struct si470x_device {
unsigned short registers[RADIO_REGISTER_NUM];
/* RDS receive buffer */
- struct delayed_work work;
wait_queue_head_t read_queue;
struct mutex lock; /* buffer locking */
unsigned char *buffer; /* size is always multiple of three */
@@ -864,43 +863,6 @@ static int si470x_get_all_registers(struct si470x_device *radio)
/**************************************************************************
- * General Driver Functions - RDS_REPORT
- **************************************************************************/
-
-/*
- * si470x_get_rds_registers - read rds registers
- */
-static int si470x_get_rds_registers(struct si470x_device *radio)
-{
- unsigned char buf[RDS_REPORT_SIZE];
- int retval;
- int size;
- unsigned char regnr;
-
- buf[0] = RDS_REPORT;
-
- retval = usb_interrupt_msg(radio->usbdev,
- usb_rcvintpipe(radio->usbdev, 1),
- (void *) &buf, sizeof(buf), &size, usb_timeout);
- if (size != sizeof(buf))
- printk(KERN_WARNING DRIVER_NAME ": si470x_get_rds_registers: "
- "return size differs: %d != %zu\n", size, sizeof(buf));
- if (retval < 0)
- printk(KERN_WARNING DRIVER_NAME ": si470x_get_rds_registers: "
- "usb_interrupt_msg returned %d\n", retval);
-
- if (retval >= 0)
- for (regnr = 0; regnr < RDS_REGISTER_NUM; regnr++)
- radio->registers[STATUSRSSI + regnr] =
- get_unaligned_be16(
- &buf[regnr * RADIO_REGISTER_SIZE + 1]);
-
- return (retval < 0) ? -EINVAL : 0;
-}
-
-
-
-/**************************************************************************
* General Driver Functions - LED_REPORT
**************************************************************************/
@@ -958,102 +920,118 @@ static int si470x_get_scratch_page_versions(struct si470x_device *radio)
**************************************************************************/
/*
- * si470x_rds - rds processing function
+ * si470x_int_in_callback - rds callback and processing function
+ *
+ * TODO: do we need to use mutex locks in some sections?
*/
-static void si470x_rds(struct si470x_device *radio)
+static void si470x_int_in_callback(struct urb *urb)
{
+ struct si470x_device *radio = urb->context;
+ unsigned char buf[RDS_REPORT_SIZE];
+ int retval;
+ unsigned char regnr;
unsigned char blocknum;
unsigned short bler; /* rds block errors */
unsigned short rds;
unsigned char tmpbuf[3];
- /* get rds blocks */
- if (si470x_get_rds_registers(radio) < 0)
- return;
- if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSR) == 0) {
- /* No RDS group ready */
- return;
- }
- if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSS) == 0) {
- /* RDS decoder not synchronized */
- return;
- }
-
- /* copy all four RDS blocks to internal buffer */
- mutex_lock(&radio->lock);
- for (blocknum = 0; blocknum < 4; blocknum++) {
- switch (blocknum) {
- default:
- bler = (radio->registers[STATUSRSSI] &
- STATUSRSSI_BLERA) >> 9;
- rds = radio->registers[RDSA];
- break;
- case 1:
- bler = (radio->registers[READCHAN] &
- READCHAN_BLERB) >> 14;
- rds = radio->registers[RDSB];
- break;
- case 2:
- bler = (radio->registers[READCHAN] &
- READCHAN_BLERC) >> 12;
- rds = radio->registers[RDSC];
- break;
- case 3:
- bler = (radio->registers[READCHAN] &
- READCHAN_BLERD) >> 10;
- rds = radio->registers[RDSD];
- break;
- };
-
- /* Fill the V4L2 RDS buffer */
- put_unaligned_le16(rds, &tmpbuf);
- tmpbuf[2] = blocknum; /* offset name */
- tmpbuf[2] |= blocknum << 3; /* received offset */
- if (bler > max_rds_errors)
- tmpbuf[2] |= 0x80; /* uncorrectable errors */
- else if (bler > 0)
- tmpbuf[2] |= 0x40; /* corrected error(s) */
-
- /* copy RDS block to internal buffer */
- memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3);
- radio->wr_index += 3;
-
- /* wrap write pointer */
- if (radio->wr_index >= radio->buf_size)
- radio->wr_index = 0;
-
- /* check for overflow */
- if (radio->wr_index == radio->rd_index) {
- /* increment and wrap read pointer */
- radio->rd_index += 3;
- if (radio->rd_index >= radio->buf_size)
- radio->rd_index = 0;
+ if (urb->status) {
+ if (urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN) {
+ return;
+ } else {
+ printk(KERN_WARNING DRIVER_NAME
+ ": non-zero urb status (%d)\n", urb->status);
+ goto resubmit; /* Maybe we can recover. */
}
}
- mutex_unlock(&radio->lock);
-
- /* wake up read queue */
- if (radio->wr_index != radio->rd_index)
- wake_up_interruptible(&radio->read_queue);
-}
-
-
-/*
- * si470x_work - rds work function
- */
-static void si470x_work(struct work_struct *work)
-{
- struct si470x_device *radio = container_of(work, struct si470x_device,
- work.work);
/* safety checks */
if (radio->disconnected)
return;
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
- return;
+ goto resubmit;
- si470x_rds(radio);
- schedule_delayed_work(&radio->work, msecs_to_jiffies(rds_poll_time));
+ if (urb->actual_length > 0) {
+ /* Update RDS registers with URB data */
+ buf[0] = RDS_REPORT;
+ for (regnr = 0; regnr < RDS_REGISTER_NUM; regnr++)
+ radio->registers[STATUSRSSI + regnr] =
+ get_unaligned_be16(&radio->int_in_buffer[
+ regnr * RADIO_REGISTER_SIZE + 1]);
+ /* get rds blocks */
+ if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSR) == 0) {
+ /* No RDS group ready, better luck next time */
+ goto resubmit;
+ }
+ if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSS) == 0) {
+ /* RDS decoder not synchronized */
+ goto resubmit;
+ }
+ for (blocknum = 0; blocknum < 4; blocknum++) {
+ switch (blocknum) {
+ default:
+ bler = (radio->registers[STATUSRSSI] &
+ STATUSRSSI_BLERA) >> 9;
+ rds = radio->registers[RDSA];
+ break;
+ case 1:
+ bler = (radio->registers[READCHAN] &
+ READCHAN_BLERB) >> 14;
+ rds = radio->registers[RDSB];
+ break;
+ case 2:
+ bler = (radio->registers[READCHAN] &
+ READCHAN_BLERC) >> 12;
+ rds = radio->registers[RDSC];
+ break;
+ case 3:
+ bler = (radio->registers[READCHAN] &
+ READCHAN_BLERD) >> 10;
+ rds = radio->registers[RDSD];
+ break;
+ };
+
+ /* Fill the V4L2 RDS buffer */
+ put_unaligned_le16(rds, &tmpbuf);
+ tmpbuf[2] = blocknum; /* offset name */
+ tmpbuf[2] |= blocknum << 3; /* received offset */
+ if (bler > max_rds_errors)
+ tmpbuf[2] |= 0x80; /* uncorrectable errors */
+ else if (bler > 0)
+ tmpbuf[2] |= 0x40; /* corrected error(s) */
+
+ /* copy RDS block to internal buffer */
+ memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3);
+ radio->wr_index += 3;
+
+ /* wrap write pointer */
+ if (radio->wr_index >= radio->buf_size)
+ radio->wr_index = 0;
+
+ /* check for overflow */
+ if (radio->wr_index == radio->rd_index) {
+ /* increment and wrap read pointer */
+ radio->rd_index += 3;
+ if (radio->rd_index >= radio->buf_size)
+ radio->rd_index = 0;
+ }
+ }
+ if (radio->wr_index != radio->rd_index)
+ wake_up_interruptible(&radio->read_queue);
+ }
+
+resubmit:
+ /* Resubmit if we're still running. */
+ if (radio->int_in_running && radio->usbdev) {
+ retval = usb_submit_urb(radio->int_in_urb, GFP_ATOMIC);
+ if (retval) {
+ printk(KERN_WARNING DRIVER_NAME
+ ": resubmitting urb failed (%d)\n", retval);
+ radio->int_in_running = 0;
+ }
+ }
}
@@ -1075,8 +1053,6 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
/* switch on rds reception */
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) {
si470x_rds_on(radio);
- schedule_delayed_work(&radio->work,
- msecs_to_jiffies(rds_poll_time));
}
/* block if no new data available */
@@ -1135,8 +1111,6 @@ static unsigned int si470x_fops_poll(struct file *file,
/* switch on rds reception */
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) {
si470x_rds_on(radio);
- schedule_delayed_work(&radio->work,
- msecs_to_jiffies(rds_poll_time));
}
poll_wait(file, &radio->read_queue, pts);
@@ -1169,8 +1143,31 @@ static int si470x_fops_open(struct file *file)
if (radio->users == 1) {
/* start radio */
retval = si470x_start(radio);
- if (retval < 0)
+ if (retval < 0) {
usb_autopm_put_interface(radio->intf);
+ goto done;
+ }
+
+ /* initialize interrupt urb */
+ usb_fill_int_urb(radio->int_in_urb, radio->usbdev,
+ usb_rcvintpipe(radio->usbdev,
+ radio->int_in_endpoint->bEndpointAddress),
+ radio->int_in_buffer,
+ le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize),
+ si470x_int_in_callback,
+ radio,
+ radio->int_in_endpoint->bInterval);
+
+ radio->int_in_running = 1;
+ mb();
+
+ retval = usb_submit_urb(radio->int_in_urb, GFP_KERNEL);
+ if (retval) {
+ printk(KERN_INFO DRIVER_NAME
+ ": submitting int urb failed (%d)\n", retval);
+ radio->int_in_running = 0;
+ usb_autopm_put_interface(radio->intf);
+ }
}
done:
@@ -1196,16 +1193,21 @@ static int si470x_fops_release(struct file *file)
mutex_lock(&radio->disconnect_lock);
radio->users--;
if (radio->users == 0) {
+ /* shutdown interrupt handler */
+ if (radio->int_in_running) {
+ radio->int_in_running = 0;
+ if (radio->int_in_urb)
+ usb_kill_urb(radio->int_in_urb);
+ }
+
if (radio->disconnected) {
video_unregister_device(radio->videodev);
+ kfree(radio->int_in_buffer);
kfree(radio->buffer);
kfree(radio);
- goto done;
+ goto unlock;
}
- /* stop rds reception */
- cancel_delayed_work_sync(&radio->work);
-
/* cancel read processes */
wake_up_interruptible(&radio->read_queue);
@@ -1213,9 +1215,8 @@ static int si470x_fops_release(struct file *file)
retval = si470x_stop(radio);
usb_autopm_put_interface(radio->intf);
}
-
+unlock:
mutex_unlock(&radio->disconnect_lock);
-
done:
return retval;
}
@@ -1240,31 +1241,6 @@ static const struct v4l2_file_operations si470x_fops = {
**************************************************************************/
/*
- * si470x_v4l2_queryctrl - query control
- */
-static struct v4l2_queryctrl si470x_v4l2_queryctrl[] = {
- {
- .id = V4L2_CID_AUDIO_VOLUME,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Volume",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
- .default_value = 15,
- },
- {
- .id = V4L2_CID_AUDIO_MUTE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
-};
-
-
-/*
* si470x_vidioc_querycap - query device capabilities
*/
static int si470x_vidioc_querycap(struct file *file, void *priv,
@@ -1289,7 +1265,6 @@ static int si470x_vidioc_querycap(struct file *file, void *priv,
static int si470x_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
- unsigned char i = 0;
int retval = -EINVAL;
/* abort if qc->id is below V4L2_CID_BASE */
@@ -1297,12 +1272,11 @@ static int si470x_vidioc_queryctrl(struct file *file, void *priv,
goto done;
/* search video control */
- for (i = 0; i < ARRAY_SIZE(si470x_v4l2_queryctrl); i++) {
- if (qc->id == si470x_v4l2_queryctrl[i].id) {
- memcpy(qc, &(si470x_v4l2_queryctrl[i]), sizeof(*qc));
- retval = 0; /* found */
- break;
- }
+ switch (qc->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ return v4l2_ctrl_query_fill(qc, 0, 15, 1, 15);
+ case V4L2_CID_AUDIO_MUTE:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
}
/* disable unsupported base controls */
@@ -1657,7 +1631,9 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct si470x_device *radio;
- int retval = 0;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ int i, int_end_size, retval = 0;
/* private data allocation and initialization */
radio = kzalloc(sizeof(struct si470x_device), GFP_KERNEL);
@@ -1672,11 +1648,45 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
mutex_init(&radio->disconnect_lock);
mutex_init(&radio->lock);
+ iface_desc = intf->cur_altsetting;
+
+ /* Set up interrupt endpoint information. */
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+ if (((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
+ USB_DIR_IN) && ((endpoint->bmAttributes &
+ USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT))
+ radio->int_in_endpoint = endpoint;
+ }
+ if (!radio->int_in_endpoint) {
+ printk(KERN_INFO DRIVER_NAME
+ ": could not find interrupt in endpoint\n");
+ retval = -EIO;
+ goto err_radio;
+ }
+
+ int_end_size = le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize);
+
+ radio->int_in_buffer = kmalloc(int_end_size, GFP_KERNEL);
+ if (!radio->int_in_buffer) {
+ printk(KERN_INFO DRIVER_NAME
+ ": could not allocate int_in_buffer\n");
+ retval = -ENOMEM;
+ goto err_radio;
+ }
+
+ radio->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!radio->int_in_urb) {
+ printk(KERN_INFO DRIVER_NAME ": could not allocate int_in_urb\n");
+ retval = -ENOMEM;
+ goto err_intbuffer;
+ }
+
/* video device allocation and initialization */
radio->videodev = video_device_alloc();
if (!radio->videodev) {
retval = -ENOMEM;
- goto err_radio;
+ goto err_intbuffer;
}
memcpy(radio->videodev, &si470x_viddev_template,
sizeof(si470x_viddev_template));
@@ -1734,9 +1744,6 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->rd_index = 0;
init_waitqueue_head(&radio->read_queue);
- /* prepare rds work function */
- INIT_DELAYED_WORK(&radio->work, si470x_work);
-
/* register video device */
retval = video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr);
if (retval) {
@@ -1751,6 +1758,8 @@ err_all:
kfree(radio->buffer);
err_video:
video_device_release(radio->videodev);
+err_intbuffer:
+ kfree(radio->int_in_buffer);
err_radio:
kfree(radio);
err_initial:
@@ -1764,12 +1773,8 @@ err_initial:
static int si470x_usb_driver_suspend(struct usb_interface *intf,
pm_message_t message)
{
- struct si470x_device *radio = usb_get_intfdata(intf);
-
printk(KERN_INFO DRIVER_NAME ": suspending now...\n");
- cancel_delayed_work_sync(&radio->work);
-
return 0;
}
@@ -1779,16 +1784,8 @@ static int si470x_usb_driver_suspend(struct usb_interface *intf,
*/
static int si470x_usb_driver_resume(struct usb_interface *intf)
{
- struct si470x_device *radio = usb_get_intfdata(intf);
-
printk(KERN_INFO DRIVER_NAME ": resuming now...\n");
- mutex_lock(&radio->lock);
- if (radio->users && radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS)
- schedule_delayed_work(&radio->work,
- msecs_to_jiffies(rds_poll_time));
- mutex_unlock(&radio->lock);
-
return 0;
}
@@ -1802,12 +1799,15 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
mutex_lock(&radio->disconnect_lock);
radio->disconnected = 1;
- cancel_delayed_work_sync(&radio->work);
usb_set_intfdata(intf, NULL);
if (radio->users == 0) {
/* set led to disconnect state */
si470x_set_led_state(radio, BLINK_ORANGE_LED);
+ /* Free data structures. */
+ usb_free_urb(radio->int_in_urb);
+
+ kfree(radio->int_in_buffer);
video_unregister_device(radio->videodev);
kfree(radio->buffer);
kfree(radio);
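
The radio-si470x change above replaces the delayed-work polling loop with an interrupt-in URB that re-arms itself from its own completion handler until int_in_running is cleared. A minimal sketch of that URB life cycle, using only the USB core helpers the patch itself calls (the sketch_dev structure and function names are illustrative, not part of the driver):

#include <linux/usb.h>

struct sketch_dev {
	struct usb_device *usbdev;
	struct usb_endpoint_descriptor *ep;	/* interrupt-in endpoint */
	struct urb *int_in_urb;
	char *int_in_buffer;
	int int_in_running;
};

static void sketch_int_in_callback(struct urb *urb)
{
	struct sketch_dev *dev = urb->context;

	/* These statuses mean the URB was killed or the device is gone. */
	if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
	    urb->status == -ESHUTDOWN)
		return;

	if (urb->status == 0 && urb->actual_length > 0) {
		/* parse dev->int_in_buffer here (the RDS registers above) */
	}

	/* Re-arm the endpoint; completion runs in interrupt context, so GFP_ATOMIC. */
	if (dev->int_in_running && usb_submit_urb(urb, GFP_ATOMIC))
		dev->int_in_running = 0;
}

static int sketch_start(struct sketch_dev *dev)
{
	usb_fill_int_urb(dev->int_in_urb, dev->usbdev,
			 usb_rcvintpipe(dev->usbdev, dev->ep->bEndpointAddress),
			 dev->int_in_buffer,
			 le16_to_cpu(dev->ep->wMaxPacketSize),
			 sketch_int_in_callback, dev, dev->ep->bInterval);
	dev->int_in_running = 1;
	return usb_submit_urb(dev->int_in_urb, GFP_KERNEL);	/* process context */
}
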
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 061e147f6f26..84b6fc15519d 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -312,6 +312,14 @@ config VIDEO_OV7670
OV7670 VGA camera. It currently only works with the M88ALP01
controller.
+config VIDEO_MT9V011
+ tristate "Micron mt9v011 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Micron
+ mt9v011 1.3 Mpixel camera. It currently only works with the
+ em28xx driver.
+
config VIDEO_TCM825X
tristate "TCM825x camera sensor support"
depends on I2C && VIDEO_V4L2
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 7fb3add1b387..9f2e3214a482 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
+obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
diff --git a/drivers/media/video/adv7343.c b/drivers/media/video/adv7343.c
index 30f5caf5dda5..df26f2fe44eb 100644
--- a/drivers/media/video/adv7343.c
+++ b/drivers/media/video/adv7343.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/videodev2.h>
#include <linux/uaccess.h>
-#include <linux/version.h>
#include <media/adv7343.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 5eb1464af670..d8d3e1dc2cbc 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4591,14 +4591,10 @@ static int bttv_resume(struct pci_dev *pci_dev)
#endif
static struct pci_device_id bttv_pci_tbl[] = {
- {PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT848,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT849,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT878,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT879,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT848), 0},
+ {PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT849), 0},
+ {PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT878), 0},
+ {PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT879), 0},
{0,}
};
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index c92a25036f0e..96b687be8329 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -198,11 +198,14 @@ static const struct cx18_card_pci_info cx18_pci_mpc718[] = {
static const struct cx18_card cx18_card_mpc718 = {
.type = CX18_CARD_YUAN_MPC718,
- .name = "Yuan MPC718",
- .comment = "Analog video capture works; some audio line in may not.\n",
+ .name = "Yuan MPC718 MiniPCI DVB-T/Analog",
+ .comment = "Experimenters needed for device to work well.\n"
+ "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_418_AV,
- .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL,
+ .hw_muxer = CX18_HW_GPIO_MUX,
+ .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
+ CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
{ CX18_CARD_INPUT_SVIDEO1, 1,
@@ -211,27 +214,34 @@ static const struct cx18_card cx18_card_mpc718 = {
{ CX18_CARD_INPUT_SVIDEO2, 2,
CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 },
{ CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 },
- { CX18_CARD_INPUT_COMPOSITE3, 2, CX18_AV_COMPOSITE3 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
- { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 0 },
- { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, 0 },
+ { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
+ { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 },
},
- .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, 0 },
.tuners = {
/* XC3028 tuner */
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
+ /* FIXME - the FM radio is just a guess and driver doesn't use SIF */
+ .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
.ddr = {
- /* Probably Samsung K4D263238G-VC33 memory */
- .chip_config = 0x003,
- .refresh = 0x30c,
- .timing1 = 0x23230b73,
- .timing2 = 0x08,
+ /* Hynix HY5DU283222B DDR RAM */
+ .chip_config = 0x303,
+ .refresh = 0x3bd,
+ .timing1 = 0x36320966,
+ .timing2 = 0x1f,
.tune_lane = 0,
.initial_emrs = 2,
},
+ .gpio_init.initial_value = 0x1,
+ .gpio_init.direction = 0x3,
+ /* FIXME - these GPIOs are just guesses */
+ .gpio_audio_input = { .mask = 0x3,
+ .tuner = 0x1,
+ .linein = 0x3,
+ .radio = 0x1 },
.xceive_pin = 0,
.pci_list = cx18_pci_mpc718,
.i2c = &cx18_i2c_std,
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 92026e82e10e..dd0224f328ad 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -268,6 +268,20 @@ static void cx18_iounmap(struct cx18 *cx)
}
}
+static void cx18_eeprom_dump(struct cx18 *cx, unsigned char *eedata, int len)
+{
+ int i;
+
+ CX18_INFO("eeprom dump:\n");
+ for (i = 0; i < len; i++) {
+ if (0 == (i % 16))
+ CX18_INFO("eeprom %02x:", i);
+ printk(KERN_CONT " %02x", eedata[i]);
+ if (15 == (i % 16))
+ printk(KERN_CONT "\n");
+ }
+}
+
/* Hauppauge card? get values from tveeprom */
void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
{
@@ -279,8 +293,26 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
c.adapter = &cx->i2c_adap[0];
c.addr = 0xA0 >> 1;
- tveeprom_read(&c, eedata, sizeof(eedata));
- tveeprom_hauppauge_analog(&c, tv, eedata);
+ memset(tv, 0, sizeof(*tv));
+ if (tveeprom_read(&c, eedata, sizeof(eedata)))
+ return;
+
+ switch (cx->card->type) {
+ case CX18_CARD_HVR_1600_ESMT:
+ case CX18_CARD_HVR_1600_SAMSUNG:
+ tveeprom_hauppauge_analog(&c, tv, eedata);
+ break;
+ case CX18_CARD_YUAN_MPC718:
+ tv->model = 0x718;
+ cx18_eeprom_dump(cx, eedata, sizeof(eedata));
+ CX18_INFO("eeprom PCI ID: %02x%02x:%02x%02x\n",
+ eedata[2], eedata[1], eedata[4], eedata[3]);
+ break;
+ default:
+ tv->model = 0xffffffff;
+ cx18_eeprom_dump(cx, eedata, sizeof(eedata));
+ break;
+ }
}
static void cx18_process_eeprom(struct cx18 *cx)
@@ -298,6 +330,11 @@ static void cx18_process_eeprom(struct cx18 *cx)
case 74000 ... 74999:
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
break;
+ case 0x718:
+ return;
+ case 0xffffffff:
+ CX18_INFO("Unknown EEPROM encoding\n");
+ return;
case 0:
CX18_ERR("Invalid EEPROM\n");
return;
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index 6ea3fe623ef4..51a0c33b25b7 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -30,6 +30,10 @@
#include "s5h1409.h"
#include "mxl5005s.h"
#include "zl10353.h"
+
+#include <linux/firmware.h>
+#include "mt352.h"
+#include "mt352_priv.h"
#include "tuner-xc2028.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -38,6 +42,11 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define CX18_CLOCK_ENABLE2 0xc71024
#define CX18_DMUX_CLK_MASK 0x0080
+/*
+ * CX18_CARD_HVR_1600_ESMT
+ * CX18_CARD_HVR_1600_SAMSUNG
+ */
+
static struct mxl5005s_config hauppauge_hvr1600_tuner = {
.i2c_address = 0xC6 >> 1,
.if_freq = IF_FREQ_5380000HZ,
@@ -65,6 +74,9 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
};
+/*
+ * CX18_CARD_LEADTEK_DVR3100H
+ */
/* Information/confirmation of proper config values provided by Terry Wu */
static struct zl10353_config leadtek_dvr3100h_demod = {
.demod_address = 0x1e >> 1, /* Datasheet suggested straps */
@@ -74,6 +86,121 @@ static struct zl10353_config leadtek_dvr3100h_demod = {
.disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */
};
+/*
+ * CX18_CARD_YUAN_MPC718
+ */
+/*
+ * Due to
+ *
+ * 1. an absence of information on how to program the MT352
+ * 2. the Linux mt352 module pushing MT352 initialization off onto us here
+ *
+ * We have to use an init sequence that *you* must extract from the Windows
+ * driver (yuanrap.sys) and which we load as a firmware.
+ *
+ * If someone can provide me with a Zarlink MT352 (Intel CE6352?) Design Manual
+ * with chip programming details, then I can remove this annoyance.
+ */
+static int yuan_mpc718_mt352_reqfw(struct cx18_stream *stream,
+ const struct firmware **fw)
+{
+ struct cx18 *cx = stream->cx;
+ const char *fn = "dvb-cx18-mpc718-mt352.fw";
+ int ret;
+
+ ret = request_firmware(fw, fn, &cx->pci_dev->dev);
+ if (ret)
+ CX18_ERR("Unable to open firmware file %s\n", fn);
+ else {
+ size_t sz = (*fw)->size;
+ if (sz < 2 || sz > 64 || (sz % 2) != 0) {
+ CX18_ERR("Firmware %s has a bad size: %lu bytes\n",
+ fn, (unsigned long) sz);
+ ret = -EILSEQ;
+ release_firmware(*fw);
+ *fw = NULL;
+ }
+ }
+
+ if (ret) {
+ CX18_ERR("The MPC718 board variant with the MT352 DVB-T "
+ "demodulator will not work without it\n");
+ CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware "
+ "mpc718' if you need the firmware\n");
+ }
+ return ret;
+}
+
+static int yuan_mpc718_mt352_init(struct dvb_frontend *fe)
+{
+ struct cx18_dvb *dvb = container_of(fe->dvb,
+ struct cx18_dvb, dvb_adapter);
+ struct cx18_stream *stream = container_of(dvb, struct cx18_stream, dvb);
+ const struct firmware *fw = NULL;
+ int ret;
+ int i;
+ u8 buf[3];
+
+ ret = yuan_mpc718_mt352_reqfw(stream, &fw);
+ if (ret)
+ return ret;
+
+ /* Loop through all the register-value pairs in the firmware file */
+ for (i = 0; i < fw->size; i += 2) {
+ buf[0] = fw->data[i];
+ /* Intercept a few registers we want to set ourselves */
+ switch (buf[0]) {
+ case TRL_NOMINAL_RATE_0:
+ /* Set our custom OFDM bandwidth in the case below */
+ break;
+ case TRL_NOMINAL_RATE_1:
+ /* 6 MHz: 64/7 * 6/8 / 20.48 * 2^16 = 0x55b6.db6 */
+ /* 7 MHz: 64/7 * 7/8 / 20.48 * 2^16 = 0x6400 */
+ /* 8 MHz: 64/7 * 8/8 / 20.48 * 2^16 = 0x7249.249 */
+ buf[1] = 0x72;
+ buf[2] = 0x49;
+ mt352_write(fe, buf, 3);
+ break;
+ case INPUT_FREQ_0:
+ /* Set our custom IF in the case below */
+ break;
+ case INPUT_FREQ_1:
+ /* 4.56 MHz IF: (20.48 - 4.56)/20.48 * 2^14 = 0x31c0 */
+ buf[1] = 0x31;
+ buf[2] = 0xc0;
+ mt352_write(fe, buf, 3);
+ break;
+ default:
+ /* Pass through the register-value pair from the fw */
+ buf[1] = fw->data[i+1];
+ mt352_write(fe, buf, 2);
+ break;
+ }
+ }
+
+ buf[0] = (u8) TUNER_GO;
+ buf[1] = 0x01; /* Go */
+ mt352_write(fe, buf, 2);
+ release_firmware(fw);
+ return 0;
+}
+
+static struct mt352_config yuan_mpc718_mt352_demod = {
+ .demod_address = 0x1e >> 1,
+ .adc_clock = 20480, /* 20.480 MHz */
+ .if2 = 4560, /* 4.560 MHz */
+ .no_tuner = 1, /* XC3028 is not behind the gate */
+ .demod_init = yuan_mpc718_mt352_init,
+};
+
+static struct zl10353_config yuan_mpc718_zl10353_demod = {
+ .demod_address = 0x1e >> 1, /* Datasheet suggested straps */
+ .if2 = 45600, /* 4.560 MHz IF from the XC3028 */
+ .parallel_ts = 1, /* Not a serial TS */
+ .no_tuner = 1, /* XC3028 is not behind the gate */
+ .disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */
+};
+
static int dvb_register(struct cx18_stream *stream);
/* Kernel DVB framework calls this when the feed needs to start.
@@ -113,6 +240,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
break;
case CX18_CARD_LEADTEK_DVR3100H:
+ case CX18_CARD_YUAN_MPC718:
default:
/* Assumption - Parallel transport - Signalling
* undefined or default.
@@ -326,6 +454,38 @@ static int dvb_register(struct cx18_stream *stream)
fe->ops.tuner_ops.set_config(fe, &ctrl);
}
break;
+ case CX18_CARD_YUAN_MPC718:
+ /*
+ * TODO
+ * Apparently, these cards also could instead have a
+ * DiBcom demod supported by one of the dib7000 drivers
+ */
+ dvb->fe = dvb_attach(mt352_attach,
+ &yuan_mpc718_mt352_demod,
+ &cx->i2c_adap[1]);
+ if (dvb->fe == NULL)
+ dvb->fe = dvb_attach(zl10353_attach,
+ &yuan_mpc718_zl10353_demod,
+ &cx->i2c_adap[1]);
+ if (dvb->fe != NULL) {
+ struct dvb_frontend *fe;
+ struct xc2028_config cfg = {
+ .i2c_adap = &cx->i2c_adap[1],
+ .i2c_addr = 0xc2 >> 1,
+ .ctrl = NULL,
+ };
+ static struct xc2028_ctrl ctrl = {
+ .fname = XC2028_DEFAULT_FIRMWARE,
+ .max_len = 64,
+ .demod = XC3028_FE_ZARLINK456,
+ .type = XC2028_AUTO,
+ };
+
+ fe = dvb_attach(xc2028_attach, dvb->fe, &cfg);
+ if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
+ fe->ops.tuner_ops.set_config(fe, &ctrl);
+ }
+ break;
default:
/* No Digital Tv Support */
break;
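
The MT352 init path added above streams register/value byte pairs from the extracted firmware and only overrides the two timing registers whose values depend on the local configuration; the hexadecimal constants in its comments follow directly from the 20.48 MHz ADC clock and 4.56 MHz IF declared in yuan_mpc718_mt352_demod. A small standalone check of that arithmetic (userspace C; the clock, bandwidth and IF values are assumptions taken from the structs above):

#include <stdio.h>

int main(void)
{
	double adc_mhz = 20.48, bw_mhz = 8.0, if_mhz = 4.56;

	/* TRL nominal rate: 64/7 * BW/8 / ADC clock * 2^16, rounded */
	unsigned trl = (unsigned)(64.0 / 7.0 * bw_mhz / 8.0 / adc_mhz
				  * 65536.0 + 0.5);

	/* Input frequency: (ADC clock - IF) / ADC clock * 2^14, rounded */
	unsigned inp = (unsigned)((adc_mhz - if_mhz) / adc_mhz * 16384.0 + 0.5);

	printf("TRL_NOMINAL_RATE = 0x%04x\n", trl);	/* 0x7249 for 8 MHz */
	printf("INPUT_FREQ       = 0x%04x\n", inp);	/* 0x31c0 for 4.56 MHz IF */
	return 0;
}
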
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 29969c18949c..04d9c2508b86 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -690,7 +690,7 @@ int cx18_v4l2_open(struct file *filp)
int res;
struct video_device *video_dev = video_devdata(filp);
struct cx18_stream *s = video_get_drvdata(video_dev);
- struct cx18 *cx = s->cx;;
+ struct cx18 *cx = s->cx;
mutex_lock(&cx->serialize_lock);
if (cx18_init_on_first_open(cx)) {
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index d7b1921e6666..fc76e4d6ffa7 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -605,7 +605,7 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
if (ret)
return ret;
- if (inp < 0 || inp >= cx->nof_inputs)
+ if (inp >= cx->nof_inputs)
return -EINVAL;
if (inp == cx->active_input) {
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index 609bae6098d3..36503725d973 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -923,8 +923,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
- f->fmt.pix.pixelformat = dev->format->fourcc;;
- f->fmt.pix.bytesperline = (dev->width * dev->format->depth + 7) >> 3;;
+ f->fmt.pix.pixelformat = dev->format->fourcc;
+ f->fmt.pix.bytesperline = (dev->width * dev->format->depth + 7) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * dev->height;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 48a975134ac5..86ac529e62be 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -463,6 +463,30 @@ static struct xc5000_config mygica_x8506_xc5000_config = {
.if_khz = 5380,
};
+static int cx23885_dvb_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *param)
+{
+ struct cx23885_tsport *port = fe->dvb->priv;
+ struct cx23885_dev *dev = port->dev;
+
+ switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1275:
+ switch (param->u.vsb.modulation) {
+ case VSB_8:
+ cx23885_gpio_clear(dev, GPIO_5);
+ break;
+ case QAM_64:
+ case QAM_256:
+ default:
+ cx23885_gpio_set(dev, GPIO_5);
+ break;
+ }
+ break;
+ }
+ return (port->set_frontend_save) ?
+ port->set_frontend_save(fe, param) : -ENODEV;
+}
+
static int dvb_register(struct cx23885_tsport *port)
{
struct cx23885_dev *dev = port->dev;
@@ -502,6 +526,12 @@ static int dvb_register(struct cx23885_tsport *port)
0x60, &dev->i2c_bus[1].i2c_adap,
&hauppauge_hvr127x_config);
}
+
+ /* FIXME: temporary hack */
+ /* define bridge override to set_frontend */
+ port->set_frontend_save = fe0->dvb.frontend->ops.set_frontend;
+ fe0->dvb.frontend->ops.set_frontend = cx23885_dvb_set_frontend;
+
break;
case CX23885_BOARD_HAUPPAUGE_HVR1255:
i2c_bus = &dev->i2c_bus[0];
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index 1a2ac518a3f1..76a51509d8fb 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -288,6 +288,10 @@ struct cx23885_tsport {
/* Allow a single tsport to have multiple frontends */
u32 num_frontends;
void *port_priv;
+
+ /* FIXME: temporary hack */
+ int (*set_frontend_save) (struct dvb_frontend *,
+ struct dvb_frontend_parameters *);
};
struct cx23885_dev {
@@ -391,7 +395,7 @@ struct sram_channel {
u32 cmds_start;
u32 ctrl_start;
u32 cdt;
- u32 fifo_start;;
+ u32 fifo_start;
u32 fifo_size;
u32 ptr1_reg;
u32 ptr2_reg;
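
The "temporary hack" above saves the demodulator's ops.set_frontend pointer in the tsport and substitutes a bridge-level wrapper that toggles GPIO 5 per modulation before delegating to the saved callback. Stripped of the DVB specifics, the save-and-wrap pattern looks like this (illustrative userspace C; none of these names come from the driver):

#include <stdio.h>

struct frontend;
typedef int (*set_frontend_fn)(struct frontend *);

struct frontend {
	set_frontend_fn set_frontend;	/* demod-provided operation */
};

static set_frontend_fn set_frontend_save;	/* saved original */

static int demod_set_frontend(struct frontend *fe)
{
	puts("demod: tuning");
	return 0;
}

static int bridge_set_frontend(struct frontend *fe)
{
	puts("bridge: flip GPIO for current modulation");	/* board-specific step */
	return set_frontend_save ? set_frontend_save(fe) : -1;	/* then delegate */
}

int main(void)
{
	struct frontend fe = { .set_frontend = demod_set_frontend };

	set_frontend_save = fe.set_frontend;	/* save original */
	fe.set_frontend = bridge_set_frontend;	/* install wrapper */
	return fe.set_frontend(&fe);
}
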
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index 16a5af30e9d1..6524b493e033 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -8,6 +8,8 @@ config VIDEO_EM28XX
select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO
+ select VIDEO_MT9V011 if VIDEO_HELPER_CHIPS_AUTO
+
---help---
This is a video4linux driver for Empia 28xx based TV cards.
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index c43fdb9bc888..03b9af757d18 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -58,6 +58,8 @@ static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
+#define MT9V011_VERSION 0x8243
+
/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS */
static unsigned long em28xx_devused;
@@ -191,6 +193,13 @@ static struct em28xx_reg_seq terratec_av350_unmute_gpio[] = {
{EM28XX_R08_GPIO, 0xff, 0xff, 10},
{ -1, -1, -1, -1},
};
+
+static struct em28xx_reg_seq silvercrest_reg_seq[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM28XX_R08_GPIO, 0x01, 0xf7, 10},
+ { -1, -1, -1, -1},
+};
+
/*
* Board definitions
*/
@@ -438,6 +447,18 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
} },
},
+ [EM2820_BOARD_SILVERCREST_WEBCAM] = {
+ .name = "Silvercrest Webcam 1.3mpix",
+ .tuner_type = TUNER_ABSENT,
+ .is_27xx = 1,
+ .decoder = EM28XX_MT9V011,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = 0,
+ .amux = EM28XX_AMUX_VIDEO,
+ .gpio = silvercrest_reg_seq,
+ } },
+ },
[EM2821_BOARD_SUPERCOMP_USB_2] = {
.name = "Supercomp USB 2.0 TV",
.valid = EM28XX_BOARD_NOT_VALIDATED,
@@ -826,7 +847,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_gpio = default_tuner_gpio,
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
- .dvb_gpio = default_analog,
+ .dvb_gpio = default_digital,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1603,6 +1624,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2861_BOARD_PLEXTOR_PX_TV100U },
{ USB_DEVICE(0x04bb, 0x0515),
.driver_info = EM2820_BOARD_IODATA_GVMVP_SZ },
+ { USB_DEVICE(0xeb1a, 0x50a6),
+ .driver_info = EM2860_BOARD_GADMEI_UTV330 },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -1639,6 +1662,11 @@ static unsigned short tvp5150_addrs[] = {
I2C_CLIENT_END
};
+static unsigned short mt9v011_addrs[] = {
+ 0xba >> 1,
+ I2C_CLIENT_END
+};
+
static unsigned short msp3400_addrs[] = {
0x80 >> 1,
0x88 >> 1,
@@ -1678,6 +1706,46 @@ static inline void em28xx_set_model(struct em28xx *dev)
EM28XX_I2C_FREQ_100_KHZ;
}
+/* HINT method: webcam I2C chips
+ *
+ * This method works for webcams with Micron sensors
+ */
+static int em28xx_hint_sensor(struct em28xx *dev)
+{
+ int rc;
+ char *sensor_name;
+ unsigned char cmd;
+ __be16 version_be;
+ u16 version;
+
+ if (dev->model != EM2820_BOARD_UNKNOWN)
+ return 0;
+
+ dev->i2c_client.addr = 0xba >> 1;
+ cmd = 0;
+ i2c_master_send(&dev->i2c_client, &cmd, 1);
+ rc = i2c_master_recv(&dev->i2c_client, (char *)&version_be, 2);
+ if (rc != 2)
+ return -EINVAL;
+
+ version = be16_to_cpu(version_be);
+
+ switch (version) {
+ case MT9V011_VERSION:
+ dev->model = EM2820_BOARD_SILVERCREST_WEBCAM;
+ sensor_name = "mt9v011";
+ break;
+ default:
+ printk(KERN_INFO "Unknown Sensor 0x%04x\n", version);
+ return -EINVAL;
+ }
+
+ em28xx_errdev("Sensor is %s, assuming that webcam is %s\n",
+ sensor_name, em28xx_boards[dev->model].name);
+
+ return 0;
+}
+
/* Since em28xx_pre_card_setup() requires a proper dev->model,
* this won't work for boards with generic PCI IDs
*/
@@ -1706,7 +1774,10 @@ void em28xx_pre_card_setup(struct em28xx *dev)
em28xx_info("chip ID is em2750\n");
break;
case CHIP_ID_EM2820:
- em28xx_info("chip ID is em2820\n");
+ if (dev->board.is_27xx)
+ em28xx_info("chip is em2710\n");
+ else
+ em28xx_info("chip ID is em2820\n");
break;
case CHIP_ID_EM2840:
em28xx_info("chip ID is em2840\n");
@@ -2158,6 +2229,10 @@ void em28xx_card_setup(struct em28xx *dev)
before probing the i2c bus. */
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
break;
+ case EM2820_BOARD_SILVERCREST_WEBCAM:
+ /* FIXME: need to document the registers below */
+ em28xx_write_reg(dev, 0x0d, 0x42);
+ em28xx_write_reg(dev, 0x13, 0x08);
}
if (dev->board.has_snapshot_button)
@@ -2189,6 +2264,10 @@ void em28xx_card_setup(struct em28xx *dev)
v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"tvp5150", "tvp5150", tvp5150_addrs);
+ if (dev->board.decoder == EM28XX_MT9V011)
+ v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap,
+ "mt9v011", "mt9v011", mt9v011_addrs);
+
if (dev->board.adecoder == EM28XX_TVAUDIO)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"tvaudio", "tvaudio", dev->board.tvaudio_addr);
@@ -2333,6 +2412,8 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
return errCode;
}
+ em28xx_hint_sensor(dev);
+
/* Do board specific init and eeprom reading */
em28xx_card_setup(dev);
@@ -2573,6 +2654,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
retval = em28xx_init_dev(&dev, udev, interface, nr);
if (retval) {
em28xx_devused &= ~(1<<dev->devno);
+ mutex_unlock(&dev->lock);
kfree(dev);
goto err;
}
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index c8d7ce8fbd36..079ab4d563a6 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -648,17 +648,28 @@ int em28xx_capture_start(struct em28xx *dev, int start)
int em28xx_set_outfmt(struct em28xx *dev)
{
int ret;
+ int vinmode, vinctl, outfmt;
+
+ outfmt = dev->format->reg;
+
+ if (dev->board.is_27xx) {
+ vinmode = 0x0d;
+ vinctl = 0x00;
+ } else {
+ vinmode = 0x10;
+ vinctl = 0x11;
+ }
ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT,
- dev->format->reg | 0x20, 0x3f);
+ outfmt | 0x20, 0xff);
if (ret < 0)
- return ret;
+ return ret;
- ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, 0x10);
+ ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, vinmode);
if (ret < 0)
return ret;
- return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, 0x11);
+ return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctl);
}
static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
@@ -695,13 +706,19 @@ static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v)
{
u8 mode;
/* the em2800 scaler only supports scaling down to 50% */
- if (dev->board.is_em2800)
+
+ if (dev->board.is_27xx) {
+ /* FIXME: Don't use the scaler yet */
+ mode = 0;
+ } else if (dev->board.is_em2800) {
mode = (v ? 0x20 : 0x00) | (h ? 0x10 : 0x00);
- else {
+ } else {
u8 buf[2];
+
buf[0] = h;
buf[1] = h >> 8;
em28xx_write_regs(dev, EM28XX_R30_HSCALELOW, (char *)buf, 2);
+
buf[0] = v;
buf[1] = v >> 8;
em28xx_write_regs(dev, EM28XX_R32_VSCALELOW, (char *)buf, 2);
@@ -720,8 +737,11 @@ int em28xx_resolution_set(struct em28xx *dev)
height = norm_maxh(dev) >> 1;
em28xx_set_outfmt(dev);
+
+
em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2);
+
return em28xx_scaler_set(dev, dev->hscale, dev->vscale);
}
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index e7b47c8da8f3..3da97c32b8fa 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -243,6 +243,14 @@ static struct s5h1409_config em28xx_s5h1409_with_xc3028 = {
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
};
+static struct zl10353_config em28xx_terratec_xs_zl10353_xc3028 = {
+ .demod_address = (0x1e >> 1),
+ .no_tuner = 1,
+ .disable_i2c_gate_ctrl = 1,
+ .parallel_ts = 1,
+ .if2 = 45600,
+};
+
#ifdef EM28XX_DRX397XD_SUPPORT
/* [TODO] djh - not sure yet what the device config needs to contain */
static struct drx397xD_config em28xx_drx397xD_with_xc3028 = {
@@ -433,7 +441,6 @@ static int dvb_init(struct em28xx *dev)
}
break;
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
- case EM2880_BOARD_TERRATEC_HYBRID_XS:
case EM2880_BOARD_KWORLD_DVB_310U:
case EM2880_BOARD_EMPIRE_DUAL_TV:
dvb->frontend = dvb_attach(zl10353_attach,
@@ -444,6 +451,25 @@ static int dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2880_BOARD_TERRATEC_HYBRID_XS:
+ dvb->frontend = dvb_attach(zl10353_attach,
+ &em28xx_terratec_xs_zl10353_xc3028,
+ &dev->i2c_adap);
+ if (dvb->frontend == NULL) {
+ /* This board could have either a zl10353 or a mt352.
+ If the chip id isn't for zl10353, try mt352 */
+
+ /* FIXME: make support for mt352 work */
+ printk(KERN_ERR "version of this board with mt352 not "
+ "currently supported\n");
+ result = -EINVAL;
+ goto out_free;
+ }
+ if (attach_xc3028(0x61, dev) < 0) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
case EM2883_BOARD_KWORLD_HYBRID_330U:
case EM2882_BOARD_EVGA_INDTUBE:
dvb->frontend = dvb_attach(s5h1409_attach,
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 2c86fcf089f5..27e33a287dfc 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -483,7 +483,7 @@ static char *i2c_devs[128] = {
[0xa0 >> 1] = "eeprom",
[0xb0 >> 1] = "tda9874",
[0xb8 >> 1] = "tvp5150a",
- [0xba >> 1] = "tvp5150a",
+ [0xba >> 1] = "webcam sensor or tvp5150a",
[0xc0 >> 1] = "tuner (analog)",
[0xc2 >> 1] = "tuner (analog)",
[0xc4 >> 1] = "tuner (analog)",
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 8fe1beecfffa..14316c912179 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -90,10 +90,35 @@ MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
/* supported video standards */
static struct em28xx_fmt format[] = {
{
- .name = "16bpp YUY2, 4:2:2, packed",
+ .name = "16 bpp YUY2, 4:2:2, packed",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.reg = EM28XX_OUTFMT_YUV422_Y0UY1V,
+ }, {
+ .name = "16 bpp RGB 565, LE",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .reg = EM28XX_OUTFMT_RGB_16_656,
+ }, {
+ .name = "8 bpp Bayer BGBG..GRGR",
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .depth = 8,
+ .reg = EM28XX_OUTFMT_RGB_8_BGBG,
+ }, {
+ .name = "8 bpp Bayer GRGR..BGBG",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .depth = 8,
+ .reg = EM28XX_OUTFMT_RGB_8_GRGR,
+ }, {
+ .name = "8 bpp Bayer GBGB..RGRG",
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .depth = 8,
+ .reg = EM28XX_OUTFMT_RGB_8_GBGB,
+ }, {
+ .name = "12 bpp YUV411",
+ .fourcc = V4L2_PIX_FMT_YUV411P,
+ .depth = 12,
+ .reg = EM28XX_OUTFMT_YUV411,
},
};
@@ -701,7 +726,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
}
- if (dev->board.is_em2800) {
+ if (dev->board.is_27xx) {
+ /* FIXME: This is the only supported fmt */
+ width = 640;
+ height = 480;
+ } else if (dev->board.is_em2800) {
/* the em2800 can only scale down to 50% */
height = height > (3 * maxh / 4) ? maxh : maxh / 2;
width = width > (3 * maxw / 4) ? maxw : maxw / 2;
@@ -733,13 +762,40 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
+ unsigned width, unsigned height)
+{
+ struct em28xx_fmt *fmt;
+
+ /* FIXME: This is the only supported fmt */
+ if (dev->board.is_27xx) {
+ width = 640;
+ height = 480;
+ }
+
+ fmt = format_by_fourcc(fourcc);
+ if (!fmt)
+ return -EINVAL;
+
+ dev->format = fmt;
+ dev->width = width;
+ dev->height = height;
+
+ /* set new image size */
+ get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
+
+ em28xx_set_alternate(dev);
+ em28xx_resolution_set(dev);
+
+ return 0;
+}
+
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
int rc;
- struct em28xx_fmt *fmt;
rc = check_dev(dev);
if (rc < 0)
@@ -749,12 +805,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
vidioc_try_fmt_vid_cap(file, priv, f);
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmt) {
- rc = -EINVAL;
- goto out;
- }
-
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
em28xx_errdev("%s queue busy\n", __func__);
rc = -EBUSY;
@@ -767,16 +817,8 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
goto out;
}
- /* set new image size */
- dev->width = f->fmt.pix.width;
- dev->height = f->fmt.pix.height;
- dev->format = fmt;
- get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
-
- em28xx_set_alternate(dev);
- em28xx_resolution_set(dev);
-
- rc = 0;
+ rc = em28xx_set_video_format(dev, f->fmt.pix.pixelformat,
+ f->fmt.pix.width, f->fmt.pix.height);
out:
mutex_unlock(&dev->lock);
@@ -1616,11 +1658,6 @@ static int em28xx_v4l2_open(struct file *filp)
filp->private_data = fh;
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
- dev->width = norm_maxw(dev);
- dev->height = norm_maxh(dev);
- dev->hscale = 0;
- dev->vscale = 0;
-
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
em28xx_set_alternate(dev);
em28xx_resolution_set(dev);
@@ -1962,15 +1999,14 @@ int em28xx_register_analog_devices(struct em28xx *dev)
/* set default norm */
dev->norm = em28xx_video_template.current_norm;
- dev->width = norm_maxw(dev);
- dev->height = norm_maxh(dev);
dev->interlaced = EM28XX_INTERLACED_DEFAULT;
- dev->hscale = 0;
- dev->vscale = 0;
dev->ctl_input = 0;
/* Analog specific initialization */
dev->format = &format[0];
+ em28xx_set_video_format(dev, format[0].fourcc,
+ norm_maxw(dev), norm_maxh(dev));
+
video_mux(dev, dev->ctl_input);
/* Audio defaults */
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 813ce45c2f99..d90fef463764 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -107,6 +107,7 @@
#define EM2860_BOARD_TERRATEC_AV350 68
#define EM2882_BOARD_KWORLD_ATSC_315U 69
#define EM2882_BOARD_EVGA_INDTUBE 70
+#define EM2820_BOARD_SILVERCREST_WEBCAM 71
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -360,6 +361,7 @@ enum em28xx_decoder {
EM28XX_NODECODER,
EM28XX_TVP5150,
EM28XX_SAA711X,
+ EM28XX_MT9V011,
};
enum em28xx_adecoder {
@@ -388,6 +390,7 @@ struct em28xx_board {
unsigned int max_range_640_480:1;
unsigned int has_dvb:1;
unsigned int has_snapshot_button:1;
+ unsigned int is_27xx:1;
unsigned int valid:1;
unsigned char xclk, i2c_speed;
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index 9df7137fe67e..992ce530f138 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -36,10 +36,6 @@
#define STV_ISOC_ENDPOINT_ADDR 0x81
-#ifndef V4L2_PIX_FMT_SGRBG8
-#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G')
-#endif
-
#define STV_REG23 0x0423
/* Control registers of the STV0600 ASIC */
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 558f8a837ff4..fa360d3c22fa 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -218,7 +218,7 @@ MODULE_PARM_DESC(ivtv_yuv_mode,
"\t\t\tDefault: 0 (interlaced)");
MODULE_PARM_DESC(ivtv_yuv_threshold,
"If ivtv_yuv_mode is 2 (auto) then playback content as\n\t\tprogressive if src height <= ivtv_yuvthreshold\n"
- "\t\t\tDefault: 480");;
+ "\t\t\tDefault: 480");
MODULE_PARM_DESC(enc_mpg_buffers,
"Encoder MPG Buffers (in MB)\n"
"\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_MPG_BUFFERS));
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 1d66855a379a..d0765bed79c9 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1915,8 +1915,7 @@ static void __devexit meye_remove(struct pci_dev *pcidev)
}
static struct pci_device_id meye_pci_tbl[] = {
- { PCI_VENDOR_ID_KAWASAKI, PCI_DEVICE_ID_MCHIP_KL5A72002,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VDEVICE(KAWASAKI, PCI_DEVICE_ID_MCHIP_KL5A72002), 0 },
{ }
};
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
new file mode 100644
index 000000000000..1fe8fc9183a7
--- /dev/null
+++ b/drivers/media/video/mt9v011.c
@@ -0,0 +1,431 @@
+/*
+ * mt9v011 - Micron 1/4-Inch VGA Digital Image Sensor
+ *
+ * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
+ * This code is placed under the terms of the GNU General Public License v2
+ */
+
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+#include <media/v4l2-device.h>
+#include "mt9v011.h"
+#include <media/v4l2-i2c-drv.h>
+#include <media/v4l2-chip-ident.h>
+
+MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_LICENSE("GPL");
+
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* supported controls */
+static struct v4l2_queryctrl mt9v011_qctrl[] = {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = (1 << 10) - 1,
+ .step = 1,
+ .default_value = 0x0020,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = -1 << 9,
+ .maximum = (1 << 9) - 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = -1 << 9,
+ .maximum = (1 << 9) - 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = 0,
+ },
+};
+
+struct mt9v011 {
+ struct v4l2_subdev sd;
+ unsigned width, height;
+
+ u16 global_gain, red_bal, blue_bal;
+};
+
+static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mt9v011, sd);
+}
+
+static int mt9v011_read(struct v4l2_subdev *sd, unsigned char addr)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ __be16 buffer;
+ int rc, val;
+
+ rc = i2c_master_send(c, &addr, 1);
+ if (rc != 1)
+ v4l2_dbg(0, debug, sd,
+ "i2c i/o error: rc == %d (should be 1)\n", rc);
+
+ msleep(10);
+
+ rc = i2c_master_recv(c, (char *)&buffer, 2);
+ if (rc != 2)
+ v4l2_dbg(0, debug, sd,
+ "i2c i/o error: rc == %d (should be 2)\n", rc);
+
+ val = be16_to_cpu(buffer);
+
+ v4l2_dbg(2, debug, sd, "mt9v011: read 0x%02x = 0x%04x\n", addr, val);
+
+ return val;
+}
+
+static void mt9v011_write(struct v4l2_subdev *sd, unsigned char addr,
+ u16 value)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ unsigned char buffer[3];
+ int rc;
+
+ buffer[0] = addr;
+ buffer[1] = value >> 8;
+ buffer[2] = value & 0xff;
+
+ v4l2_dbg(2, debug, sd,
+ "mt9v011: writing 0x%02x 0x%04x\n", buffer[0], value);
+ rc = i2c_master_send(c, buffer, 3);
+ if (rc != 3)
+ v4l2_dbg(0, debug, sd,
+ "i2c i/o error: rc == %d (should be 3)\n", rc);
+}
+
+
+struct i2c_reg_value {
+ unsigned char reg;
+ u16 value;
+};
+
+/*
+ * Values used at the original driver
+ * Some values are marked as Reserved at the datasheet
+ */
+static const struct i2c_reg_value mt9v011_init_default[] = {
+ { R0D_MT9V011_RESET, 0x0001 },
+ { R0D_MT9V011_RESET, 0x0000 },
+
+ { R0C_MT9V011_SHUTTER_DELAY, 0x0000 },
+ { R09_MT9V011_SHUTTER_WIDTH, 0x1fc },
+
+ { R0A_MT9V011_CLK_SPEED, 0x0000 },
+ { R1E_MT9V011_DIGITAL_ZOOM, 0x0000 },
+ { R20_MT9V011_READ_MODE, 0x1000 },
+
+ { R07_MT9V011_OUT_CTRL, 0x000a }, /* chip enable */
+};
+
+static void set_balance(struct v4l2_subdev *sd)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+ u16 green1_gain, green2_gain, blue_gain, red_gain;
+
+ green1_gain = core->global_gain;
+ green2_gain = core->global_gain;
+
+ blue_gain = core->global_gain +
+ core->global_gain * core->blue_bal / (1 << 9);
+
+ red_gain = core->global_gain +
+ core->global_gain * core->red_bal / (1 << 9);
+
+ mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green1_gain);
+ mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green1_gain);
+ mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain);
+ mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain);
+}
+
+static void set_res(struct v4l2_subdev *sd)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+ unsigned vstart, hstart;
+
+ /*
+ * The mt9v011 doesn't have scaling. So, in order to select the desired
+ * resolution, we crop at the middle of the sensor.
+ * hblank and vblank should be adjusted, in order to guarantee that
+ * we'll preserve the line timings for 30 fps, no matter what resolution
+ * is selected.
+ * NOTE: datasheet says that width (and height) should be filled with
+ * width-1. However, this doesn't work, since one pixel per line will
+ * be missing.
+ */
+
+ hstart = 14 + (640 - core->width) / 2;
+ mt9v011_write(sd, R02_MT9V011_COLSTART, hstart);
+ mt9v011_write(sd, R04_MT9V011_WIDTH, core->width);
+ mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width);
+
+ vstart = 8 + (480 - core->height) / 2;
+ mt9v011_write(sd, R01_MT9V011_ROWSTART, vstart);
+ mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height);
+ mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height);
+};
+
+static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt9v011_init_default); i++)
+ mt9v011_write(sd, mt9v011_init_default[i].reg,
+ mt9v011_init_default[i].value);
+
+ set_balance(sd);
+ set_res(sd);
+
+ return 0;
+};
+
+static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+
+ v4l2_dbg(1, debug, sd, "g_ctrl called\n");
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ctrl->value = core->global_gain;
+ return 0;
+ case V4L2_CID_RED_BALANCE:
+ ctrl->value = core->red_bal;
+ return 0;
+ case V4L2_CID_BLUE_BALANCE:
+ ctrl->value = core->blue_bal;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+ u8 i, n;
+ n = ARRAY_SIZE(mt9v011_qctrl);
+
+ for (i = 0; i < n; i++) {
+ if (ctrl->id != mt9v011_qctrl[i].id)
+ continue;
+ if (ctrl->value < mt9v011_qctrl[i].minimum ||
+ ctrl->value > mt9v011_qctrl[i].maximum)
+ return -ERANGE;
+ v4l2_dbg(1, debug, sd, "s_ctrl: id=%d, value=%d\n",
+ ctrl->id, ctrl->value);
+ break;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ core->global_gain = ctrl->value;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ core->red_bal = ctrl->value;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ core->blue_bal = ctrl->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ set_balance(sd);
+
+ return 0;
+}
+
+static int mt9v011_enum_fmt(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index > 0)
+ return -EINVAL;
+
+ fmt->flags = 0;
+ strcpy(fmt->description, "8 bpp Bayer GRGR..BGBG");
+ fmt->pixelformat = V4L2_PIX_FMT_SGRBG8;
+
+ return 0;
+}
+
+static int mt9v011_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->pixelformat != V4L2_PIX_FMT_SGRBG8)
+ return -EINVAL;
+
+ v4l_bound_align_image(&pix->width, 48, 639, 1,
+ &pix->height, 32, 480, 1, 0);
+
+ return 0;
+}
+
+static int mt9v011_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ struct mt9v011 *core = to_mt9v011(sd);
+ int rc;
+
+ rc = mt9v011_try_fmt(sd, fmt);
+ if (rc < 0)
+ return -EINVAL;
+
+ core->width = pix->width;
+ core->height = pix->height;
+
+ set_res(sd);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int mt9v011_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!v4l2_chip_match_i2c_client(client, &reg->match))
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ reg->val = mt9v011_read(sd, reg->reg & 0xff);
+ reg->size = 2;
+
+ return 0;
+}
+
+static int mt9v011_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!v4l2_chip_match_i2c_client(client, &reg->match))
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ mt9v011_write(sd, reg->reg & 0xff, reg->val & 0xffff);
+
+ return 0;
+}
+#endif
+
+static int mt9v011_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_MT9V011,
+ MT9V011_VERSION);
+}
+
+static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
+ .g_ctrl = mt9v011_g_ctrl,
+ .s_ctrl = mt9v011_s_ctrl,
+ .reset = mt9v011_reset,
+ .g_chip_ident = mt9v011_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = mt9v011_g_register,
+ .s_register = mt9v011_s_register,
+#endif
+};
+
+static const struct v4l2_subdev_video_ops mt9v011_video_ops = {
+ .enum_fmt = mt9v011_enum_fmt,
+ .try_fmt = mt9v011_try_fmt,
+ .s_fmt = mt9v011_s_fmt,
+};
+
+static const struct v4l2_subdev_ops mt9v011_ops = {
+ .core = &mt9v011_core_ops,
+ .video = &mt9v011_video_ops,
+};
+
+
+/****************************************************************************
+ I2C Client & Driver
+ ****************************************************************************/
+
+static int mt9v011_probe(struct i2c_client *c,
+ const struct i2c_device_id *id)
+{
+ u16 version;
+ struct mt9v011 *core;
+ struct v4l2_subdev *sd;
+
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(c->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ return -EIO;
+
+ core = kzalloc(sizeof(struct mt9v011), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ sd = &core->sd;
+ v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);
+
+ /* Check if the sensor is really a MT9V011 */
+ version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
+ if (version != MT9V011_VERSION) {
+ v4l2_info(sd, "*** unknown micron chip detected (0x%04x).\n",
+ version);
+ kfree(core);
+ return -EINVAL;
+ }
+
+ core->global_gain = 0x0024;
+ core->width = 640;
+ core->height = 480;
+
+ v4l_info(c, "chip found @ 0x%02x (%s)\n",
+ c->addr << 1, c->adapter->name);
+
+ return 0;
+}
+
+static int mt9v011_remove(struct i2c_client *c)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(c);
+
+ v4l2_dbg(1, debug, sd,
+ "mt9v011.c: removing mt9v011 adapter on address 0x%x\n",
+ c->addr << 1);
+
+ v4l2_device_unregister_subdev(sd);
+ kfree(to_mt9v011(sd));
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static const struct i2c_device_id mt9v011_id[] = {
+ { "mt9v011", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mt9v011_id);
+
+static struct v4l2_i2c_driver_data v4l2_i2c_data = {
+ .name = "mt9v011",
+ .probe = mt9v011_probe,
+ .remove = mt9v011_remove,
+ .id_table = mt9v011_id,
+};
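For readers following the subdev calls above: to_mt9v011() and the &core->sd assignment in mt9v011_probe() rely on the usual pattern of embedding a v4l2_subdev inside the driver's private state. The struct itself is declared earlier in mt9v011.c and is not part of this hunk, so the sketch below is an assumption about its shape (field types included), not the literal driver code:

#include <linux/kernel.h>
#include <media/v4l2-subdev.h>

/* Assumed layout: a v4l2_subdev embedded in the private state, plus the
 * fields referenced by the ops above (width/height, gain, balance). */
struct mt9v011 {
        struct v4l2_subdev sd;
        unsigned width, height;
        u16 global_gain, red_bal, blue_bal;
};

static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd)
{
        /* v4l2_i2c_subdev_init() binds sd to the i2c client; the ops
         * recover the private state with container_of(). */
        return container_of(sd, struct mt9v011, sd);
}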
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h
new file mode 100644
index 000000000000..9e443ee30558
--- /dev/null
+++ b/drivers/media/video/mt9v011.h
@@ -0,0 +1,35 @@
+/*
+ * mt9v011 - Micron 1/4-Inch VGA Digital Image Sensor
+ *
+ * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
+ * This code is placed under the terms of the GNU General Public License v2
+ */
+
+#ifndef MT9V011_H_
+#define MT9V011_H_
+
+#define R00_MT9V011_CHIP_VERSION 0x00
+#define R01_MT9V011_ROWSTART 0x01
+#define R02_MT9V011_COLSTART 0x02
+#define R03_MT9V011_HEIGHT 0x03
+#define R04_MT9V011_WIDTH 0x04
+#define R05_MT9V011_HBLANK 0x05
+#define R06_MT9V011_VBLANK 0x06
+#define R07_MT9V011_OUT_CTRL 0x07
+#define R09_MT9V011_SHUTTER_WIDTH 0x09
+#define R0A_MT9V011_CLK_SPEED 0x0a
+#define R0B_MT9V011_RESTART 0x0b
+#define R0C_MT9V011_SHUTTER_DELAY 0x0c
+#define R0D_MT9V011_RESET 0x0d
+#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
+#define R20_MT9V011_READ_MODE 0x20
+#define R2B_MT9V011_GREEN_1_GAIN 0x2b
+#define R2C_MT9V011_BLUE_GAIN 0x2c
+#define R2D_MT9V011_RED_GAIN 0x2d
+#define R2E_MT9V011_GREEN_2_GAIN 0x2e
+#define R35_MT9V011_GLOBAL_GAIN 0x35
+#define RF1_MT9V011_CHIP_ENABLE 0xf1
+
+#define MT9V011_VERSION 0x8243
+
+#endif
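The register map above also makes it possible to reconstruct the part of set_res() that this patch shows only the tail of (the R03/R06 height pair at the very top of the hunk). A plausible full helper is sketched below; the width/HBLANK half and the 782-pixel line total are assumptions, only the height/VBLANK half is taken from the patch:

static void set_res(struct v4l2_subdev *sd)
{
        struct mt9v011 *core = to_mt9v011(sd);

        /* Assumed horizontal half, mirroring the vertical one below;
         * 782 is a guess at the sensor's total line length. */
        mt9v011_write(sd, R04_MT9V011_WIDTH, core->width);
        mt9v011_write(sd, R05_MT9V011_HBLANK, 782 - core->width);

        /* Vertical half as shown in the hunk above. */
        mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height);
        mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height);
}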
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 0be6f814f539..bcd632003f48 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -136,12 +136,6 @@
#define DEVICE_USE_CODEC3(x) ((x)>=700)
#define DEVICE_USE_CODEC23(x) ((x)>=675)
-
-#ifndef V4L2_PIX_FMT_PWC1
-#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P','W','C','1')
-#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P','W','C','2')
-#endif
-
/* The following structures were based on cpia.h. Why reinvent the wheel? :-) */
struct pwc_iso_buf
{
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index ba87128542e0..bf4c99d19150 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1790,7 +1790,7 @@ static int saa7134_s_input(struct file *file, void *priv, unsigned int i)
if (0 != err)
return err;
- if (i < 0 || i >= SAA7134_INPUT_MAX)
+ if (i >= SAA7134_INPUT_MAX)
return -EINVAL;
if (NULL == card_in(dev, i).name)
return -EINVAL;
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 16f595d4337a..9f5ae8167855 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -237,11 +237,11 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
return -ENOMEM;
icd->num_user_formats = fmts;
- fmts = 0;
dev_dbg(&icd->dev, "Found %d supported formats.\n", fmts);
/* Second pass - actually fill data formats */
+ fmts = 0;
for (i = 0; i < icd->num_formats; i++)
if (!ici->ops->get_formats) {
icd->user_formats[i].host_fmt = icd->formats + i;
@@ -877,8 +877,11 @@ static int soc_camera_probe(struct device *dev)
(unsigned short)~0;
ret = soc_camera_init_user_formats(icd);
- if (ret < 0)
+ if (ret < 0) {
+ if (icd->ops->remove)
+ icd->ops->remove(icd);
goto eiufmt;
+ }
icd->height = DEFAULT_HEIGHT;
icd->width = DEFAULT_WIDTH;
@@ -902,8 +905,10 @@ static int soc_camera_remove(struct device *dev)
{
struct soc_camera_device *icd = to_soc_camera_dev(dev);
+ mutex_lock(&icd->video_lock);
if (icd->ops->remove)
icd->ops->remove(icd);
+ mutex_unlock(&icd->video_lock);
soc_camera_free_user_formats(icd);
@@ -1145,6 +1150,7 @@ evidallocd:
}
EXPORT_SYMBOL(soc_camera_video_start);
+/* Called from client .remove() methods with .video_lock held */
void soc_camera_video_stop(struct soc_camera_device *icd)
{
struct video_device *vdev = icd->vdev;
@@ -1154,10 +1160,8 @@ void soc_camera_video_stop(struct soc_camera_device *icd)
if (!icd->dev.parent || !vdev)
return;
- mutex_lock(&icd->video_lock);
video_unregister_device(vdev);
icd->vdev = NULL;
- mutex_unlock(&icd->video_lock);
}
EXPORT_SYMBOL(soc_camera_video_stop);
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 36a6ba92df27..70043b1704fb 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -34,7 +34,7 @@
static struct uvc_control_info uvc_ctrls[] = {
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_BRIGHTNESS_CONTROL,
+ .selector = UVC_PU_BRIGHTNESS_CONTROL,
.index = 0,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -42,7 +42,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_CONTRAST_CONTROL,
+ .selector = UVC_PU_CONTRAST_CONTROL,
.index = 1,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -50,7 +50,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_HUE_CONTROL,
+ .selector = UVC_PU_HUE_CONTROL,
.index = 2,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -58,7 +58,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_SATURATION_CONTROL,
+ .selector = UVC_PU_SATURATION_CONTROL,
.index = 3,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -66,7 +66,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_SHARPNESS_CONTROL,
+ .selector = UVC_PU_SHARPNESS_CONTROL,
.index = 4,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -74,7 +74,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_GAMMA_CONTROL,
+ .selector = UVC_PU_GAMMA_CONTROL,
.index = 5,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -82,7 +82,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_WHITE_BALANCE_TEMPERATURE_CONTROL,
+ .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL,
.index = 6,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -90,7 +90,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_WHITE_BALANCE_COMPONENT_CONTROL,
+ .selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.index = 7,
.size = 4,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -98,7 +98,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_BACKLIGHT_COMPENSATION_CONTROL,
+ .selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL,
.index = 8,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -106,7 +106,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_GAIN_CONTROL,
+ .selector = UVC_PU_GAIN_CONTROL,
.index = 9,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -114,7 +114,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_POWER_LINE_FREQUENCY_CONTROL,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
.index = 10,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -122,7 +122,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_HUE_AUTO_CONTROL,
+ .selector = UVC_PU_HUE_AUTO_CONTROL,
.index = 11,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -130,7 +130,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,
+ .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,
.index = 12,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -138,7 +138,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL,
+ .selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL,
.index = 13,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -146,7 +146,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_DIGITAL_MULTIPLIER_CONTROL,
+ .selector = UVC_PU_DIGITAL_MULTIPLIER_CONTROL,
.index = 14,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -154,7 +154,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL,
+ .selector = UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL,
.index = 15,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -162,21 +162,21 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_ANALOG_VIDEO_STANDARD_CONTROL,
+ .selector = UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL,
.index = 16,
.size = 1,
.flags = UVC_CONTROL_GET_CUR,
},
{
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_ANALOG_LOCK_STATUS_CONTROL,
+ .selector = UVC_PU_ANALOG_LOCK_STATUS_CONTROL,
.index = 17,
.size = 1,
.flags = UVC_CONTROL_GET_CUR,
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_SCANNING_MODE_CONTROL,
+ .selector = UVC_CT_SCANNING_MODE_CONTROL,
.index = 0,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -184,7 +184,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_AE_MODE_CONTROL,
+ .selector = UVC_CT_AE_MODE_CONTROL,
.index = 1,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -193,7 +193,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_AE_PRIORITY_CONTROL,
+ .selector = UVC_CT_AE_PRIORITY_CONTROL,
.index = 2,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -201,7 +201,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_EXPOSURE_TIME_ABSOLUTE_CONTROL,
+ .selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL,
.index = 3,
.size = 4,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -209,7 +209,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_EXPOSURE_TIME_RELATIVE_CONTROL,
+ .selector = UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL,
.index = 4,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -217,7 +217,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_FOCUS_ABSOLUTE_CONTROL,
+ .selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL,
.index = 5,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -225,7 +225,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_FOCUS_RELATIVE_CONTROL,
+ .selector = UVC_CT_FOCUS_RELATIVE_CONTROL,
.index = 6,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -233,7 +233,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_IRIS_ABSOLUTE_CONTROL,
+ .selector = UVC_CT_IRIS_ABSOLUTE_CONTROL,
.index = 7,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -241,7 +241,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_IRIS_RELATIVE_CONTROL,
+ .selector = UVC_CT_IRIS_RELATIVE_CONTROL,
.index = 8,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -249,7 +249,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_ZOOM_ABSOLUTE_CONTROL,
+ .selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL,
.index = 9,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -257,7 +257,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_ZOOM_RELATIVE_CONTROL,
+ .selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.index = 10,
.size = 3,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -265,7 +265,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_PANTILT_ABSOLUTE_CONTROL,
+ .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.index = 11,
.size = 8,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -273,7 +273,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_PANTILT_RELATIVE_CONTROL,
+ .selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.index = 12,
.size = 4,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -281,7 +281,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_ROLL_ABSOLUTE_CONTROL,
+ .selector = UVC_CT_ROLL_ABSOLUTE_CONTROL,
.index = 13,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -289,7 +289,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_ROLL_RELATIVE_CONTROL,
+ .selector = UVC_CT_ROLL_RELATIVE_CONTROL,
.index = 14,
.size = 2,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
@@ -297,7 +297,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_FOCUS_AUTO_CONTROL,
+ .selector = UVC_CT_FOCUS_AUTO_CONTROL,
.index = 17,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -305,7 +305,7 @@ static struct uvc_control_info uvc_ctrls[] = {
},
{
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_PRIVACY_CONTROL,
+ .selector = UVC_CT_PRIVACY_CONTROL,
.index = 18,
.size = 1,
.flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
@@ -332,13 +332,13 @@ static __s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,
__s8 zoom = (__s8)data[0];
switch (query) {
- case GET_CUR:
+ case UVC_GET_CUR:
return (zoom == 0) ? 0 : (zoom > 0 ? data[2] : -data[2]);
- case GET_MIN:
- case GET_MAX:
- case GET_RES:
- case GET_DEF:
+ case UVC_GET_MIN:
+ case UVC_GET_MAX:
+ case UVC_GET_RES:
+ case UVC_GET_DEF:
default:
return data[2];
}
@@ -356,7 +356,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_BRIGHTNESS,
.name = "Brightness",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_BRIGHTNESS_CONTROL,
+ .selector = UVC_PU_BRIGHTNESS_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -366,7 +366,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_CONTRAST,
.name = "Contrast",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_CONTRAST_CONTROL,
+ .selector = UVC_PU_CONTRAST_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -376,7 +376,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_HUE,
.name = "Hue",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_HUE_CONTROL,
+ .selector = UVC_PU_HUE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -386,7 +386,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_SATURATION,
.name = "Saturation",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_SATURATION_CONTROL,
+ .selector = UVC_PU_SATURATION_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -396,7 +396,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_SHARPNESS,
.name = "Sharpness",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_SHARPNESS_CONTROL,
+ .selector = UVC_PU_SHARPNESS_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -406,7 +406,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_GAMMA,
.name = "Gamma",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_GAMMA_CONTROL,
+ .selector = UVC_PU_GAMMA_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -416,7 +416,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_BACKLIGHT_COMPENSATION,
.name = "Backlight Compensation",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_BACKLIGHT_COMPENSATION_CONTROL,
+ .selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -426,7 +426,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_GAIN,
.name = "Gain",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_GAIN_CONTROL,
+ .selector = UVC_PU_GAIN_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -436,7 +436,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_POWER_LINE_FREQUENCY,
.name = "Power Line Frequency",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_POWER_LINE_FREQUENCY_CONTROL,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
.size = 2,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_MENU,
@@ -448,7 +448,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_HUE_AUTO,
.name = "Hue, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_HUE_AUTO_CONTROL,
+ .selector = UVC_PU_HUE_AUTO_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -458,7 +458,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_EXPOSURE_AUTO,
.name = "Exposure, Auto",
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_AE_MODE_CONTROL,
+ .selector = UVC_CT_AE_MODE_CONTROL,
.size = 4,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_MENU,
@@ -470,7 +470,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
.name = "Exposure, Auto Priority",
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_AE_PRIORITY_CONTROL,
+ .selector = UVC_CT_AE_PRIORITY_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -480,7 +480,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_EXPOSURE_ABSOLUTE,
.name = "Exposure (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_EXPOSURE_TIME_ABSOLUTE_CONTROL,
+ .selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL,
.size = 32,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -490,7 +490,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.name = "White Balance Temperature, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,
+ .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -500,7 +500,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
.name = "White Balance Temperature",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_WHITE_BALANCE_TEMPERATURE_CONTROL,
+ .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -510,7 +510,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.name = "White Balance Component, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL,
+ .selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -520,7 +520,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_BLUE_BALANCE,
.name = "White Balance Blue Component",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_WHITE_BALANCE_COMPONENT_CONTROL,
+ .selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -530,7 +530,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_RED_BALANCE,
.name = "White Balance Red Component",
.entity = UVC_GUID_UVC_PROCESSING,
- .selector = PU_WHITE_BALANCE_COMPONENT_CONTROL,
+ .selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
.offset = 16,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -540,7 +540,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_FOCUS_ABSOLUTE,
.name = "Focus (absolute)",
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_FOCUS_ABSOLUTE_CONTROL,
+ .selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -550,7 +550,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_FOCUS_AUTO,
.name = "Focus, Auto",
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_FOCUS_AUTO_CONTROL,
+ .selector = UVC_CT_FOCUS_AUTO_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -560,7 +560,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_ZOOM_ABSOLUTE,
.name = "Zoom, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_ZOOM_ABSOLUTE_CONTROL,
+ .selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL,
.size = 16,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -570,7 +570,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_ZOOM_CONTINUOUS,
.name = "Zoom, Continuous",
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_ZOOM_RELATIVE_CONTROL,
+ .selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.size = 0,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
@@ -582,7 +582,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.id = V4L2_CID_PRIVACY,
.name = "Privacy",
.entity = UVC_GUID_UVC_CAMERA,
- .selector = CT_PRIVACY_CONTROL,
+ .selector = UVC_CT_PRIVACY_CONTROL,
.size = 1,
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -675,16 +675,16 @@ static const __u8 uvc_media_transport_input_guid[16] =
static int uvc_entity_match_guid(struct uvc_entity *entity, __u8 guid[16])
{
switch (UVC_ENTITY_TYPE(entity)) {
- case ITT_CAMERA:
+ case UVC_ITT_CAMERA:
return memcmp(uvc_camera_guid, guid, 16) == 0;
- case ITT_MEDIA_TRANSPORT_INPUT:
+ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
return memcmp(uvc_media_transport_input_guid, guid, 16) == 0;
- case VC_PROCESSING_UNIT:
+ case UVC_VC_PROCESSING_UNIT:
return memcmp(uvc_processing_guid, guid, 16) == 0;
- case VC_EXTENSION_UNIT:
+ case UVC_VC_EXTENSION_UNIT:
return memcmp(entity->extension.guidExtensionCode,
guid, 16) == 0;
@@ -793,11 +793,13 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (ctrl->info->flags & UVC_CONTROL_GET_DEF) {
- if ((ret = uvc_query_ctrl(video->dev, GET_DEF, ctrl->entity->id,
- video->dev->intfnum, ctrl->info->selector,
- data, ctrl->info->size)) < 0)
+ ret = uvc_query_ctrl(video->dev, UVC_GET_DEF, ctrl->entity->id,
+ video->dev->intfnum, ctrl->info->selector, data,
+ ctrl->info->size);
+ if (ret < 0)
goto out;
- v4l2_ctrl->default_value = mapping->get(mapping, GET_DEF, data);
+ v4l2_ctrl->default_value =
+ mapping->get(mapping, UVC_GET_DEF, data);
}
switch (mapping->v4l2_type) {
@@ -829,25 +831,28 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
}
if (ctrl->info->flags & UVC_CONTROL_GET_MIN) {
- if ((ret = uvc_query_ctrl(video->dev, GET_MIN, ctrl->entity->id,
- video->dev->intfnum, ctrl->info->selector,
- data, ctrl->info->size)) < 0)
+ ret = uvc_query_ctrl(video->dev, UVC_GET_MIN, ctrl->entity->id,
+ video->dev->intfnum, ctrl->info->selector, data,
+ ctrl->info->size);
+ if (ret < 0)
goto out;
- v4l2_ctrl->minimum = mapping->get(mapping, GET_MIN, data);
+ v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN, data);
}
if (ctrl->info->flags & UVC_CONTROL_GET_MAX) {
- if ((ret = uvc_query_ctrl(video->dev, GET_MAX, ctrl->entity->id,
- video->dev->intfnum, ctrl->info->selector,
- data, ctrl->info->size)) < 0)
+ ret = uvc_query_ctrl(video->dev, UVC_GET_MAX, ctrl->entity->id,
+ video->dev->intfnum, ctrl->info->selector, data,
+ ctrl->info->size);
+ if (ret < 0)
goto out;
- v4l2_ctrl->maximum = mapping->get(mapping, GET_MAX, data);
+ v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX, data);
}
if (ctrl->info->flags & UVC_CONTROL_GET_RES) {
- if ((ret = uvc_query_ctrl(video->dev, GET_RES, ctrl->entity->id,
- video->dev->intfnum, ctrl->info->selector,
- data, ctrl->info->size)) < 0)
+ ret = uvc_query_ctrl(video->dev, UVC_GET_RES, ctrl->entity->id,
+ video->dev->intfnum, ctrl->info->selector, data,
+ ctrl->info->size);
+ if (ret < 0)
goto out;
- v4l2_ctrl->step = mapping->get(mapping, GET_RES, data);
+ v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES, data);
}
ret = 0;
@@ -912,7 +917,7 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
continue;
if (!rollback)
- ret = uvc_query_ctrl(dev, SET_CUR, ctrl->entity->id,
+ ret = uvc_query_ctrl(dev, UVC_SET_CUR, ctrl->entity->id,
dev->intfnum, ctrl->info->selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
ctrl->info->size);
@@ -974,7 +979,7 @@ int uvc_ctrl_get(struct uvc_video_device *video,
return -EINVAL;
if (!ctrl->loaded) {
- ret = uvc_query_ctrl(video->dev, GET_CUR, ctrl->entity->id,
+ ret = uvc_query_ctrl(video->dev, UVC_GET_CUR, ctrl->entity->id,
video->dev->intfnum, ctrl->info->selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
ctrl->info->size);
@@ -984,7 +989,7 @@ int uvc_ctrl_get(struct uvc_video_device *video,
ctrl->loaded = 1;
}
- xctrl->value = mapping->get(mapping, GET_CUR,
+ xctrl->value = mapping->get(mapping, UVC_GET_CUR,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
@@ -1023,7 +1028,7 @@ int uvc_ctrl_set(struct uvc_video_device *video,
memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
0, ctrl->info->size);
} else {
- ret = uvc_query_ctrl(video->dev, GET_CUR,
+ ret = uvc_query_ctrl(video->dev, UVC_GET_CUR,
ctrl->entity->id, video->dev->intfnum,
ctrl->info->selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
@@ -1115,9 +1120,9 @@ int uvc_xu_ctrl_query(struct uvc_video_device *video,
goto out;
}
- ret = uvc_query_ctrl(video->dev, set ? SET_CUR : GET_CUR, xctrl->unit,
- video->dev->intfnum, xctrl->selector, data,
- xctrl->size);
+ ret = uvc_query_ctrl(video->dev, set ? UVC_SET_CUR : UVC_GET_CUR,
+ xctrl->unit, video->dev->intfnum, xctrl->selector,
+ data, xctrl->size);
if (ret < 0)
goto out;
@@ -1211,7 +1216,7 @@ static void uvc_ctrl_add_ctrl(struct uvc_device *dev,
if (!found)
return;
- if (UVC_ENTITY_TYPE(entity) == VC_EXTENSION_UNIT) {
+ if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT) {
/* Check if the device control information and length match
* the user supplied information.
*/
@@ -1219,8 +1224,9 @@ static void uvc_ctrl_add_ctrl(struct uvc_device *dev,
__le16 size;
__u8 inf;
- if ((ret = uvc_query_ctrl(dev, GET_LEN, ctrl->entity->id,
- dev->intfnum, info->selector, (__u8 *)&size, 2)) < 0) {
+ ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id,
+ dev->intfnum, info->selector, (__u8 *)&size, 2);
+ if (ret < 0) {
uvc_trace(UVC_TRACE_CONTROL, "GET_LEN failed on "
"control " UVC_GUID_FORMAT "/%u (%d).\n",
UVC_GUID_ARGS(info->entity), info->selector,
@@ -1236,8 +1242,9 @@ static void uvc_ctrl_add_ctrl(struct uvc_device *dev,
return;
}
- if ((ret = uvc_query_ctrl(dev, GET_INFO, ctrl->entity->id,
- dev->intfnum, info->selector, &inf, 1)) < 0) {
+ ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id,
+ dev->intfnum, info->selector, &inf, 1);
+ if (ret < 0) {
uvc_trace(UVC_TRACE_CONTROL, "GET_INFO failed on "
"control " UVC_GUID_FORMAT "/%u (%d).\n",
UVC_GUID_ARGS(info->entity), info->selector,
@@ -1391,7 +1398,7 @@ uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
unsigned int size;
unsigned int i;
- if (UVC_ENTITY_TYPE(entity) != VC_PROCESSING_UNIT)
+ if (UVC_ENTITY_TYPE(entity) != UVC_VC_PROCESSING_UNIT)
return;
controls = entity->processing.bmControls;
@@ -1427,13 +1434,13 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
unsigned int bControlSize = 0, ncontrols = 0;
__u8 *bmControls = NULL;
- if (UVC_ENTITY_TYPE(entity) == VC_EXTENSION_UNIT) {
+ if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT) {
bmControls = entity->extension.bmControls;
bControlSize = entity->extension.bControlSize;
- } else if (UVC_ENTITY_TYPE(entity) == VC_PROCESSING_UNIT) {
+ } else if (UVC_ENTITY_TYPE(entity) == UVC_VC_PROCESSING_UNIT) {
bmControls = entity->processing.bmControls;
bControlSize = entity->processing.bControlSize;
- } else if (UVC_ENTITY_TYPE(entity) == ITT_CAMERA) {
+ } else if (UVC_ENTITY_TYPE(entity) == UVC_ITT_CAMERA) {
bmControls = entity->camera.bmControls;
bControlSize = entity->camera.bControlSize;
}
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 89927b7aec28..c080412b9d86 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -249,23 +249,23 @@ static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev,
list_for_each_entry_continue(entity, &dev->entities, list) {
switch (UVC_ENTITY_TYPE(entity)) {
- case TT_STREAMING:
+ case UVC_TT_STREAMING:
if (entity->output.bSourceID == id)
return entity;
break;
- case VC_PROCESSING_UNIT:
+ case UVC_VC_PROCESSING_UNIT:
if (entity->processing.bSourceID == id)
return entity;
break;
- case VC_SELECTOR_UNIT:
+ case UVC_VC_SELECTOR_UNIT:
for (i = 0; i < entity->selector.bNrInPins; ++i)
if (entity->selector.baSourceID[i] == id)
return entity;
break;
- case VC_EXTENSION_UNIT:
+ case UVC_VC_EXTENSION_UNIT:
for (i = 0; i < entity->extension.bNrInPins; ++i)
if (entity->extension.baSourceID[i] == id)
return entity;
@@ -297,9 +297,9 @@ static int uvc_parse_format(struct uvc_device *dev,
format->index = buffer[3];
switch (buffer[2]) {
- case VS_FORMAT_UNCOMPRESSED:
- case VS_FORMAT_FRAME_BASED:
- n = buffer[2] == VS_FORMAT_UNCOMPRESSED ? 27 : 28;
+ case UVC_VS_FORMAT_UNCOMPRESSED:
+ case UVC_VS_FORMAT_FRAME_BASED:
+ n = buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED ? 27 : 28;
if (buflen < n) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT error\n",
@@ -325,16 +325,16 @@ static int uvc_parse_format(struct uvc_device *dev,
}
format->bpp = buffer[21];
- if (buffer[2] == VS_FORMAT_UNCOMPRESSED) {
- ftype = VS_FRAME_UNCOMPRESSED;
+ if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) {
+ ftype = UVC_VS_FRAME_UNCOMPRESSED;
} else {
- ftype = VS_FRAME_FRAME_BASED;
+ ftype = UVC_VS_FRAME_FRAME_BASED;
if (buffer[27])
format->flags = UVC_FMT_FLAG_COMPRESSED;
}
break;
- case VS_FORMAT_MJPEG:
+ case UVC_VS_FORMAT_MJPEG:
if (buflen < 11) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT error\n",
@@ -347,10 +347,10 @@ static int uvc_parse_format(struct uvc_device *dev,
format->fcc = V4L2_PIX_FMT_MJPEG;
format->flags = UVC_FMT_FLAG_COMPRESSED;
format->bpp = 0;
- ftype = VS_FRAME_MJPEG;
+ ftype = UVC_VS_FRAME_MJPEG;
break;
- case VS_FORMAT_DV:
+ case UVC_VS_FORMAT_DV:
if (buflen < 9) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT error\n",
@@ -395,8 +395,8 @@ static int uvc_parse_format(struct uvc_device *dev,
format->nframes = 1;
break;
- case VS_FORMAT_MPEG2TS:
- case VS_FORMAT_STREAM_BASED:
+ case UVC_VS_FORMAT_MPEG2TS:
+ case UVC_VS_FORMAT_STREAM_BASED:
/* Not supported yet. */
default:
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
@@ -416,7 +416,7 @@ static int uvc_parse_format(struct uvc_device *dev,
*/
while (buflen > 2 && buffer[2] == ftype) {
frame = &format->frame[format->nframes];
- if (ftype != VS_FRAME_FRAME_BASED)
+ if (ftype != UVC_VS_FRAME_FRAME_BASED)
n = buflen > 25 ? buffer[25] : 0;
else
n = buflen > 21 ? buffer[21] : 0;
@@ -436,7 +436,7 @@ static int uvc_parse_format(struct uvc_device *dev,
frame->wHeight = get_unaligned_le16(&buffer[7]);
frame->dwMinBitRate = get_unaligned_le32(&buffer[9]);
frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]);
- if (ftype != VS_FRAME_FRAME_BASED) {
+ if (ftype != UVC_VS_FRAME_FRAME_BASED) {
frame->dwMaxVideoFrameBufferSize =
get_unaligned_le32(&buffer[17]);
frame->dwDefaultFrameInterval =
@@ -491,12 +491,12 @@ static int uvc_parse_format(struct uvc_device *dev,
buffer += buffer[0];
}
- if (buflen > 2 && buffer[2] == VS_STILL_IMAGE_FRAME) {
+ if (buflen > 2 && buffer[2] == UVC_VS_STILL_IMAGE_FRAME) {
buflen -= buffer[0];
buffer += buffer[0];
}
- if (buflen > 2 && buffer[2] == VS_COLORFORMAT) {
+ if (buflen > 2 && buffer[2] == UVC_VS_COLORFORMAT) {
if (buflen < 6) {
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d COLORFORMAT error\n",
@@ -530,7 +530,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
int ret = -EINVAL;
if (intf->cur_altsetting->desc.bInterfaceSubClass
- != SC_VIDEOSTREAMING) {
+ != UVC_SC_VIDEOSTREAMING) {
uvc_trace(UVC_TRACE_DESCR, "device %d interface %d isn't a "
"video streaming interface\n", dev->udev->devnum,
intf->altsetting[0].desc.bInterfaceNumber);
@@ -589,12 +589,12 @@ static int uvc_parse_streaming(struct uvc_device *dev,
/* Parse the header descriptor. */
switch (buffer[2]) {
- case VS_OUTPUT_HEADER:
+ case UVC_VS_OUTPUT_HEADER:
streaming->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
size = 9;
break;
- case VS_INPUT_HEADER:
+ case UVC_VS_INPUT_HEADER:
streaming->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
size = 13;
break;
@@ -618,7 +618,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
streaming->header.bNumFormats = p;
streaming->header.bEndpointAddress = buffer[6];
- if (buffer[2] == VS_INPUT_HEADER) {
+ if (buffer[2] == UVC_VS_INPUT_HEADER) {
streaming->header.bmInfo = buffer[7];
streaming->header.bTerminalLink = buffer[8];
streaming->header.bStillCaptureMethod = buffer[9];
@@ -644,15 +644,15 @@ static int uvc_parse_streaming(struct uvc_device *dev,
_buflen = buflen;
/* Count the format and frame descriptors. */
- while (_buflen > 2 && _buffer[1] == CS_INTERFACE) {
+ while (_buflen > 2 && _buffer[1] == USB_DT_CS_INTERFACE) {
switch (_buffer[2]) {
- case VS_FORMAT_UNCOMPRESSED:
- case VS_FORMAT_MJPEG:
- case VS_FORMAT_FRAME_BASED:
+ case UVC_VS_FORMAT_UNCOMPRESSED:
+ case UVC_VS_FORMAT_MJPEG:
+ case UVC_VS_FORMAT_FRAME_BASED:
nformats++;
break;
- case VS_FORMAT_DV:
+ case UVC_VS_FORMAT_DV:
/* DV format has no frame descriptor. We will create a
* dummy frame descriptor with a dummy frame interval.
*/
@@ -661,22 +661,22 @@ static int uvc_parse_streaming(struct uvc_device *dev,
nintervals++;
break;
- case VS_FORMAT_MPEG2TS:
- case VS_FORMAT_STREAM_BASED:
+ case UVC_VS_FORMAT_MPEG2TS:
+ case UVC_VS_FORMAT_STREAM_BASED:
uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
"interface %d FORMAT %u is not supported.\n",
dev->udev->devnum,
alts->desc.bInterfaceNumber, _buffer[2]);
break;
- case VS_FRAME_UNCOMPRESSED:
- case VS_FRAME_MJPEG:
+ case UVC_VS_FRAME_UNCOMPRESSED:
+ case UVC_VS_FRAME_MJPEG:
nframes++;
if (_buflen > 25)
nintervals += _buffer[25] ? _buffer[25] : 3;
break;
- case VS_FRAME_FRAME_BASED:
+ case UVC_VS_FRAME_FRAME_BASED:
nframes++;
if (_buflen > 21)
nintervals += _buffer[21] ? _buffer[21] : 3;
@@ -709,12 +709,12 @@ static int uvc_parse_streaming(struct uvc_device *dev,
streaming->nformats = nformats;
/* Parse the format descriptors. */
- while (buflen > 2 && buffer[1] == CS_INTERFACE) {
+ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) {
switch (buffer[2]) {
- case VS_FORMAT_UNCOMPRESSED:
- case VS_FORMAT_MJPEG:
- case VS_FORMAT_DV:
- case VS_FORMAT_FRAME_BASED:
+ case UVC_VS_FORMAT_UNCOMPRESSED:
+ case UVC_VS_FORMAT_MJPEG:
+ case UVC_VS_FORMAT_DV:
+ case UVC_VS_FORMAT_FRAME_BASED:
format->frame = frame;
ret = uvc_parse_format(dev, streaming, format,
&interval, buffer, buflen);
@@ -819,7 +819,7 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
return -ENOMEM;
unit->id = buffer[3];
- unit->type = VC_EXTENSION_UNIT;
+ unit->type = UVC_VC_EXTENSION_UNIT;
memcpy(unit->extension.guidExtensionCode, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
unit->extension.bNrInPins = get_unaligned_le16(&buffer[21]);
@@ -856,7 +856,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
__u16 type;
switch (buffer[2]) {
- case VC_HEADER:
+ case UVC_VC_HEADER:
n = buflen >= 12 ? buffer[11] : 0;
if (buflen < 12 || buflen < 12 + n) {
@@ -883,7 +883,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
}
break;
- case VC_INPUT_TERMINAL:
+ case UVC_VC_INPUT_TERMINAL:
if (buflen < 8) {
uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
"interface %d INPUT_TERMINAL error\n",
@@ -908,11 +908,11 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
p = 0;
len = 8;
- if (type == ITT_CAMERA) {
+ if (type == UVC_ITT_CAMERA) {
n = buflen >= 15 ? buffer[14] : 0;
len = 15;
- } else if (type == ITT_MEDIA_TRANSPORT_INPUT) {
+ } else if (type == UVC_ITT_MEDIA_TRANSPORT_INPUT) {
n = buflen >= 9 ? buffer[8] : 0;
p = buflen >= 10 + n ? buffer[9+n] : 0;
len = 10;
@@ -932,7 +932,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
term->id = buffer[3];
term->type = type | UVC_TERM_INPUT;
- if (UVC_ENTITY_TYPE(term) == ITT_CAMERA) {
+ if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
term->camera.bControlSize = n;
term->camera.bmControls = (__u8 *)term + sizeof *term;
term->camera.wObjectiveFocalLengthMin =
@@ -942,7 +942,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
term->camera.wOcularFocalLength =
get_unaligned_le16(&buffer[12]);
memcpy(term->camera.bmControls, &buffer[15], n);
- } else if (UVC_ENTITY_TYPE(term) == ITT_MEDIA_TRANSPORT_INPUT) {
+ } else if (UVC_ENTITY_TYPE(term) ==
+ UVC_ITT_MEDIA_TRANSPORT_INPUT) {
term->media.bControlSize = n;
term->media.bmControls = (__u8 *)term + sizeof *term;
term->media.bTransportModeSize = p;
@@ -955,9 +956,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
if (buffer[7] != 0)
usb_string(udev, buffer[7], term->name,
sizeof term->name);
- else if (UVC_ENTITY_TYPE(term) == ITT_CAMERA)
+ else if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
sprintf(term->name, "Camera %u", buffer[3]);
- else if (UVC_ENTITY_TYPE(term) == ITT_MEDIA_TRANSPORT_INPUT)
+ else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT)
sprintf(term->name, "Media %u", buffer[3]);
else
sprintf(term->name, "Input %u", buffer[3]);
@@ -965,7 +966,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
list_add_tail(&term->list, &dev->entities);
break;
- case VC_OUTPUT_TERMINAL:
+ case UVC_VC_OUTPUT_TERMINAL:
if (buflen < 9) {
uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
"interface %d OUTPUT_TERMINAL error\n",
@@ -1002,7 +1003,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
list_add_tail(&term->list, &dev->entities);
break;
- case VC_SELECTOR_UNIT:
+ case UVC_VC_SELECTOR_UNIT:
p = buflen >= 5 ? buffer[4] : 0;
if (buflen < 5 || buflen < 6 + p) {
@@ -1031,7 +1032,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
list_add_tail(&unit->list, &dev->entities);
break;
- case VC_PROCESSING_UNIT:
+ case UVC_VC_PROCESSING_UNIT:
n = buflen >= 8 ? buffer[7] : 0;
p = dev->uvc_version >= 0x0110 ? 10 : 9;
@@ -1066,7 +1067,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
list_add_tail(&unit->list, &dev->entities);
break;
- case VC_EXTENSION_UNIT:
+ case UVC_VC_EXTENSION_UNIT:
p = buflen >= 22 ? buffer[21] : 0;
n = buflen >= 24 + p ? buffer[22+p] : 0;
@@ -1194,7 +1195,7 @@ static int uvc_scan_chain_entity(struct uvc_video_device *video,
struct uvc_entity *entity)
{
switch (UVC_ENTITY_TYPE(entity)) {
- case VC_EXTENSION_UNIT:
+ case UVC_VC_EXTENSION_UNIT:
if (uvc_trace_param & UVC_TRACE_PROBE)
printk(" <- XU %d", entity->id);
@@ -1207,7 +1208,7 @@ static int uvc_scan_chain_entity(struct uvc_video_device *video,
list_add_tail(&entity->chain, &video->extensions);
break;
- case VC_PROCESSING_UNIT:
+ case UVC_VC_PROCESSING_UNIT:
if (uvc_trace_param & UVC_TRACE_PROBE)
printk(" <- PU %d", entity->id);
@@ -1220,7 +1221,7 @@ static int uvc_scan_chain_entity(struct uvc_video_device *video,
video->processing = entity;
break;
- case VC_SELECTOR_UNIT:
+ case UVC_VC_SELECTOR_UNIT:
if (uvc_trace_param & UVC_TRACE_PROBE)
printk(" <- SU %d", entity->id);
@@ -1237,16 +1238,16 @@ static int uvc_scan_chain_entity(struct uvc_video_device *video,
video->selector = entity;
break;
- case ITT_VENDOR_SPECIFIC:
- case ITT_CAMERA:
- case ITT_MEDIA_TRANSPORT_INPUT:
+ case UVC_ITT_VENDOR_SPECIFIC:
+ case UVC_ITT_CAMERA:
+ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
if (uvc_trace_param & UVC_TRACE_PROBE)
printk(" <- IT %d\n", entity->id);
list_add_tail(&entity->chain, &video->iterms);
break;
- case TT_STREAMING:
+ case UVC_TT_STREAMING:
if (uvc_trace_param & UVC_TRACE_PROBE)
printk(" <- IT %d\n", entity->id);
@@ -1291,7 +1292,7 @@ static int uvc_scan_chain_forward(struct uvc_video_device *video,
if (forward == NULL)
break;
- if (UVC_ENTITY_TYPE(forward) != VC_EXTENSION_UNIT ||
+ if (UVC_ENTITY_TYPE(forward) != UVC_VC_EXTENSION_UNIT ||
forward == prev)
continue;
@@ -1323,15 +1324,15 @@ static int uvc_scan_chain_backward(struct uvc_video_device *video,
int id = -1, i;
switch (UVC_ENTITY_TYPE(entity)) {
- case VC_EXTENSION_UNIT:
+ case UVC_VC_EXTENSION_UNIT:
id = entity->extension.baSourceID[0];
break;
- case VC_PROCESSING_UNIT:
+ case UVC_VC_PROCESSING_UNIT:
id = entity->processing.bSourceID;
break;
- case VC_SELECTOR_UNIT:
+ case UVC_VC_SELECTOR_UNIT:
/* Single-input selector units are ignored. */
if (entity->selector.bNrInPins == 1) {
id = entity->selector.baSourceID[0];
@@ -1377,7 +1378,7 @@ static int uvc_scan_chain(struct uvc_video_device *video)
entity = video->oterm;
uvc_trace(UVC_TRACE_PROBE, "Scanning UVC chain: OT %d", entity->id);
- if (UVC_ENTITY_TYPE(entity) == TT_STREAMING)
+ if (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
video->sterm = entity;
id = entity->output.bSourceID;
@@ -1664,7 +1665,8 @@ static void uvc_disconnect(struct usb_interface *intf)
*/
usb_set_intfdata(intf, NULL);
- if (intf->cur_altsetting->desc.bInterfaceSubClass == SC_VIDEOSTREAMING)
+ if (intf->cur_altsetting->desc.bInterfaceSubClass ==
+ UVC_SC_VIDEOSTREAMING)
return;
/* uvc_v4l2_open() might race uvc_disconnect(). A static driver-wide
@@ -1692,7 +1694,8 @@ static int uvc_suspend(struct usb_interface *intf, pm_message_t message)
intf->cur_altsetting->desc.bInterfaceNumber);
/* Controls are cached on the fly so they don't need to be saved. */
- if (intf->cur_altsetting->desc.bInterfaceSubClass == SC_VIDEOCONTROL)
+ if (intf->cur_altsetting->desc.bInterfaceSubClass ==
+ UVC_SC_VIDEOCONTROL)
return uvc_status_suspend(dev);
if (dev->video.streaming->intf != intf) {
@@ -1711,7 +1714,8 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
uvc_trace(UVC_TRACE_SUSPEND, "Resuming interface %u\n",
intf->cur_altsetting->desc.bInterfaceNumber);
- if (intf->cur_altsetting->desc.bInterfaceSubClass == SC_VIDEOCONTROL) {
+ if (intf->cur_altsetting->desc.bInterfaceSubClass ==
+ UVC_SC_VIDEOCONTROL) {
if (reset) {
int ret = uvc_ctrl_resume_device(dev);
@@ -1925,7 +1929,8 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
- .driver_info = UVC_QUIRK_PROBE_EXTRAFIELDS },
+ .driver_info = UVC_QUIRK_PROBE_MINMAX
+ | UVC_QUIRK_PROBE_EXTRAFIELDS },
/* Ecamm Pico iMage */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 5e77cad29690..87cb9cc75af2 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -648,7 +648,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(input, 0, sizeof *input);
input->index = index;
strlcpy(input->name, iterm->name, sizeof input->name);
- if (UVC_ENTITY_TYPE(iterm) == ITT_CAMERA)
+ if (UVC_ENTITY_TYPE(iterm) == UVC_ITT_CAMERA)
input->type = V4L2_INPUT_TYPE_CAMERA;
break;
}
@@ -663,9 +663,9 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
break;
}
- ret = uvc_query_ctrl(video->dev, GET_CUR, video->selector->id,
- video->dev->intfnum, SU_INPUT_SELECT_CONTROL,
- &input, 1);
+ ret = uvc_query_ctrl(video->dev, UVC_GET_CUR,
+ video->selector->id, video->dev->intfnum,
+ UVC_SU_INPUT_SELECT_CONTROL, &input, 1);
if (ret < 0)
return ret;
@@ -690,9 +690,9 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (input == 0 || input > video->selector->selector.bNrInPins)
return -EINVAL;
- return uvc_query_ctrl(video->dev, SET_CUR, video->selector->id,
- video->dev->intfnum, SU_INPUT_SELECT_CONTROL,
- &input, 1);
+ return uvc_query_ctrl(video->dev, UVC_SET_CUR,
+ video->selector->id, video->dev->intfnum,
+ UVC_SU_INPUT_SELECT_CONTROL, &input, 1);
}
/* Try, Get, Set & Enum format */
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 01b633c73480..cf618d7c1f55 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -129,10 +129,10 @@ static int uvc_get_video_ctrl(struct uvc_video_device *video,
return -ENOMEM;
ret = __uvc_query_ctrl(video->dev, query, 0, video->streaming->intfnum,
- probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, data, size,
- UVC_CTRL_STREAMING_TIMEOUT);
+ probe ? UVC_VS_PROBE_CONTROL : UVC_VS_COMMIT_CONTROL, data,
+ size, UVC_CTRL_STREAMING_TIMEOUT);
- if ((query == GET_MIN || query == GET_MAX) && ret == 2) {
+ if ((query == UVC_GET_MIN || query == UVC_GET_MAX) && ret == 2) {
/* Some cameras, mostly based on Bison Electronics chipsets,
* answer a GET_MIN or GET_MAX request with the wCompQuality
* field only.
@@ -144,7 +144,7 @@ static int uvc_get_video_ctrl(struct uvc_video_device *video,
ctrl->wCompQuality = le16_to_cpup((__le16 *)data);
ret = 0;
goto out;
- } else if (query == GET_DEF && probe == 1 && ret != size) {
+ } else if (query == UVC_GET_DEF && probe == 1 && ret != size) {
/* Many cameras don't support the GET_DEF request on their
* video probe control. Warn once and return, the caller will
* fall back to GET_CUR.
@@ -232,10 +232,10 @@ static int uvc_set_video_ctrl(struct uvc_video_device *video,
data[33] = ctrl->bMaxVersion;
}
- ret = __uvc_query_ctrl(video->dev, SET_CUR, 0,
+ ret = __uvc_query_ctrl(video->dev, UVC_SET_CUR, 0,
video->streaming->intfnum,
- probe ? VS_PROBE_CONTROL : VS_COMMIT_CONTROL, data, size,
- UVC_CTRL_STREAMING_TIMEOUT);
+ probe ? UVC_VS_PROBE_CONTROL : UVC_VS_COMMIT_CONTROL, data,
+ size, UVC_CTRL_STREAMING_TIMEOUT);
if (ret != size) {
uvc_printk(KERN_ERR, "Failed to set UVC %s control : "
"%d (exp. %u).\n", probe ? "probe" : "commit",
@@ -269,10 +269,10 @@ int uvc_probe_video(struct uvc_video_device *video,
/* Get the minimum and maximum values for compression settings. */
if (!(video->dev->quirks & UVC_QUIRK_PROBE_MINMAX)) {
- ret = uvc_get_video_ctrl(video, &probe_min, 1, GET_MIN);
+ ret = uvc_get_video_ctrl(video, &probe_min, 1, UVC_GET_MIN);
if (ret < 0)
goto done;
- ret = uvc_get_video_ctrl(video, &probe_max, 1, GET_MAX);
+ ret = uvc_get_video_ctrl(video, &probe_max, 1, UVC_GET_MAX);
if (ret < 0)
goto done;
@@ -280,8 +280,11 @@ int uvc_probe_video(struct uvc_video_device *video,
}
for (i = 0; i < 2; ++i) {
- if ((ret = uvc_set_video_ctrl(video, probe, 1)) < 0 ||
- (ret = uvc_get_video_ctrl(video, probe, 1, GET_CUR)) < 0)
+ ret = uvc_set_video_ctrl(video, probe, 1);
+ if (ret < 0)
+ goto done;
+ ret = uvc_get_video_ctrl(video, probe, 1, UVC_GET_CUR);
+ if (ret < 0)
goto done;
if (video->streaming->intf->num_altsetting == 1)
@@ -1065,7 +1068,7 @@ int uvc_video_init(struct uvc_video_device *video)
* requests on the probe control will just keep their current streaming
* parameters.
*/
- if (uvc_get_video_ctrl(video, probe, 1, GET_DEF) == 0)
+ if (uvc_get_video_ctrl(video, probe, 1, UVC_GET_DEF) == 0)
uvc_set_video_ctrl(video, probe, 1);
/* Initialize the streaming parameters with the probe control current
@@ -1073,7 +1076,8 @@ int uvc_video_init(struct uvc_video_device *video)
* control will always use values retrieved from a successful GET_CUR
* request on the probe control, as required by the UVC specification.
*/
- if ((ret = uvc_get_video_ctrl(video, probe, 1, GET_CUR)) < 0)
+ ret = uvc_get_video_ctrl(video, probe, 1, UVC_GET_CUR);
+ if (ret < 0)
return ret;
/* Check if the default format descriptor exists. Use the first
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 3c78d3c1e4c0..5cf68f590725 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -67,155 +67,12 @@ struct uvc_xu_control {
#ifdef __KERNEL__
#include <linux/poll.h>
+#include <linux/usb/video.h>
/* --------------------------------------------------------------------------
* UVC constants
*/
-#define SC_UNDEFINED 0x00
-#define SC_VIDEOCONTROL 0x01
-#define SC_VIDEOSTREAMING 0x02
-#define SC_VIDEO_INTERFACE_COLLECTION 0x03
-
-#define PC_PROTOCOL_UNDEFINED 0x00
-
-#define CS_UNDEFINED 0x20
-#define CS_DEVICE 0x21
-#define CS_CONFIGURATION 0x22
-#define CS_STRING 0x23
-#define CS_INTERFACE 0x24
-#define CS_ENDPOINT 0x25
-
-/* VideoControl class specific interface descriptor */
-#define VC_DESCRIPTOR_UNDEFINED 0x00
-#define VC_HEADER 0x01
-#define VC_INPUT_TERMINAL 0x02
-#define VC_OUTPUT_TERMINAL 0x03
-#define VC_SELECTOR_UNIT 0x04
-#define VC_PROCESSING_UNIT 0x05
-#define VC_EXTENSION_UNIT 0x06
-
-/* VideoStreaming class specific interface descriptor */
-#define VS_UNDEFINED 0x00
-#define VS_INPUT_HEADER 0x01
-#define VS_OUTPUT_HEADER 0x02
-#define VS_STILL_IMAGE_FRAME 0x03
-#define VS_FORMAT_UNCOMPRESSED 0x04
-#define VS_FRAME_UNCOMPRESSED 0x05
-#define VS_FORMAT_MJPEG 0x06
-#define VS_FRAME_MJPEG 0x07
-#define VS_FORMAT_MPEG2TS 0x0a
-#define VS_FORMAT_DV 0x0c
-#define VS_COLORFORMAT 0x0d
-#define VS_FORMAT_FRAME_BASED 0x10
-#define VS_FRAME_FRAME_BASED 0x11
-#define VS_FORMAT_STREAM_BASED 0x12
-
-/* Endpoint type */
-#define EP_UNDEFINED 0x00
-#define EP_GENERAL 0x01
-#define EP_ENDPOINT 0x02
-#define EP_INTERRUPT 0x03
-
-/* Request codes */
-#define RC_UNDEFINED 0x00
-#define SET_CUR 0x01
-#define GET_CUR 0x81
-#define GET_MIN 0x82
-#define GET_MAX 0x83
-#define GET_RES 0x84
-#define GET_LEN 0x85
-#define GET_INFO 0x86
-#define GET_DEF 0x87
-
-/* VideoControl interface controls */
-#define VC_CONTROL_UNDEFINED 0x00
-#define VC_VIDEO_POWER_MODE_CONTROL 0x01
-#define VC_REQUEST_ERROR_CODE_CONTROL 0x02
-
-/* Terminal controls */
-#define TE_CONTROL_UNDEFINED 0x00
-
-/* Selector Unit controls */
-#define SU_CONTROL_UNDEFINED 0x00
-#define SU_INPUT_SELECT_CONTROL 0x01
-
-/* Camera Terminal controls */
-#define CT_CONTROL_UNDEFINED 0x00
-#define CT_SCANNING_MODE_CONTROL 0x01
-#define CT_AE_MODE_CONTROL 0x02
-#define CT_AE_PRIORITY_CONTROL 0x03
-#define CT_EXPOSURE_TIME_ABSOLUTE_CONTROL 0x04
-#define CT_EXPOSURE_TIME_RELATIVE_CONTROL 0x05
-#define CT_FOCUS_ABSOLUTE_CONTROL 0x06
-#define CT_FOCUS_RELATIVE_CONTROL 0x07
-#define CT_FOCUS_AUTO_CONTROL 0x08
-#define CT_IRIS_ABSOLUTE_CONTROL 0x09
-#define CT_IRIS_RELATIVE_CONTROL 0x0a
-#define CT_ZOOM_ABSOLUTE_CONTROL 0x0b
-#define CT_ZOOM_RELATIVE_CONTROL 0x0c
-#define CT_PANTILT_ABSOLUTE_CONTROL 0x0d
-#define CT_PANTILT_RELATIVE_CONTROL 0x0e
-#define CT_ROLL_ABSOLUTE_CONTROL 0x0f
-#define CT_ROLL_RELATIVE_CONTROL 0x10
-#define CT_PRIVACY_CONTROL 0x11
-
-/* Processing Unit controls */
-#define PU_CONTROL_UNDEFINED 0x00
-#define PU_BACKLIGHT_COMPENSATION_CONTROL 0x01
-#define PU_BRIGHTNESS_CONTROL 0x02
-#define PU_CONTRAST_CONTROL 0x03
-#define PU_GAIN_CONTROL 0x04
-#define PU_POWER_LINE_FREQUENCY_CONTROL 0x05
-#define PU_HUE_CONTROL 0x06
-#define PU_SATURATION_CONTROL 0x07
-#define PU_SHARPNESS_CONTROL 0x08
-#define PU_GAMMA_CONTROL 0x09
-#define PU_WHITE_BALANCE_TEMPERATURE_CONTROL 0x0a
-#define PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL 0x0b
-#define PU_WHITE_BALANCE_COMPONENT_CONTROL 0x0c
-#define PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL 0x0d
-#define PU_DIGITAL_MULTIPLIER_CONTROL 0x0e
-#define PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL 0x0f
-#define PU_HUE_AUTO_CONTROL 0x10
-#define PU_ANALOG_VIDEO_STANDARD_CONTROL 0x11
-#define PU_ANALOG_LOCK_STATUS_CONTROL 0x12
-
-#define LXU_MOTOR_PANTILT_RELATIVE_CONTROL 0x01
-#define LXU_MOTOR_PANTILT_RESET_CONTROL 0x02
-#define LXU_MOTOR_FOCUS_MOTOR_CONTROL 0x03
-
-/* VideoStreaming interface controls */
-#define VS_CONTROL_UNDEFINED 0x00
-#define VS_PROBE_CONTROL 0x01
-#define VS_COMMIT_CONTROL 0x02
-#define VS_STILL_PROBE_CONTROL 0x03
-#define VS_STILL_COMMIT_CONTROL 0x04
-#define VS_STILL_IMAGE_TRIGGER_CONTROL 0x05
-#define VS_STREAM_ERROR_CODE_CONTROL 0x06
-#define VS_GENERATE_KEY_FRAME_CONTROL 0x07
-#define VS_UPDATE_FRAME_SEGMENT_CONTROL 0x08
-#define VS_SYNC_DELAY_CONTROL 0x09
-
-#define TT_VENDOR_SPECIFIC 0x0100
-#define TT_STREAMING 0x0101
-
-/* Input Terminal types */
-#define ITT_VENDOR_SPECIFIC 0x0200
-#define ITT_CAMERA 0x0201
-#define ITT_MEDIA_TRANSPORT_INPUT 0x0202
-
-/* Output Terminal types */
-#define OTT_VENDOR_SPECIFIC 0x0300
-#define OTT_DISPLAY 0x0301
-#define OTT_MEDIA_TRANSPORT_OUTPUT 0x0302
-
-/* External Terminal types */
-#define EXTERNAL_VENDOR_SPECIFIC 0x0400
-#define COMPOSITE_CONNECTOR 0x0401
-#define SVIDEO_CONNECTOR 0x0402
-#define COMPONENT_CONNECTOR 0x0403
-
#define UVC_TERM_INPUT 0x0000
#define UVC_TERM_OUTPUT 0x8000
@@ -227,8 +84,6 @@ struct uvc_xu_control {
#define UVC_ENTITY_IS_OTERM(entity) \
(((entity)->type & 0x8000) == UVC_TERM_OUTPUT)
-#define UVC_STATUS_TYPE_CONTROL 1
-#define UVC_STATUS_TYPE_STREAMING 2
/* ------------------------------------------------------------------------
* GUIDs
@@ -249,19 +104,6 @@ struct uvc_xu_control {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02}
-#define UVC_GUID_LOGITECH_DEV_INFO \
- {0x82, 0x06, 0x61, 0x63, 0x70, 0x50, 0xab, 0x49, \
- 0xb8, 0xcc, 0xb3, 0x85, 0x5e, 0x8d, 0x22, 0x1e}
-#define UVC_GUID_LOGITECH_USER_HW \
- {0x82, 0x06, 0x61, 0x63, 0x70, 0x50, 0xab, 0x49, \
- 0xb8, 0xcc, 0xb3, 0x85, 0x5e, 0x8d, 0x22, 0x1f}
-#define UVC_GUID_LOGITECH_VIDEO \
- {0x82, 0x06, 0x61, 0x63, 0x70, 0x50, 0xab, 0x49, \
- 0xb8, 0xcc, 0xb3, 0x85, 0x5e, 0x8d, 0x22, 0x50}
-#define UVC_GUID_LOGITECH_MOTOR \
- {0x82, 0x06, 0x61, 0x63, 0x70, 0x50, 0xab, 0x49, \
- 0xb8, 0xcc, 0xb3, 0x85, 0x5e, 0x8d, 0x22, 0x56}
-
#define UVC_GUID_FORMAT_MJPEG \
{ 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
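Every bare constant deleted above now lives in <linux/usb/video.h> with a UVC_ prefix, which is what all the call-site changes earlier in this patch amount to. A minimal sketch of the new usage, with query_pu_brightness_min() as a made-up helper name and the uvc_query_ctrl() signature taken from the hunks above:

#include <linux/usb/video.h>

/* Hypothetical helper: read the minimum brightness of a processing unit.
 * Before this patch the request/selector were the driver-private
 * GET_MIN / PU_BRIGHTNESS_CONTROL; afterwards they are the shared
 * UVC_GET_MIN / UVC_PU_BRIGHTNESS_CONTROL names. */
static int query_pu_brightness_min(struct uvc_video_device *video,
                                   struct uvc_entity *pu, __u8 *data)
{
        return uvc_query_ctrl(video->dev, UVC_GET_MIN, pu->id,
                              video->dev->intfnum,
                              UVC_PU_BRIGHTNESS_CONTROL, data, 2);
}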
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index be64a502ea27..48840e95f943 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -42,6 +42,12 @@
printk(KERN_DEBUG "%s: " fmt, vfd->name, ## arg);\
} while (0)
+#define dbgarg3(fmt, arg...) \
+ do { \
+ if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) \
+ printk(KERN_CONT "%s: " fmt, vfd->name, ## arg);\
+ } while (0)
+
/* Zero out the end of the struct pointed to by p. Everything after, but
* not including, the specified field is cleared. */
#define CLEAR_AFTER_FIELD(p, field) \
@@ -1717,24 +1723,29 @@ static long __video_do_ioctl(struct file *file,
ret = ops->vidioc_enum_framesizes(file, fh, p);
dbgarg(cmd,
- "index=%d, pixelformat=%d, type=%d ",
- p->index, p->pixel_format, p->type);
+ "index=%d, pixelformat=%c%c%c%c, type=%d ",
+ p->index,
+ (p->pixel_format & 0xff),
+ (p->pixel_format >> 8) & 0xff,
+ (p->pixel_format >> 16) & 0xff,
+ (p->pixel_format >> 24) & 0xff,
+ p->type);
switch (p->type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
- dbgarg2("width = %d, height=%d\n",
+ dbgarg3("width = %d, height=%d\n",
p->discrete.width, p->discrete.height);
break;
case V4L2_FRMSIZE_TYPE_STEPWISE:
- dbgarg2("min %dx%d, max %dx%d, step %dx%d\n",
+ dbgarg3("min %dx%d, max %dx%d, step %dx%d\n",
p->stepwise.min_width, p->stepwise.min_height,
p->stepwise.step_width, p->stepwise.step_height,
p->stepwise.max_width, p->stepwise.max_height);
break;
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
- dbgarg2("continuous\n");
+ dbgarg3("continuous\n");
break;
default:
- dbgarg2("- Unknown type!\n");
+ dbgarg3("- Unknown type!\n");
}
break;
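
The hunk above switches the enum_framesizes debug output from printing the pixelformat as a raw decimal to unpacking it into its four fourcc characters. As a rough illustration only (a hypothetical standalone helper, not part of this patch), the same shift-and-mask decoding looks like this in plain C:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper: print a V4L2 fourcc as its four ASCII characters,
 * mirroring the decoding used in the dbgarg() call above. */
static void print_fourcc(uint32_t fourcc)
{
	printf("%c%c%c%c\n",
	       (int)(fourcc & 0xff),
	       (int)((fourcc >> 8) & 0xff),
	       (int)((fourcc >> 16) & 0xff),
	       (int)((fourcc >> 24) & 0xff));
}

int main(void)
{
	print_fourcc(0x56595559);	/* 'Y','U','Y','V' packed little-endian */
	return 0;
}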
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index cd7266858462..7705fc6baf00 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -343,6 +343,53 @@ static struct bar_std bars[] = {
#define TO_U(r, g, b) \
(((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128)
+/* precalculate color bar values to speed up rendering */
+static void precalculate_bars(struct vivi_fh *fh)
+{
+ struct vivi_dev *dev = fh->dev;
+ unsigned char r, g, b;
+ int k, is_yuv;
+
+ fh->input = dev->input;
+
+ for (k = 0; k < 8; k++) {
+ r = bars[fh->input].bar[k][0];
+ g = bars[fh->input].bar[k][1];
+ b = bars[fh->input].bar[k][2];
+ is_yuv = 0;
+
+ switch (fh->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ is_yuv = 1;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ r >>= 3;
+ g >>= 2;
+ b >>= 3;
+ break;
+ case V4L2_PIX_FMT_RGB555:
+ case V4L2_PIX_FMT_RGB555X:
+ r >>= 3;
+ g >>= 3;
+ b >>= 3;
+ break;
+ }
+
+ if (is_yuv) {
+ fh->bars[k][0] = TO_Y(r, g, b); /* Luma */
+ fh->bars[k][1] = TO_U(r, g, b); /* Cb */
+ fh->bars[k][2] = TO_V(r, g, b); /* Cr */
+ } else {
+ fh->bars[k][0] = r;
+ fh->bars[k][1] = g;
+ fh->bars[k][2] = b;
+ }
+ }
+
+}
+
#define TSTAMP_MIN_Y 24
#define TSTAMP_MAX_Y (TSTAMP_MIN_Y + 15)
#define TSTAMP_INPUT_X 10
@@ -755,6 +802,8 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
buf->vb.height = fh->height;
buf->vb.field = field;
+ precalculate_bars(fh);
+
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc < 0)
@@ -893,53 +942,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-/* precalculate color bar values to speed up rendering */
-static void precalculate_bars(struct vivi_fh *fh)
-{
- struct vivi_dev *dev = fh->dev;
- unsigned char r, g, b;
- int k, is_yuv;
-
- fh->input = dev->input;
-
- for (k = 0; k < 8; k++) {
- r = bars[fh->input].bar[k][0];
- g = bars[fh->input].bar[k][1];
- b = bars[fh->input].bar[k][2];
- is_yuv = 0;
-
- switch (fh->fmt->fourcc) {
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_UYVY:
- is_yuv = 1;
- break;
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_RGB565X:
- r >>= 3;
- g >>= 2;
- b >>= 3;
- break;
- case V4L2_PIX_FMT_RGB555:
- case V4L2_PIX_FMT_RGB555X:
- r >>= 3;
- g >>= 3;
- b >>= 3;
- break;
- }
-
- if (is_yuv) {
- fh->bars[k][0] = TO_Y(r, g, b); /* Luma */
- fh->bars[k][1] = TO_U(r, g, b); /* Cb */
- fh->bars[k][2] = TO_V(r, g, b); /* Cr */
- } else {
- fh->bars[k][0] = r;
- fh->bars[k][1] = g;
- fh->bars[k][2] = b;
- }
- }
-
-}
-
/*FIXME: This seems to be generic enough to be at videodev2 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
@@ -965,8 +967,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
- precalculate_bars(fh);
-
ret = 0;
out:
mutex_unlock(&q->vb_lock);
@@ -1357,6 +1357,7 @@ static int __init vivi_create_instance(int inst)
goto unreg_dev;
*vfd = vivi_template;
+ vfd->debug = debug;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
if (ret < 0)
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 13e7d7bfe85f..8ff10cb77cac 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -465,14 +465,14 @@ static int ab3100_get_set_reg_open_file(struct inode *inode, struct file *file)
return 0;
}
-static int ab3100_get_set_reg(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t ab3100_get_set_reg(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ab3100_get_set_reg_priv *priv = file->private_data;
struct ab3100 *ab3100 = priv->ab3100;
char buf[32];
- int buf_size;
+ ssize_t buf_size;
int regp;
unsigned long user_reg;
int err;
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 7ac12cb0be4a..5b6e58a3ba46 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -32,8 +32,7 @@
* This driver was tested with firmware revision A4.
*/
-#if defined(CONFIG_KEYBOARD_DM355EVM) \
- || defined(CONFIG_KEYBOARD_DM355EVM_MODULE)
+#if defined(CONFIG_INPUT_DM355EVM) || defined(CONFIG_INPUT_DM355EVM_MODULE)
#define msp_has_keyboard() true
#else
#define msp_has_keyboard() false
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 671a7efe86a8..86d394894d81 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#define PCAP_ADC_MAXQ 8
struct pcap_adc_request {
@@ -106,11 +107,35 @@ int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);
+int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
+{
+ int ret;
+ u32 tmp = PCAP_REGISTER_READ_OP_BIT |
+ (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
+
+ mutex_lock(&pcap->io_mutex);
+ ret = ezx_pcap_putget(pcap, &tmp);
+ if (ret)
+ goto out_unlock;
+
+ tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
+ tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
+ (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
+
+ ret = ezx_pcap_putget(pcap, &tmp);
+out_unlock:
+ mutex_unlock(&pcap->io_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);
+
/* IRQ */
-static inline unsigned int irq2pcap(struct pcap_chip *pcap, int irq)
+int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
- return 1 << (irq - pcap->irq_base);
+ return irq - pcap->irq_base;
}
+EXPORT_SYMBOL_GPL(irq_to_pcap);
int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
@@ -122,7 +147,7 @@ static void pcap_mask_irq(unsigned int irq)
{
struct pcap_chip *pcap = get_irq_chip_data(irq);
- pcap->msr |= irq2pcap(pcap, irq);
+ pcap->msr |= 1 << irq_to_pcap(pcap, irq);
queue_work(pcap->workqueue, &pcap->msr_work);
}
@@ -130,7 +155,7 @@ static void pcap_unmask_irq(unsigned int irq)
{
struct pcap_chip *pcap = get_irq_chip_data(irq);
- pcap->msr &= ~irq2pcap(pcap, irq);
+ pcap->msr &= ~(1 << irq_to_pcap(pcap, irq));
queue_work(pcap->workqueue, &pcap->msr_work);
}
@@ -154,34 +179,38 @@ static void pcap_isr_work(struct work_struct *work)
u32 msr, isr, int_sel, service;
int irq;
- ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
- ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);
+ do {
+ ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
+ ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);
- /* We cant service/ack irqs that are assigned to port 2 */
- if (!(pdata->config & PCAP_SECOND_PORT)) {
- ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
- isr &= ~int_sel;
- }
- ezx_pcap_write(pcap, PCAP_REG_ISR, isr);
+		/* We can't service/ack irqs that are assigned to port 2 */
+ if (!(pdata->config & PCAP_SECOND_PORT)) {
+ ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
+ isr &= ~int_sel;
+ }
- local_irq_disable();
- service = isr & ~msr;
+ ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
+ ezx_pcap_write(pcap, PCAP_REG_ISR, isr);
- for (irq = pcap->irq_base; service; service >>= 1, irq++) {
- if (service & 1) {
- struct irq_desc *desc = irq_to_desc(irq);
+ local_irq_disable();
+ service = isr & ~msr;
+ for (irq = pcap->irq_base; service; service >>= 1, irq++) {
+ if (service & 1) {
+ struct irq_desc *desc = irq_to_desc(irq);
- if (WARN(!desc, KERN_WARNING
- "Invalid PCAP IRQ %d\n", irq))
- break;
+ if (WARN(!desc, KERN_WARNING
+ "Invalid PCAP IRQ %d\n", irq))
+ break;
- if (desc->status & IRQ_DISABLED)
- note_interrupt(irq, desc, IRQ_NONE);
- else
- desc->handle_irq(irq, desc);
+ if (desc->status & IRQ_DISABLED)
+ note_interrupt(irq, desc, IRQ_NONE);
+ else
+ desc->handle_irq(irq, desc);
+ }
}
- }
- local_irq_enable();
+ local_irq_enable();
+ ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
+ } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
}
static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -194,6 +223,19 @@ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
}
/* ADC */
+void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
+{
+ u32 tmp;
+
+ mutex_lock(&pcap->adc_mutex);
+ ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
+ tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
+ tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
+ ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
+ mutex_unlock(&pcap->adc_mutex);
+}
+EXPORT_SYMBOL_GPL(pcap_set_ts_bits);
+
static void pcap_disable_adc(struct pcap_chip *pcap)
{
u32 tmp;
@@ -216,15 +258,16 @@ static void pcap_adc_trigger(struct pcap_chip *pcap)
mutex_unlock(&pcap->adc_mutex);
return;
}
- mutex_unlock(&pcap->adc_mutex);
-
- /* start conversion on requested bank */
- tmp = pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;
+ /* start conversion on requested bank, save TS_M bits */
+ ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
+ tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
+ tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;
if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
tmp |= PCAP_ADC_AD_SEL1;
ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
+ mutex_unlock(&pcap->adc_mutex);
ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}
@@ -238,8 +281,10 @@ static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
mutex_lock(&pcap->adc_mutex);
req = pcap->adc_queue[pcap->adc_head];
- if (WARN(!req, KERN_WARNING "adc irq without pending request\n"))
+ if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
+ mutex_unlock(&pcap->adc_mutex);
return IRQ_HANDLED;
+ }
/* read requested channels results */
ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 4c7b7962f6b8..0cc5eeff5ee8 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -367,7 +367,8 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
break;
default:
- return -1;
+ gate = -1;
+ goto already;
}
writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index bae61b22501c..e57f778f405a 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -441,7 +441,7 @@ static void twl4030_sih_do_edge(struct work_struct *work)
/* see what work we have */
spin_lock_irq(&sih_agent_lock);
edge_change = agent->edge_change;
- agent->edge_change = 0;;
+ agent->edge_change = 0;
sih = edge_change ? agent->sih : NULL;
spin_unlock_irq(&sih_agent_lock);
if (!sih)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 68ab39d7cb35..d16b79fcc8fd 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -76,6 +76,35 @@ config IBM_ASM
information on the specific driver level and support statement
for your IBM server.
+config HWLAT_DETECTOR
+ tristate "Testing module to detect hardware-induced latencies"
+ depends on DEBUG_FS
+ depends on RING_BUFFER
+ default m
+ ---help---
+	  A simple hardware latency detector. Use this module to detect
+	  large latencies introduced by the behavior of the underlying
+	  system firmware external to Linux. We do this by periodically
+	  using stop_machine to grab all available CPUs and measure for
+	  unexplainable gaps in the CPU timestamp counter(s). By
+	  default, the module is not enabled until the "enable" file
+	  within the "hwlat_detector" debugfs directory is toggled.
+
+	  This module is often used to detect SMI (System Management
+	  Interrupts) on x86 systems, though it is not x86 specific. To
+	  this end, we default to using a sample window of 1 second,
+	  during which we sample for 0.5 seconds. If an SMI or
+	  similar event occurs during that time, it is recorded
+	  into an 8K-sample global ring buffer until retrieved.
+
+ WARNING: This software should never be enabled (it can be built
+ but should not be turned on after it is loaded) in a production
+ environment where high latencies are a concern since the
+ sampling mechanism actually introduces latencies for
+ regular tasks while the CPU(s) are being held.
+
+ If unsure, say N
+
config PHANTOM
tristate "Sensable PHANToM (PCI)"
depends on PCI
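
The help text above describes an on-demand debugfs interface rather than a module parameter. As a minimal, hedged sketch of how that interface might be driven from userspace (it assumes debugfs is mounted at /sys/kernel/debug and uses the file names created by the driver later in this patch), a reader could look like this:

#include <stdio.h>

/* Assumed debugfs mount point; adjust if debugfs is mounted elsewhere. */
#define HWLAT_DIR "/sys/kernel/debug/hwlat_detector"

int main(void)
{
	char line[128];
	FILE *enable = fopen(HWLAT_DIR "/enable", "w");
	FILE *sample;

	if (!enable) {
		perror("enable");
		return 1;
	}
	fputs("1\n", enable);		/* kick off the sampling kthread */
	fclose(enable);

	sample = fopen(HWLAT_DIR "/sample", "r");
	if (!sample) {
		perror("sample");
		return 1;
	}
	/* Each line is "seconds.nanoseconds<tab>duration_us"; in blocking
	 * mode, reads wait until a latency above the threshold is seen. */
	while (fgets(line, sizeof(line), sample))
		fputs(line, stdout);
	fclose(sample);
	return 0;
}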
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 36f733cd60e6..bec53c3dc780 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -20,5 +20,6 @@ obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_HP_ILO) += hpilo.o
obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_C2PORT) += c2port/
+obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
obj-y += eeprom/
obj-y += cb710/
diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c
new file mode 100644
index 000000000000..e02d8e17b484
--- /dev/null
+++ b/drivers/misc/hwlat_detector.c
@@ -0,0 +1,1208 @@
+/*
+ * hwlat_detector.c - A simple Hardware Latency detector.
+ *
+ * Use this module to detect large system latencies induced by the behavior of
+ * certain underlying system hardware or firmware, independent of Linux itself.
+ * The code was developed originally to detect the presence of SMIs on Intel
+ * and AMD systems, although there is no dependency upon x86 herein.
+ *
+ * The classical example usage of this module is in detecting the presence of
+ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
+ * somewhat special form of hardware interrupt spawned from earlier CPU debug
+ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
+ * LPC (or other device) to generate a special interrupt under certain
+ * circumstances, for example, upon expiration of a special SMI timer device,
+ * due to certain external thermal readings, on certain I/O address accesses,
+ * and other situations. An SMI hits a special CPU pin, triggers a special
+ * SMI mode (complete with special memory map), and the OS is unaware.
+ *
+ * Although certain hardware-induced latencies are necessary (for example,
+ * a modern system often requires an SMI handler for correct thermal control
+ * and remote management), they can wreak havoc upon any OS-level performance
+ * guarantees toward low latency, especially when the OS is not even made
+ * aware of the presence of these interrupts. For this reason, we need a
+ * somewhat brute force mechanism to detect these interrupts. In this case,
+ * we do it by hogging all of the CPU(s) for configurable timer intervals,
+ * sampling the built-in CPU timer, looking for discontiguous readings.
+ *
+ * WARNING: This implementation necessarily introduces latencies. Therefore,
+ * you should NEVER use this module in a production environment
+ * requiring any kind of low-latency performance guarantee(s).
+ *
+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
+ *
+ * Includes useful feedback from Clark Williams <clark@redhat.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ring_buffer.h>
+#include <linux/stop_machine.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+
+#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
+#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
+#define U64STR_SIZE 22 /* 20 digits max */
+
+#define VERSION "1.0.0"
+#define BANNER "hwlat_detector: "
+#define DRVNAME "hwlat_detector"
+#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
+#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
+#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
+
+/* Module metadata */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
+MODULE_DESCRIPTION("A simple hardware latency detector");
+MODULE_VERSION(VERSION);
+
+/* Module parameters */
+
+static int debug;
+static int enabled;
+static int threshold;
+
+module_param(debug, int, 0); /* enable debug */
+module_param(enabled, int, 0); /* enable detector */
+module_param(threshold, int, 0); /* latency threshold */
+
+/* Buffering and sampling */
+
+static struct ring_buffer *ring_buffer; /* sample buffer */
+static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */
+static unsigned long buf_size = BUF_SIZE_DEFAULT;
+static struct task_struct *kthread; /* sampling thread */
+
+/* DebugFS filesystem entries */
+
+static struct dentry *debug_dir; /* debugfs directory */
+static struct dentry *debug_max; /* maximum TSC delta */
+static struct dentry *debug_count; /* total detect count */
+static struct dentry *debug_sample_width; /* sample width us */
+static struct dentry *debug_sample_window; /* sample window us */
+static struct dentry *debug_sample; /* raw samples us */
+static struct dentry *debug_threshold; /* threshold us */
+static struct dentry *debug_enable; /* enable/disable */
+
+/* Individual samples and global state */
+
+struct sample; /* latency sample */
+struct data; /* Global state */
+
+/* Sampling functions */
+static int __buffer_add_sample(struct sample *sample);
+static struct sample *buffer_get_sample(struct sample *sample);
+static int get_sample(void *unused);
+
+/* Threading and state */
+static int kthread_fn(void *unused);
+static int start_kthread(void);
+static int stop_kthread(void);
+static void __reset_stats(void);
+static int init_stats(void);
+
+/* Debugfs interface */
+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos, const u64 *entry);
+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, u64 *entry);
+static int debug_sample_fopen(struct inode *inode, struct file *filp);
+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+static int debug_sample_release(struct inode *inode, struct file *filp);
+static int debug_enable_fopen(struct inode *inode, struct file *filp);
+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+static ssize_t debug_enable_fwrite(struct file *file,
+ const char __user *user_buffer,
+ size_t user_size, loff_t *offset);
+
+/* Initialization functions */
+static int init_debugfs(void);
+static void free_debugfs(void);
+static int detector_init(void);
+static void detector_exit(void);
+
+/* Individual latency samples are stored here when detected and packed into
+ * the ring_buffer circular buffer, where they are overwritten when
+ * more than buf_size/sizeof(sample) samples are received. */
+struct sample {
+ u64 seqnum; /* unique sequence */
+ u64 duration; /* ktime delta */
+ struct timespec timestamp; /* wall time */
+};
+
+/* keep the global state somewhere. Mostly used under stop_machine. */
+static struct data {
+
+ struct mutex lock; /* protect changes */
+
+ u64 count; /* total since reset */
+ u64 max_sample; /* max hardware latency */
+ u64 threshold; /* sample threshold level */
+
+ u64 sample_window; /* total sampling window (on+off) */
+ u64 sample_width; /* active sampling portion of window */
+
+ atomic_t sample_open; /* whether the sample file is open */
+
+	wait_queue_head_t wq;		/* waitqueue for new sample values */
+
+} data;
+
+/**
+ * __buffer_add_sample - add a new latency sample recording to the ring buffer
+ * @sample: The new latency sample value
+ *
+ * This receives a new latency sample and records it in a global ring buffer.
+ * No additional locking is used in this case - suited for stop_machine use.
+ */
+static int __buffer_add_sample(struct sample *sample)
+{
+ return ring_buffer_write(ring_buffer,
+ sizeof(struct sample), sample);
+}
+
+/**
+ * buffer_get_sample - remove a hardware latency sample from the ring buffer
+ * @sample: Pre-allocated storage for the sample
+ *
+ * This retrieves a hardware latency sample from the global circular buffer
+ */
+static struct sample *buffer_get_sample(struct sample *sample)
+{
+ struct ring_buffer_event *e = NULL;
+ struct sample *s = NULL;
+ unsigned int cpu = 0;
+
+ if (!sample)
+ return NULL;
+
+ /* ring_buffers are per-cpu but we just want any value */
+ /* so we'll start with this cpu and try others if not */
+ /* Steven is planning to add a generic mechanism */
+ mutex_lock(&ring_buffer_mutex);
+ e = ring_buffer_consume(ring_buffer, smp_processor_id(), NULL);
+ if (!e) {
+ for_each_online_cpu(cpu) {
+ e = ring_buffer_consume(ring_buffer, cpu, NULL);
+ if (e)
+ break;
+ }
+ }
+
+ if (e) {
+ s = ring_buffer_event_data(e);
+ memcpy(sample, s, sizeof(struct sample));
+ } else
+ sample = NULL;
+ mutex_unlock(&ring_buffer_mutex);
+
+ return sample;
+}
+
+/**
+ * get_sample - sample the CPU TSC and look for likely hardware latencies
+ * @unused: This is not used but is a part of the stop_machine API
+ *
+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
+ * hardware-induced latency. Called under stop_machine, with data.lock held.
+ */
+static int get_sample(void *unused)
+{
+ ktime_t start, t1, t2;
+ s64 diff, total = 0;
+ u64 sample = 0;
+ int ret = 1;
+
+ start = ktime_get(); /* start timestamp */
+
+ do {
+
+ t1 = ktime_get(); /* we'll look for a discontinuity */
+ t2 = ktime_get();
+
+ total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
+ diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
+
+ /* This shouldn't happen */
+ if (diff < 0) {
+ printk(KERN_ERR BANNER "time running backwards\n");
+ goto out;
+ }
+
+ if (diff > sample)
+ sample = diff; /* only want highest value */
+
+ } while (total <= data.sample_width);
+
+ /* If we exceed the threshold value, we have found a hardware latency */
+ if (sample > data.threshold) {
+ struct sample s;
+
+ data.count++;
+ s.seqnum = data.count;
+ s.duration = sample;
+ s.timestamp = CURRENT_TIME;
+ __buffer_add_sample(&s);
+
+ /* Keep a running maximum ever recorded hardware latency */
+ if (sample > data.max_sample)
+ data.max_sample = sample;
+
+ wake_up(&data.wq); /* wake up reader(s) */
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+/*
+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
+ * @unused: A required part of the kthread API.
+ *
+ * Used to periodically sample the CPU TSC via a call to get_sample. We
+ * use stop_machine, which does (intentionally) introduce latency since we
+ * need to ensure nothing else might be running (and thus pre-empting).
+ * Obviously this should never be used in production environments.
+ *
+ * stop_machine will schedule us typically only on CPU0 which is fine for
+ * almost every real-world hardware latency situation - but we might later
+ * generalize this if we find there are any actual systems with alternate
+ * SMI delivery or other non CPU0 hardware latencies.
+ */
+static int kthread_fn(void *unused)
+{
+ int err = 0;
+ u64 interval = 0;
+
+ while (!kthread_should_stop()) {
+
+ mutex_lock(&data.lock);
+
+ err = stop_machine(get_sample, unused, 0);
+ if (err) {
+ /* Houston, we have a problem */
+ mutex_unlock(&data.lock);
+ goto err_out;
+ }
+
+ interval = data.sample_window - data.sample_width;
+ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
+
+ mutex_unlock(&data.lock);
+
+ if (msleep_interruptible(interval))
+ goto out;
+ }
+ goto out;
+err_out:
+ printk(KERN_ERR BANNER "could not call stop_machine, disabling\n");
+ enabled = 0;
+out:
+ return err;
+
+}
+
+/**
+ * start_kthread - Kick off the hardware latency sampling/detector kthread
+ *
+ * This starts a kernel thread that will sit and sample the CPU timestamp
+ * counter (TSC or similar) and look for potential hardware latencies.
+ */
+static int start_kthread(void)
+{
+ kthread = kthread_run(kthread_fn, NULL,
+ DRVNAME);
+ if (IS_ERR(kthread)) {
+ printk(KERN_ERR BANNER "could not start sampling thread\n");
+ enabled = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
+ *
+ * This kicks the running hardware latency sampling/detector kernel thread and
+ * tells it to stop sampling now. Use this on unload and at system shutdown.
+ */
+static int stop_kthread(void)
+{
+ int ret;
+
+ ret = kthread_stop(kthread);
+
+ return ret;
+}
+
+/**
+ * __reset_stats - Reset statistics for the hardware latency detector
+ *
+ * We use data to store various statistics and global state. We call this
+ * function in order to reset those when "enable" is toggled on or off, and
+ * also at initialization. Should be called with data.lock held.
+ */
+static void __reset_stats(void)
+{
+ data.count = 0;
+ data.max_sample = 0;
+ ring_buffer_reset(ring_buffer); /* flush out old sample entries */
+}
+
+/**
+ * init_stats - Setup global state statistics for the hardware latency detector
+ *
+ * We use data to store various statistics and global state. We also use
+ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
+ * induced system latencies. This function initializes these structures and
+ * allocates the global ring buffer also.
+ */
+static int init_stats(void)
+{
+ int ret = -ENOMEM;
+
+ mutex_init(&data.lock);
+ init_waitqueue_head(&data.wq);
+ atomic_set(&data.sample_open, 0);
+
+ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
+
+ if (WARN(!ring_buffer, KERN_ERR BANNER
+ "failed to allocate ring buffer!\n"))
+ goto out;
+
+ __reset_stats();
+ data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */
+ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
+ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
+
+ ret = 0;
+
+out:
+ return ret;
+
+}
+
+/*
+ * simple_data_read - Wrapper read function for global state debugfs entries
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ * @entry: The entry to read from
+ *
+ * This function provides a generic read implementation for the global state
+ * "data" structure debugfs filesystem entries. It would be nice to use
+ * simple_attr_read directly, but we need to make sure that the data.lock
+ * mutex is held during the actual read (even though we likely won't ever
+ * actually race here as the updater runs under a stop_machine context).
+ */
+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos, const u64 *entry)
+{
+ char buf[U64STR_SIZE];
+ u64 val = 0;
+ int len = 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ if (!entry)
+ return -EFAULT;
+
+ mutex_lock(&data.lock);
+ val = *entry;
+ mutex_unlock(&data.lock);
+
+ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+
+}
+
+/*
+ * simple_data_write - Wrapper write function for global state debugfs entries
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to write value from
+ * @cnt: The maximum number of bytes to write
+ * @ppos: The current "file" position
+ * @entry: The entry to write to
+ *
+ * This function provides a generic write implementation for the global state
+ * "data" structure debugfs filesystem entries. It would be nice to use
+ * simple_attr_write directly, but we need to make sure that the data.lock
+ * mutex is held during the actual write (even though we likely won't ever
+ * actually race here as the updater runs under a stop_machine context).
+ */
+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, u64 *entry)
+{
+ char buf[U64STR_SIZE];
+ int csize = min(cnt, sizeof(buf));
+ u64 val = 0;
+ int err = 0;
+
+ memset(buf, '\0', sizeof(buf));
+ if (copy_from_user(buf, ubuf, csize))
+ return -EFAULT;
+
+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
+ err = strict_strtoull(buf, 10, &val);
+ if (err)
+ return -EINVAL;
+
+ mutex_lock(&data.lock);
+ *entry = val;
+ mutex_unlock(&data.lock);
+
+ return csize;
+}
+
+/**
+ * debug_count_fopen - Open function for "count" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "count" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_count_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_count_fread - Read function for "count" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "count" debugfs
+ * interface to the hardware latency detector. Can be used to read the
+ * number of latency readings exceeding the configured threshold since
+ * the detector was last reset (e.g. by writing a zero into "count").
+ */
+static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
+}
+
+/**
+ * debug_count_fwrite - Write function for "count" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "count" debugfs
+ * interface to the hardware latency detector. Can be used to write a
+ * desired value, especially to zero the total count.
+ */
+static ssize_t debug_count_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
+}
+
+/**
+ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "enable" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_enable_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_enable_fread - Read function for "enable" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "enable" debugfs
+ * interface to the hardware latency detector. Can be used to determine
+ * whether the detector is currently enabled ("0\n" or "1\n" returned).
+ */
+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[4];
+
+ if ((cnt < sizeof(buf)) || (*ppos))
+ return 0;
+
+ buf[0] = enabled ? '1' : '0';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ if (copy_to_user(ubuf, buf, strlen(buf)))
+ return -EFAULT;
+ return *ppos = strlen(buf);
+}
+
+/**
+ * debug_enable_fwrite - Write function for "enable" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "enable" debugfs
+ * interface to the hardware latency detector. Can be used to enable or
+ * disable the detector, which will have the side-effect of possibly
+ * also resetting the global stats and kicking off the measuring
+ * kthread (on an enable) or the converse (upon a disable).
+ */
+static ssize_t debug_enable_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ char buf[4];
+ int csize = min(cnt, sizeof(buf));
+ long val = 0;
+ int err = 0;
+
+ memset(buf, '\0', sizeof(buf));
+ if (copy_from_user(buf, ubuf, csize))
+ return -EFAULT;
+
+ buf[sizeof(buf)-1] = '\0'; /* just in case */
+ err = strict_strtoul(buf, 10, &val);
+ if (0 != err)
+ return -EINVAL;
+
+ if (val) {
+ if (enabled)
+ goto unlock;
+ enabled = 1;
+ __reset_stats();
+ if (start_kthread())
+ return -EFAULT;
+ } else {
+ if (!enabled)
+ goto unlock;
+ enabled = 0;
+ stop_kthread();
+ wake_up(&data.wq); /* reader(s) should return */
+ }
+unlock:
+ return csize;
+}
+
+/**
+ * debug_max_fopen - Open function for "max" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "max" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_max_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_max_fread - Read function for "max" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "max" debugfs
+ * interface to the hardware latency detector. Can be used to determine
+ * the maximum latency value observed since it was last reset.
+ */
+static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
+}
+
+/**
+ * debug_max_fwrite - Write function for "max" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "max" debugfs
+ * interface to the hardware latency detector. Can be used to reset the
+ * maximum or set it to some other desired value - if, then, subsequent
+ * measurements exceed this value, the maximum will be updated.
+ */
+static ssize_t debug_max_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
+}
+
+
+/**
+ * debug_sample_fopen - An open function for "sample" debugfs interface
+ * @inode: The in-kernel inode representation of this debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function handles opening the "sample" file within the hardware
+ * latency detector debugfs directory interface. This file is used to read
+ * raw samples from the global ring_buffer and allows the user to see a
+ * running latency history. Can be opened blocking or non-blocking,
+ * affecting whether it behaves as a buffer read pipe, or does not.
+ * Implements simple locking to prevent multiple simultaneous use.
+ */
+static int debug_sample_fopen(struct inode *inode, struct file *filp)
+{
+ if (!atomic_add_unless(&data.sample_open, 1, 1))
+ return -EBUSY;
+ else
+ return 0;
+}
+
+/**
+ * debug_sample_fread - A read function for "sample" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that will contain the samples read
+ * @cnt: The maximum bytes to read from the debugfs "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function handles reading from the "sample" file within the hardware
+ * latency detector debugfs directory interface. This file is used to read
+ * raw samples from the global ring_buffer and allows the user to see a
+ * running latency history. By default this will block pending a new
+ * value written into the sample buffer, unless there are already a
+ * number of value(s) waiting in the buffer, or the sample file was
+ * previously opened in a non-blocking mode of operation.
+ */
+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ int len = 0;
+ char buf[64];
+ struct sample *sample = NULL;
+
+ if (!enabled)
+ return 0;
+
+ sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
+ if (!sample)
+ return -ENOMEM;
+
+ while (!buffer_get_sample(sample)) {
+
+ DEFINE_WAIT(wait);
+
+ if (filp->f_flags & O_NONBLOCK) {
+ len = -EAGAIN;
+ goto out;
+ }
+
+ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&data.wq, &wait);
+
+ if (signal_pending(current)) {
+ len = -EINTR;
+ goto out;
+ }
+
+ if (!enabled) { /* enable was toggled */
+ len = 0;
+ goto out;
+ }
+ }
+
+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
+ sample->timestamp.tv_sec,
+ sample->timestamp.tv_nsec,
+ sample->duration);
+
+
+ /* handling partial reads is more trouble than it's worth */
+ if (len > cnt)
+ goto out;
+
+ if (copy_to_user(ubuf, buf, len))
+ len = -EFAULT;
+
+out:
+ kfree(sample);
+ return len;
+}
+
+/**
+ * debug_sample_release - Release function for "sample" debugfs interface
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function completes the close of the debugfs interface "sample" file.
+ * Frees the sample_open "lock" so that other users may open the interface.
+ */
+static int debug_sample_release(struct inode *inode, struct file *filp)
+{
+ atomic_dec(&data.sample_open);
+
+ return 0;
+}
+
+/**
+ * debug_threshold_fopen - Open function for "threshold" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "threshold" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_threshold_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_threshold_fread - Read function for "threshold" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "threshold" debugfs
+ * interface to the hardware latency detector. It can be used to determine
+ * the current threshold level at which a latency will be recorded in the
+ * global ring buffer, typically on the order of 10us.
+ */
+static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
+}
+
+/**
+ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "threshold" debugfs
+ * interface to the hardware latency detector. It can be used to configure
+ * the threshold level at which any subsequently detected latencies will
+ * be recorded into the global ring buffer.
+ */
+static ssize_t debug_threshold_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ int ret;
+
+ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
+
+ if (enabled)
+ wake_up_process(kthread);
+
+ return ret;
+}
+
+/**
+ * debug_width_fopen - Open function for "width" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "width" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_width_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_width_fread - Read function for "width" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "width" debugfs
+ * interface to the hardware latency detector. It can be used to determine
+ * for how many us of the total window we will actively sample for any
+ * hardware-induced latency periods. Obviously, it is not possible to
+ * sample constantly and still have the system respond to a sample reader
+ * or, worse, avoid appearing to have gone out to lunch.
+ */
+static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
+}
+
+/**
+ * debug_width_fwrite - Write function for "width" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "width" debugfs
+ * interface to the hardware latency detector. It can be used to configure
+ * for how many us of the total window we will actively sample for any
+ * hardware-induced latency periods. Obviously, it is not possible to
+ * sample constantly and still have the system respond to a sample reader
+ * or, worse, avoid appearing to have gone out to lunch. It
+ * is enforced that the width is less than the total window size.
+ */
+static ssize_t debug_width_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ char buf[U64STR_SIZE];
+ int csize = min(cnt, sizeof(buf));
+ u64 val = 0;
+ int err = 0;
+
+ memset(buf, '\0', sizeof(buf));
+ if (copy_from_user(buf, ubuf, csize))
+ return -EFAULT;
+
+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
+ err = strict_strtoull(buf, 10, &val);
+ if (0 != err)
+ return -EINVAL;
+
+ mutex_lock(&data.lock);
+ if (val < data.sample_window)
+ data.sample_width = val;
+ else {
+ mutex_unlock(&data.lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&data.lock);
+
+ if (enabled)
+ wake_up_process(kthread);
+
+ return csize;
+}
+
+/**
+ * debug_window_fopen - Open function for "window" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs.
+ */
+static int debug_window_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_window_fread - Read function for "window" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs. Can be used to read the total window size.
+ */
+static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
+}
+
+/**
+ * debug_window_fwrite - Write function for "window" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs. Can be used to write a new total window size. It
+ * is enforced that any value written must be greater than the sample width
+ * size, or an error results.
+ */
+static ssize_t debug_window_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ char buf[U64STR_SIZE];
+ int csize = min(cnt, sizeof(buf));
+ u64 val = 0;
+ int err = 0;
+
+ memset(buf, '\0', sizeof(buf));
+ if (copy_from_user(buf, ubuf, csize))
+ return -EFAULT;
+
+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
+ err = strict_strtoull(buf, 10, &val);
+ if (0 != err)
+ return -EINVAL;
+
+ mutex_lock(&data.lock);
+ if (data.sample_width < val)
+ data.sample_window = val;
+ else {
+ mutex_unlock(&data.lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&data.lock);
+
+ return csize;
+}
+
+/*
+ * Function pointers for the "count" debugfs file operations
+ */
+static const struct file_operations count_fops = {
+ .open = debug_count_fopen,
+ .read = debug_count_fread,
+ .write = debug_count_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "enable" debugfs file operations
+ */
+static const struct file_operations enable_fops = {
+ .open = debug_enable_fopen,
+ .read = debug_enable_fread,
+ .write = debug_enable_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "max" debugfs file operations
+ */
+static const struct file_operations max_fops = {
+ .open = debug_max_fopen,
+ .read = debug_max_fread,
+ .write = debug_max_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "sample" debugfs file operations
+ */
+static const struct file_operations sample_fops = {
+ .open = debug_sample_fopen,
+ .read = debug_sample_fread,
+ .release = debug_sample_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "threshold" debugfs file operations
+ */
+static const struct file_operations threshold_fops = {
+ .open = debug_threshold_fopen,
+ .read = debug_threshold_fread,
+ .write = debug_threshold_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "width" debugfs file operations
+ */
+static const struct file_operations width_fops = {
+ .open = debug_width_fopen,
+ .read = debug_width_fread,
+ .write = debug_width_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "window" debugfs file operations
+ */
+static const struct file_operations window_fops = {
+ .open = debug_window_fopen,
+ .read = debug_window_fread,
+ .write = debug_window_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * init_debugfs - A function to initialize the debugfs interface files
+ *
+ * This function creates entries in debugfs for "hwlat_detector", including
+ * files to read values from the detector, current samples, and the
+ * maximum sample that has been captured since the hardware latency
+ * detector was started.
+ */
+static int init_debugfs(void)
+{
+ int ret = -ENOMEM;
+
+ debug_dir = debugfs_create_dir(DRVNAME, NULL);
+ if (!debug_dir)
+ goto err_debug_dir;
+
+ debug_sample = debugfs_create_file("sample", 0444,
+ debug_dir, NULL,
+ &sample_fops);
+ if (!debug_sample)
+ goto err_sample;
+
+ debug_count = debugfs_create_file("count", 0444,
+ debug_dir, NULL,
+ &count_fops);
+ if (!debug_count)
+ goto err_count;
+
+ debug_max = debugfs_create_file("max", 0444,
+ debug_dir, NULL,
+ &max_fops);
+ if (!debug_max)
+ goto err_max;
+
+ debug_sample_window = debugfs_create_file("window", 0644,
+ debug_dir, NULL,
+ &window_fops);
+ if (!debug_sample_window)
+ goto err_window;
+
+ debug_sample_width = debugfs_create_file("width", 0644,
+ debug_dir, NULL,
+ &width_fops);
+ if (!debug_sample_width)
+ goto err_width;
+
+ debug_threshold = debugfs_create_file("threshold", 0644,
+ debug_dir, NULL,
+ &threshold_fops);
+ if (!debug_threshold)
+ goto err_threshold;
+
+ debug_enable = debugfs_create_file("enable", 0644,
+ debug_dir, &enabled,
+ &enable_fops);
+ if (!debug_enable)
+ goto err_enable;
+
+ else {
+ ret = 0;
+ goto out;
+ }
+
+err_enable:
+ debugfs_remove(debug_threshold);
+err_threshold:
+ debugfs_remove(debug_sample_width);
+err_width:
+ debugfs_remove(debug_sample_window);
+err_window:
+ debugfs_remove(debug_max);
+err_max:
+ debugfs_remove(debug_count);
+err_count:
+ debugfs_remove(debug_sample);
+err_sample:
+ debugfs_remove(debug_dir);
+err_debug_dir:
+out:
+ return ret;
+}
+
+/**
+ * free_debugfs - A function to cleanup the debugfs file interface
+ */
+static void free_debugfs(void)
+{
+ /* could also use a debugfs_remove_recursive */
+ debugfs_remove(debug_enable);
+ debugfs_remove(debug_threshold);
+ debugfs_remove(debug_sample_width);
+ debugfs_remove(debug_sample_window);
+ debugfs_remove(debug_max);
+ debugfs_remove(debug_count);
+ debugfs_remove(debug_sample);
+ debugfs_remove(debug_dir);
+}
+
+/**
+ * detector_init - Standard module initialization code
+ */
+static int detector_init(void)
+{
+ int ret = -ENOMEM;
+
+ printk(KERN_INFO BANNER "version %s\n", VERSION);
+
+ ret = init_stats();
+ if (0 != ret)
+ goto out;
+
+ ret = init_debugfs();
+ if (0 != ret)
+ goto err_stats;
+
+ if (enabled)
+ ret = start_kthread();
+
+ goto out;
+
+err_stats:
+ ring_buffer_free(ring_buffer);
+out:
+ return ret;
+
+}
+
+/**
+ * detector_exit - Standard module cleanup code
+ */
+static void detector_exit(void)
+{
+ if (enabled) {
+ enabled = 0;
+ stop_kthread();
+ }
+
+ free_debugfs();
+ ring_buffer_free(ring_buffer); /* free up the ring buffer */
+
+}
+
+module_init(detector_init);
+module_exit(detector_exit);
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 240608cc7ae9..a461017ce5ce 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1313,6 +1313,12 @@ static int mmc_spi_probe(struct spi_device *spi)
struct mmc_spi_host *host;
int status;
+ /* We rely on full duplex transfers, mostly to reduce
+ * per-transfer overheads (by making fewer transfers).
+ */
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ return -EINVAL;
+
/* MMC and SD specs only seem to care that sampling is on the
* rising edge ... meaning SPI modes 0 or 3. So either SPI mode
* should be legit. We'll use mode 0 since the steady state is 0,
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 5011fa73f918..1479da6d3aa6 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -194,7 +194,7 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
- dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n",
+ dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
this_part,
parts[this_part].name,
parts[this_part].offset,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 59c46126a5ce..3f7985311e35 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -54,7 +54,7 @@
#define SR_SRWD 0x80 /* SR write protect */
/* Define max times to check status register before we give up. */
-#define MAX_READY_WAIT_JIFFIES (10 * HZ) /* eg. M25P128 specs 6s max sector erase */
+#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define CMD_SIZE 4
#ifdef CONFIG_M25PXX_USE_FAST_READ
@@ -776,13 +776,13 @@ static struct spi_driver m25p80_driver = {
};
-static int m25p80_init(void)
+static int __init m25p80_init(void)
{
return spi_register_driver(&m25p80_driver);
}
-static void m25p80_exit(void)
+static void __exit m25p80_exit(void)
{
spi_unregister_driver(&m25p80_driver);
}
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 00248e81ecd5..7d846e9173da 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -303,7 +303,7 @@ __setup("slram=", mtd_slram_setup);
#endif
-static int init_slram(void)
+static int __init init_slram(void)
{
char *devname;
int i;
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index a790c062af1f..e56d6b42f020 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1099,7 +1099,7 @@ static struct mtd_blktrans_ops ftl_tr = {
.owner = THIS_MODULE,
};
-static int init_ftl(void)
+static int __init init_ftl(void)
{
return register_mtd_blktrans(&ftl_tr);
}
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 73f05227dc8c..d8cf29c01cc4 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -226,7 +226,7 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
if (!desperate && inftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
"EUNs (%d)\n", inftl->numfreeEUNs);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -281,7 +281,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
silly = MAX_LOOPS;
while (thisEUN < inftl->nb_blocks) {
for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
- if ((BlockMap[block] != 0xffff) || BlockDeleted[block])
+ if ((BlockMap[block] != BLOCK_NIL) ||
+ BlockDeleted[block])
continue;
if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
@@ -525,7 +526,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in "
"Virtual Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -549,7 +550,7 @@ hitused:
* waiting to be picked up. We're going to have to fold
* a chain to make room.
*/
- thisEUN = INFTL_makefreeblock(inftl, 0xffff);
+ thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL);
/*
* Hopefully we free something, lets try again.
@@ -631,7 +632,7 @@ hitused:
printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
"Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/*
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index b08a798ee254..2aac41bde8b3 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -42,10 +42,8 @@
#include <mach/hardware.h>
#include <asm/system.h>
-#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2)
-
struct armflash_subdev_info {
- char name[SUBDEV_NAME_SIZE];
+ char *name;
struct mtd_info *mtd;
struct map_info map;
struct flash_platform_data *plat;
@@ -134,6 +132,8 @@ static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
map_destroy(subdev->mtd);
if (subdev->map.virt)
iounmap(subdev->map.virt);
+ kfree(subdev->name);
+ subdev->name = NULL;
release_mem_region(subdev->map.phys, subdev->map.size);
}
@@ -177,16 +177,22 @@ static int armflash_probe(struct platform_device *dev)
if (nr == 1)
/* No MTD concatenation, just use the default name */
- snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s",
- dev_name(&dev->dev));
+ subdev->name = kstrdup(dev_name(&dev->dev), GFP_KERNEL);
else
- snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d",
- dev_name(&dev->dev), i);
+ subdev->name = kasprintf(GFP_KERNEL, "%s-%d",
+ dev_name(&dev->dev), i);
+ if (!subdev->name) {
+ err = -ENOMEM;
+ break;
+ }
subdev->plat = plat;
err = armflash_subdev_probe(subdev, res);
- if (err)
+ if (err) {
+ kfree(subdev->name);
+ subdev->name = NULL;
break;
+ }
}
info->nr_subdev = i;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2802992b39da..20c828ba9405 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -534,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
&num_partitions);
if ((!partitions) || (num_partitions == 0)) {
- printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n");
+ printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
res = ENXIO;
goto err_no_partitions;
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 29acd06b1c39..1b4690bdfdb3 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -903,12 +903,12 @@ static struct pci_driver cafe_nand_pci_driver = {
.resume = cafe_nand_resume,
};
-static int cafe_nand_init(void)
+static int __init cafe_nand_init(void)
{
return pci_register_driver(&cafe_nand_pci_driver);
}
-static void cafe_nand_exit(void)
+static void __exit cafe_nand_exit(void)
{
pci_unregister_driver(&cafe_nand_pci_driver);
}
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 10081e656a6f..826cacffcefc 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -147,7 +147,7 @@ static int cmx270_device_ready(struct mtd_info *mtd)
/*
* Main initialization routine
*/
-static int cmx270_init(void)
+static int __init cmx270_init(void)
{
struct nand_chip *this;
const char *part_type;
@@ -261,7 +261,7 @@ module_init(cmx270_init);
/*
* Clean up routine
*/
-static void cmx270_cleanup(void)
+static void __exit cmx270_cleanup(void)
{
/* Release resources, unregister device */
nand_release(cmx270_nand_mtd);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0cd76f89f4b0..ebd07e95b814 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,8 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
@@ -541,7 +543,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
unsigned long timeo = jiffies;
- int status, state = this->state;
+ int status = NAND_STATUS_FAIL, state = this->state;
if (state == FL_ERASING)
timeo += (HZ * 400) / 1000;
@@ -556,8 +558,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
while (time_before(jiffies, timeo)) {
status = __raw_readb(this->IO_ADDR_R);
- if (!(status & 0x40))
+ if (status & NAND_STATUS_READY)
break;
+ cond_resched();
}
return status;
}
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index e3f8495a94c2..fb86cacd5bdb 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -208,7 +208,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
/* Normally, we force a fold to happen before we run out of free blocks completely */
if (!desperate && nftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -230,11 +230,11 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
printk("Argh! No free blocks found! LastFreeEUN = %d, "
"FirstEUN = %d\n", nftl->LastFreeEUN,
le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
- return 0xffff;
+ return BLOCK_NIL;
}
} while (pot != nftl->LastFreeEUN);
- return 0xffff;
+ return BLOCK_NIL;
}
static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
@@ -431,7 +431,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
/* add the header so that it is now a valid chain */
oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
- oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;
+ oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
8, &retlen, (char *)&oob.u);
@@ -515,7 +515,7 @@ static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
if (ChainLength < 2) {
printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
"Failing request\n");
- return 0xffff;
+ return BLOCK_NIL;
}
return NFTL_foldchain (nftl, LongestChain, pendingblock);
@@ -578,7 +578,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING
"Infinite loop in Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -601,7 +601,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
//u16 startEUN = nftl->EUNtable[thisVUC];
//printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
- writeEUN = NFTL_makefreeblock(nftl, 0xffff);
+ writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
if (writeEUN == BLOCK_NIL) {
/* OK, we accept that the above comment is
@@ -673,7 +673,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 286ed594e5a0..db0b9cb64c6c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -996,6 +996,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi_msg("number of PEBs reserved for bad PEB handling: %d",
ubi->beb_rsvd_pebs);
ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
+ ubi_msg("image sequence number: %d", ubi->image_seq);
/*
* The below lock makes sure we do not race with 'ubi_thread()' which
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index c0ed60e8ade9..54b0186915fb 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -44,6 +44,8 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
be32_to_cpu(ec_hdr->vid_hdr_offset));
printk(KERN_DEBUG "\tdata_offset %d\n",
be32_to_cpu(ec_hdr->data_offset));
+ printk(KERN_DEBUG "\timage_seq %d\n",
+ be32_to_cpu(ec_hdr->image_seq));
printk(KERN_DEBUG "\thdr_crc %#08x\n",
be32_to_cpu(ec_hdr->hdr_crc));
printk(KERN_DEBUG "erase counter header hexdump:\n");
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 13777e5beac9..a4da7a09b949 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -93,6 +93,12 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
#define UBI_IO_DEBUG 0
#endif
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
+int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
+#else
+#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
+#endif
+
#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
#define DBG_DISABLE_BGT 1
#else
@@ -167,6 +173,7 @@ static inline int ubi_dbg_is_erase_failure(void)
#define ubi_dbg_is_bitflip() 0
#define ubi_dbg_is_write_failure() 0
#define ubi_dbg_is_erase_failure() 0
+#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
#endif /* !CONFIG_MTD_UBI_DEBUG */
#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index effaff28bab1..1ea14218de02 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -98,17 +98,12 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_vid_hdr *vid_hdr);
-static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
- int len);
-static int paranoid_check_empty(struct ubi_device *ubi, int pnum);
#else
#define paranoid_check_not_bad(ubi, pnum) 0
#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
-#define paranoid_check_all_ff(ubi, pnum, offset, len) 0
-#define paranoid_check_empty(ubi, pnum) 0
#endif
/**
@@ -244,7 +239,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
return err > 0 ? -EINVAL : err;
/* The area we are writing to has to contain all 0xFF bytes */
- err = paranoid_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
if (err)
return err > 0 ? -EINVAL : err;
@@ -350,7 +345,7 @@ retry:
return -EIO;
}
- err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
if (err)
return err > 0 ? -EINVAL : err;
@@ -672,11 +667,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (read_err != -EBADMSG &&
check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
- err = paranoid_check_all_ff(ubi, pnum, 0,
- ubi->peb_size);
- if (err)
- return err > 0 ? UBI_IO_BAD_EC_HDR : err;
-
if (verbose)
ubi_warn("no EC header found at PEB %d, "
"only 0xFF bytes", pnum);
@@ -752,6 +742,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
ec_hdr->version = UBI_VERSION;
ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
+ ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
ec_hdr->hdr_crc = cpu_to_be32(crc);
@@ -947,15 +938,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (read_err != -EBADMSG &&
check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
/* The physical eraseblock is supposedly free */
-
- /*
- * The below is just a paranoid check, it has to be
- * compiled out if paranoid checks are disabled.
- */
- err = paranoid_check_empty(ubi, pnum);
- if (err)
- return err > 0 ? UBI_IO_BAD_VID_HDR : err;
-
if (verbose)
ubi_warn("no VID header found at PEB %d, "
"only 0xFF bytes", pnum);
@@ -1229,7 +1211,7 @@ exit:
}
/**
- * paranoid_check_all_ff - check that a region of flash is empty.
+ * ubi_dbg_check_all_ff - check that a region of flash is empty.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @offset: the starting offset within the physical eraseblock to check
@@ -1239,8 +1221,7 @@ exit:
* @offset of the physical eraseblock @pnum, %1 if not, and a negative error
* code if an error occurred.
*/
-static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
- int len)
+int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
@@ -1276,74 +1257,4 @@ error:
return err;
}
-/**
- * paranoid_check_empty - whether a PEB is empty.
- * @ubi: UBI device description object
- * @pnum: the physical eraseblock number to check
- *
- * This function makes sure PEB @pnum is empty, which means it contains only
- * %0xFF data bytes. Returns zero if the PEB is empty, %1 if not, and a
- * negative error code in case of failure.
- *
- * Empty PEBs have the EC header, and do not have the VID header. The caller of
- * this function should have already made sure the PEB does not have the VID
- * header. However, this function re-checks that, because it is possible that
- * the header and data has already been written to the PEB.
- *
- * Let's consider a possible scenario. Suppose there are 2 tasks - A and B.
- * Task A is in 'wear_leveling_worker()'. It is reading VID header of PEB X to
- * find which LEB it corresponds to. PEB X is currently unmapped, and has no
- * VID header. Task B is trying to write to PEB X.
- *
- * Task A: in 'ubi_io_read_vid_hdr()': reads the VID header from PEB X. The
- * read data contain all 0xFF bytes;
- * Task B: writes VID header and some data to PEB X;
- * Task A: assumes PEB X is empty, calls 'paranoid_check_empty()'. And if we
- * do not re-read the VID header, and do not cancel the checking if it
- * is there, we fail.
- */
-static int paranoid_check_empty(struct ubi_device *ubi, int pnum)
-{
- int err, offs = ubi->vid_hdr_aloffset, len = ubi->vid_hdr_alsize;
- size_t read;
- uint32_t magic;
- const struct ubi_vid_hdr *vid_hdr;
-
- mutex_lock(&ubi->dbg_buf_mutex);
- err = ubi->mtd->read(ubi->mtd, offs, len, &read, ubi->dbg_peb_buf);
- if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offs, read);
- goto error;
- }
-
- vid_hdr = ubi->dbg_peb_buf;
- magic = be32_to_cpu(vid_hdr->magic);
- if (magic == UBI_VID_HDR_MAGIC)
- /* The PEB contains VID header, so it is not empty */
- goto out;
-
- err = check_pattern(ubi->dbg_peb_buf, 0xFF, len);
- if (err == 0) {
- ubi_err("flash region at PEB %d:%d, length %d does not "
- "contain all 0xFF bytes", pnum, offs, len);
- goto fail;
- }
-
-out:
- mutex_unlock(&ubi->dbg_buf_mutex);
- return 0;
-
-fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_msg("hex dump of the %d-%d region", offs, offs + len);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
- ubi->dbg_peb_buf, len, 1);
- err = 1;
-error:
- ubi_dbg_dump_stack();
- mutex_unlock(&ubi->dbg_buf_mutex);
- return err;
-}
-
#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c3d653ba5ca0..f60895ee0aeb 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -757,6 +757,8 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
si->is_empty = 0;
if (!ec_corr) {
+ int image_seq;
+
/* Make sure UBI version is OK */
if (ech->version != UBI_VERSION) {
ubi_err("this UBI version is %d, image version is %d",
@@ -778,6 +780,18 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
ubi_dbg_dump_ec_hdr(ech);
return -EINVAL;
}
+
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!si->image_seq_set) {
+ ubi->image_seq = image_seq;
+ si->image_seq_set = 1;
+ } else if (ubi->image_seq != image_seq) {
+ ubi_err("bad image sequence number %d in PEB %d, "
+ "expected %d", image_seq, pnum, ubi->image_seq);
+ ubi_dbg_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+
}
/* OK, we've done with the EC header, let's look at the VID header */
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 61df208e2f20..1017cf12def5 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -102,6 +102,7 @@ struct ubi_scan_volume {
* @mean_ec: mean erase counter value
* @ec_sum: a temporary variable used when calculating @mean_ec
* @ec_count: a temporary variable used when calculating @mean_ec
+ * @image_seq_set: indicates @ubi->image_seq is known
*
* This data structure contains the result of scanning and may be used by other
* UBI sub-systems to build final UBI data structures, further error-recovery
@@ -124,6 +125,7 @@ struct ubi_scan_info {
int mean_ec;
uint64_t ec_sum;
int ec_count;
+ int image_seq_set;
};
struct ubi_device;
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 8419fdccc79c..503ea9b27309 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -129,6 +129,7 @@ enum {
* @ec: the erase counter
* @vid_hdr_offset: where the VID header starts
* @data_offset: where the user data start
+ * @image_seq: image sequence number
* @padding2: reserved for future, zeroes
* @hdr_crc: erase counter header CRC checksum
*
@@ -144,6 +145,14 @@ enum {
* volume identifier header and user data, relative to the beginning of the
* physical eraseblock. These values have to be the same for all physical
* eraseblocks.
+ *
+ * The @image_seq field is used to validate a UBI image that has been prepared
+ * for a UBI device. The @image_seq value can be any value, but it must be the
+ * same on all eraseblocks. UBI will ensure that all new erase counter headers
+ * also contain this value, and will check the value when scanning at start-up.
+ * One way to make use of @image_seq is to increase its value by one every time
+ * an image is flashed over an existing image; then, if the flashing does not
+ * complete, UBI will detect the error when scanning.
*/
struct ubi_ec_hdr {
__be32 magic;
@@ -152,7 +161,8 @@ struct ubi_ec_hdr {
__be64 ec; /* Warning: the current limit is 31-bit anyway! */
__be32 vid_hdr_offset;
__be32 data_offset;
- __u8 padding2[36];
+ __be32 image_seq;
+ __u8 padding2[32];
__be32 hdr_crc;
} __attribute__ ((packed));
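
The @image_seq kernel-doc above describes the intended workflow: an image-preparation tool stamps one sequence number into every erase-counter header it writes, and UBI refuses to attach at scan time if the numbers disagree. The stand-alone sketch below is not part of this patch; it is a hypothetical host-side helper (the names and the caller-supplied CRC routine are illustrative), with offsets derived from the packed ubi_ec_hdr layout above (image_seq at byte offset 24, hdr_crc at 60, CRC covering the first 60 bytes).

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl(): EC header fields are big-endian */

#define EC_HDR_IMAGE_SEQ_OFF	24	/* offset of image_seq (derived from layout above) */
#define EC_HDR_CRC_OFF		60	/* offset of hdr_crc */
#define EC_HDR_SIZE_CRC		60	/* bytes covered by hdr_crc */

/*
 * Hypothetical helper: write the chosen image_seq into one 64-byte EC
 * header and recompute its CRC.  A flashing tool would call this for the
 * EC header of every physical eraseblock so the whole image carries the
 * same sequence number.
 */
static void stamp_image_seq(uint8_t *ec_hdr, uint32_t image_seq,
			    uint32_t (*crc32_fn)(const void *buf, size_t len))
{
	uint32_t be_seq = htonl(image_seq);
	uint32_t be_crc;

	memcpy(ec_hdr + EC_HDR_IMAGE_SEQ_OFF, &be_seq, sizeof(be_seq));
	be_crc = htonl(crc32_fn(ec_hdr, EC_HDR_SIZE_CRC));
	memcpy(ec_hdr + EC_HDR_CRC_OFF, &be_crc, sizeof(be_crc));
}

Incrementing image_seq on every reflash, as the comment suggests, means a flash that is interrupted half-way leaves a mix of old and new sequence numbers, which the new check in scan.c then rejects with -EINVAL.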
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 28acd133c997..64604e8809ec 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -301,6 +301,7 @@ struct ubi_wl_entry;
* @vol->readers, @vol->writers, @vol->exclusive,
* @vol->ref_count, @vol->mapping and @vol->eba_tbl.
* @ref_count: count of references on the UBI device
+ * @image_seq: image sequence number recorded on EC headers
*
* @rsvd_pebs: count of reserved physical eraseblocks
* @avail_pebs: count of available physical eraseblocks
@@ -390,6 +391,7 @@ struct ubi_device {
struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
spinlock_t volumes_lock;
int ref_count;
+ int image_seq;
int rsvd_pebs;
int avail_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2b2472300610..600c7229d5cf 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -459,6 +459,14 @@ retry:
dbg_wl("PEB %d EC %d", e->pnum, e->ec);
prot_queue_add(ubi, e);
spin_unlock(&ubi->wl_lock);
+
+ err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
+ if (err) {
+ ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
+ return err > 0 ? -EINVAL : err;
+ }
+
return e->pnum;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c155bd3ec9f1..f053ba5c37ba 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -209,7 +209,7 @@ config MII
config MACB
tristate "Atmel MACB support"
- depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91CAP9
+ depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91SAM9G45 || ARCH_AT91CAP9
select PHYLIB
help
The Atmel MACB ethernet interface is found on many AT32 and AT91
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index e4afbd628c23..607007d75b6f 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_PHY)
adapter->wol |= AT_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 619c6583e1aa..4003955d7a96 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_PHY)
adapter->wol |= AT_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f703758f0a6e..5b4bf3d2cdc2 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define BE_MAX_LRO_DESCRIPTORS 16
-#define BE_MAX_FRAGS_PER_FRAME 16
+#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
struct be_dma_mem {
void *va;
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9592f22e4c8c..cccc5419ad72 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
return -EINVAL;
adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
- if (adapter->max_rx_coal > MAX_SKB_FRAGS)
- adapter->max_rx_coal = MAX_SKB_FRAGS - 1;
+ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
+ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
/* if AIC is being turned on now, start with an EQD of 0 */
if (rx_eq->enable_aic == 0 &&
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index b02e805c1db3..29c33c709c6d 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -55,6 +55,10 @@
#define MEMBAR_CTRL_INT_CTRL_PFUNC_MASK 0x7 /* bits 26 - 28 */
#define MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT 26
+/********* ISR0 Register offset **********/
+#define CEV_ISR0_OFFSET 0xC18
+#define CEV_ISR_SIZE 4
+
/********* Event Q door bell *************/
#define DB_EQ_OFFSET DB_CQ_OFFSET
#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66c10c87f517..c43f6a119295 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -666,7 +666,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
{
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct be_rx_page_info *page_info;
- u16 rxq_idx, i, num_rcvd;
+ u16 rxq_idx, i, num_rcvd, j;
u32 pktsize, hdr_len, curr_frag_len;
u8 *start;
@@ -709,22 +709,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
/* More frags present for this completion */
pktsize -= curr_frag_len; /* account for above copied frag */
- for (i = 1; i < num_rcvd; i++) {
+ for (i = 1, j = 0; i < num_rcvd; i++) {
index_inc(&rxq_idx, rxq->len);
page_info = get_rx_page_info(adapter, rxq_idx);
curr_frag_len = min(pktsize, rx_frag_size);
- skb_shinfo(skb)->frags[i].page = page_info->page;
- skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
- skb_shinfo(skb)->frags[i].size = curr_frag_len;
+ /* Coalesce all frags from the same physical page in one slot */
+ if (page_info->page_offset == 0) {
+ /* Fresh page */
+ j++;
+ skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_shinfo(skb)->frags[j].page_offset =
+ page_info->page_offset;
+ skb_shinfo(skb)->frags[j].size = 0;
+ skb_shinfo(skb)->nr_frags++;
+ } else {
+ put_page(page_info->page);
+ }
+
+ skb_shinfo(skb)->frags[j].size += curr_frag_len;
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
- skb_shinfo(skb)->nr_frags++;
pktsize -= curr_frag_len;
memset(page_info, 0, sizeof(*page_info));
}
+ BUG_ON(j > MAX_SKB_FRAGS);
done:
be_rx_stats_update(adapter, pktsize, num_rcvd);
@@ -786,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
struct be_queue_info *rxq = &adapter->rx_obj.q;
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
- u16 i, rxq_idx = 0, vid;
+ u16 i, rxq_idx = 0, vid, j;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
@@ -794,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
remaining = pkt_size;
- for (i = 0; i < num_rcvd; i++) {
+ for (i = 0, j = -1; i < num_rcvd; i++) {
page_info = get_rx_page_info(adapter, rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
- rx_frags[i].page = page_info->page;
- rx_frags[i].page_offset = page_info->page_offset;
- rx_frags[i].size = curr_frag_len;
- remaining -= curr_frag_len;
+ /* Coalesce all frags from the same physical page in one slot */
+ if (i == 0 || page_info->page_offset == 0) {
+ /* First frag or Fresh page */
+ j++;
+ rx_frags[j].page = page_info->page;
+ rx_frags[j].page_offset = page_info->page_offset;
+ rx_frags[j].size = 0;
+ } else {
+ put_page(page_info->page);
+ }
+ rx_frags[j].size += curr_frag_len;
+ remaining -= curr_frag_len;
index_inc(&rxq_idx, rxq->len);
-
memset(page_info, 0, sizeof(*page_info));
}
+ BUG_ON(j > MAX_SKB_FRAGS);
if (likely(!vlanf)) {
lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
@@ -1255,15 +1274,17 @@ static irqreturn_t be_intx(int irq, void *dev)
{
struct be_adapter *adapter = dev;
struct be_ctrl_info *ctrl = &adapter->ctrl;
- int rx, tx;
+ int isr;
- tx = event_handle(ctrl, &adapter->tx_eq);
- rx = event_handle(ctrl, &adapter->rx_eq);
+ isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
+ ctrl->pci_func * CEV_ISR_SIZE);
+ if (!isr)
+ return IRQ_NONE;
- if (rx || tx)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
+ event_handle(ctrl, &adapter->tx_eq);
+ event_handle(ctrl, &adapter->rx_eq);
+
+ return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fbf1352e9c1c..951714a7f90a 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -8637,6 +8637,14 @@ static int bnx2x_nway_reset(struct net_device *dev)
return 0;
}
+static u32
+bnx2x_get_link(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->link_vars.link_up;
+}
+
static int bnx2x_get_eeprom_len(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10034,7 +10042,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = {
.get_msglevel = bnx2x_get_msglevel,
.set_msglevel = bnx2x_set_msglevel,
.nway_reset = bnx2x_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = bnx2x_get_link,
.get_eeprom_len = bnx2x_get_eeprom_len,
.get_eeprom = bnx2x_get_eeprom,
.set_eeprom = bnx2x_set_eeprom,
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 58afafbd3b9c..fd5e32cbcb87 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_multicast_list = cpmac_set_multicast_list,
- .ndo_so_ioctl = cpmac_ioctl,
+ .ndo_do_ioctl = cpmac_ioctl,
.ndo_set_config = cpmac_config,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e3356f8eb5a..5b8cbdb4b520 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->skb) {
+ if (buffer_info->dma) {
pci_unmap_single(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ }
+
+ buffer_info->dma = 0;
+ if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
@@ -4222,6 +4227,7 @@ map_skb:
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
break; /* while !buffer_info->skb */
}
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 8890c97e1120..c0f185beb8bc 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -238,6 +238,7 @@
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
/* Constants used to interpret the masked PCI-X bus speed. */
@@ -575,6 +576,8 @@
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
+
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 163c1c0cfee7..fd44d9f90769 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -215,6 +215,7 @@ enum e1e_registers {
E1000_SWSM = 0x05B50, /* SW Semaphore */
E1000_FWSM = 0x05B54, /* FW Semaphore */
E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
+ E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */
E1000_HICR = 0x08F00, /* Host Interface Control */
};
@@ -302,6 +303,9 @@ enum e1e_registers {
#define E1000_KMRNCTRLSTA_REN 0x00200000
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E
+#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9e23f50fb9cd..d56c7473144a 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,6 +338,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ union ich8_hws_flash_status hsfsts;
u32 gfpreg;
u32 sector_base_addr;
u32 sector_end_addr;
@@ -374,6 +375,20 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
+ /*
+ * Make sure the flash bank size does not overwrite the 4k
+ * sector ranges. We may have 64k allotted to us but we only care
+ * about the first 2 4k sectors. Therefore, if we have anything less
+ * than 64k set in the HSFSTS register, we will reduce the bank size
+ * down to 4k and let the rest remain unused. If berasesz == 3, then
+ * we are working in 64k mode. Otherwise we are not.
+ */
+ if (nvm->flash_bank_size > E1000_ICH8_SHADOW_RAM_WORDS) {
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.berasesz != 3)
+ nvm->flash_bank_size = E1000_ICH8_SHADOW_RAM_WORDS;
+ }
+
nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
/* Clear shadow ram */
@@ -446,6 +461,95 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
return 0;
}
+/**
+ * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ /*
+ * We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = 0;
+ goto out;
+ }
+
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ E1000_KMRNCTRLSTA_K1_ENABLE);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000e_check_downshift(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ e1000e_config_collision_dist(hw);
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val)
+ hw_dbg(hw, "Error configuring flow control\n");
+
+out:
+ return ret_val;
+}
+
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -694,6 +798,38 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_lan_init_done_ich8lan - Check for PHY config completion
+ * @hw: pointer to the HW structure
+ *
+ * Check the appropriate indication the MAC has finished configuring the
+ * PHY after a software reset.
+ **/
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+ u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+ /* Wait for basic configuration to complete before proceeding */
+ do {
+ data = er32(STATUS);
+ data &= E1000_STATUS_LAN_INIT_DONE;
+ udelay(100);
+ } while ((!data) && --loop);
+
+ /*
+ * If basic configuration is incomplete before the above loop
+ * count reaches 0, loading the configuration from NVM will
+ * leave the PHY in a bad state possibly resulting in no link.
+ */
+ if (loop == 0)
+ hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
+
+ /* Clear the Init Done bit for the next init event */
+ data = er32(STATUS);
+ data &= ~E1000_STATUS_LAN_INIT_DONE;
+ ew32(STATUS, data);
+}
+
+/**
* e1000_phy_hw_reset_ich8lan - Performs a PHY reset
* @hw: pointer to the HW structure
*
@@ -707,13 +843,15 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
u32 i;
u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val;
- u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
ret_val = e1000e_phy_hw_reset_generic(hw);
if (ret_val)
return ret_val;
+ /* Allow time for h/w to get to a quiescent state after reset */
+ mdelay(10);
+
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
@@ -741,26 +879,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
if (!(data & sw_cfg_mask))
return 0;
- /* Wait for basic configuration completes before proceeding*/
- do {
- data = er32(STATUS);
- data &= E1000_STATUS_LAN_INIT_DONE;
- udelay(100);
- } while ((!data) && --loop);
-
- /*
- * If basic configuration is incomplete before the above loop
- * count reaches 0, loading the configuration from NVM will
- * leave the PHY in a bad state possibly resulting in no link.
- */
- if (loop == 0) {
- hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
- }
-
- /* Clear the Init Done bit for the next init event */
- data = er32(STATUS);
- data &= ~E1000_STATUS_LAN_INIT_DONE;
- ew32(STATUS, data);
+ /* Wait for basic configuration to complete before proceeding */
+ e1000_lan_init_done_ich8lan(hw);
/*
* Make sure HW does not configure LCD from PHY
@@ -961,12 +1081,14 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
/*
* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
- if ((hw->mac.type == e1000_ich8lan) &&
- (hw->phy.type == e1000_phy_igp_3))
+ if (hw->mac.type == e1000_ich8lan)
e1000e_gig_downshift_workaround_ich8lan(hw);
/* When LPLU is enabled, we should disable SmartSpeed */
@@ -979,6 +1101,9 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
@@ -1038,6 +1163,10 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (!active) {
phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
@@ -1073,12 +1202,14 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
/*
* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
- if ((hw->mac.type == e1000_ich8lan) &&
- (hw->phy.type == e1000_phy_igp_3))
+ if (hw->mac.type == e1000_ich8lan)
e1000e_gig_downshift_workaround_ich8lan(hw);
/* When LPLU is enabled, we should disable SmartSpeed */
@@ -1905,7 +2036,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
break;
case 1:
sector_size = ICH_FLASH_SEG_SIZE_4K;
- iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K;
+ iteration = 1;
break;
case 2:
if (hw->mac.type == e1000_ich9lan) {
@@ -1917,7 +2048,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
break;
case 3:
sector_size = ICH_FLASH_SEG_SIZE_64K;
- iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K;
+ iteration = 1;
break;
default:
return -E1000_ERR_NVM;
@@ -2143,6 +2274,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
if (!e1000_check_reset_block(hw)) {
+ /* Clear PHY Reset Asserted bit */
+ if (hw->mac.type >= e1000_pchlan) {
+ u32 status = er32(STATUS);
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ }
+
/*
* PHY HW reset requires MAC CORE reset at the same
* time to make sure the interface between MAC and the
@@ -2156,23 +2293,34 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ew32(CTRL, (ctrl | E1000_CTRL_RST));
msleep(20);
- if (!ret_val) {
- /* release the swflag because it is not reset by
- * hardware reset
- */
+ if (!ret_val)
e1000_release_swflag_ich8lan(hw);
- }
- ret_val = e1000e_get_auto_rd_done(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- hw_dbg(hw, "Auto Read Done did not complete\n");
+ if (ctrl & E1000_CTRL_PHY_RST)
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ hw_dbg(hw, "Auto Read Done did not complete\n");
+ }
}
+ /*
+ * For PCH, this write will make sure that any noise
+ * will be detected as a CRC error and be dropped rather than show up
+ * as a bad packet to the DMA engine.
+ */
+ if (hw->mac.type == e1000_pchlan)
+ ew32(CRC_OFFSET, 0x65656565);
+
ew32(IMC, 0xffffffff);
icr = er32(ICR);
@@ -2222,6 +2370,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
for (i = 0; i < mac->mta_reg_count; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ /*
+ * The 82578 Rx buffer will stall if wakeup is enabled in host and
+ * the ME. Reading the BM_WUC register will clear the host wakeup bit.
+ * Reset the phy after disabling host wakeup to reset the Rx buffer.
+ */
+ if (hw->phy.type == e1000_phy_82578) {
+ hw->phy.ops.read_phy_reg(hw, BM_WUC, &i);
+ ret_val = e1000_phy_hw_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Setup link and flow control */
ret_val = e1000_setup_link_ich8lan(hw);
@@ -2254,16 +2414,6 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
ew32(CTRL_EXT, ctrl_ext);
/*
- * The 82578 Rx buffer will stall if wakeup is enabled in host and
- * the ME. Reading the BM_WUC register will clear the host wakeup bit.
- * Reset the phy after disabling host wakeup to reset the Rx buffer.
- */
- if (hw->phy.type == e1000_phy_82578) {
- e1e_rphy(hw, BM_WUC, &i);
- e1000e_phy_hw_reset_generic(hw);
- }
-
- /*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
@@ -2485,6 +2635,14 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
if (ret_val)
return ret_val;
+ if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) {
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ E1000_KMRNCTRLSTA_K1_DISABLE);
+ if (ret_val)
+ return ret_val;
+ }
+
if ((hw->mac.type == e1000_ich8lan) &&
(hw->phy.type == e1000_phy_igp_3) &&
(*speed == SPEED_1000)) {
@@ -2850,6 +3008,16 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
u32 bank = 0;
+ if (hw->mac.type >= e1000_pchlan) {
+ u32 status = er32(STATUS);
+
+ if (status & E1000_STATUS_PHYRA)
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ hw_dbg(hw,
+ "PHY Reset Asserted not set - needs delay\n");
+ }
+
e1000e_get_cfg_done(hw);
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
@@ -2921,7 +3089,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
static struct e1000_mac_operations ich8_mac_ops = {
.id_led_init = e1000e_id_led_init,
.check_mng_mode = e1000_check_mng_mode_ich8lan,
- .check_for_link = e1000e_check_for_copper_link,
+ .check_for_link = e1000_check_for_copper_link_ich8lan,
/* cleanup_led dependent on mac type */
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
.get_bus_info = e1000_get_bus_info_ich8lan,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index be6d9e990374..99ba2b8a2a05 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -378,12 +378,6 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
mac->get_link_status = 0;
- if (hw->phy.type == e1000_phy_82578) {
- ret_val = e1000_link_stall_workaround_hv(hw);
- if (ret_val)
- return ret_val;
- }
-
/*
* Check if there was DownShift, must be checked
* immediately after link-up
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 679885a122b4..63415bb6f48f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000e_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index e23459cf3d0e..994401fd0664 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1531,7 +1531,12 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
*/
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
if (ret_val)
- break;
+ /*
+ * If the first read fails, another entity may have
+ * ownership of the resources; wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ udelay(usec_interval);
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
@@ -2737,6 +2742,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
if (hw->phy.type != e1000_phy_82578)
goto out;
+ /* Do not apply workaround if PHY loopback (bit 14) is set */
+ hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data);
+ if (data & PHY_CONTROL_LB)
+ goto out;
+
/* check if link is up and at 1Gbps */
ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data);
if (ret_val)
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 3af581303ca2..d167090248e2 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
}
-#ifdef CONFIG_GIANFAR
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
{
struct gfar __iomem *enet_regs;
@@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
#endif
-#ifdef CONFIG_UCC_GETH
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
struct device_node *np = NULL;
@@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#ifdef CONFIG_GIANFAR
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs);
#else
err = -ENODEV;
@@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#ifdef CONFIG_UCC_GETH
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ea17319624aa..be480292aba1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4549,11 +4549,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
cleaned = true;
cleaned_count++;
+ /* this is the fast path for the non-packet split case */
if (!adapter->rx_ps_hdr_size) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_buffer_len +
- NET_IP_ALIGN,
+ adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
@@ -4570,8 +4571,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
if (!skb_shinfo(skb)->nr_frags) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_ps_hdr_size + NET_IP_ALIGN,
+ adapter->rx_ps_hdr_size,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, hlen);
}
@@ -4713,7 +4715,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
bufsz = adapter->rx_ps_hdr_size;
else
bufsz = adapter->rx_buffer_len;
- bufsz += NET_IP_ALIGN;
while (cleaned_count--) {
rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
@@ -4737,7 +4738,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
}
if (!buffer_info->skb) {
- skb = netdev_alloc_skb(netdev, bufsz);
+ skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
@@ -5338,6 +5339,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
igb_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index f3eed6a8fba5..911c082cee5a 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size)
return 0;
}
+static const struct net_device_ops bfin_sir_ndo = {
+ .ndo_open = bfin_sir_open,
+ .ndo_stop = bfin_sir_stop,
+ .ndo_start_xmit = bfin_sir_hard_xmit,
+ .ndo_do_ioctl = bfin_sir_ioctl,
+ .ndo_get_stats = bfin_sir_stats,
+};
+
static int __devinit bfin_sir_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev)
if (err)
goto err_mem_3;
- dev->hard_start_xmit = bfin_sir_hard_xmit;
- dev->open = bfin_sir_open;
- dev->stop = bfin_sir_stop;
- dev->do_ioctl = bfin_sir_ioctl;
- dev->get_stats = bfin_sir_stats;
- dev->irq = sir_port->irq;
+ dev->netdev_ops = &bfin_sir_ndo;
+ dev->irq = sir_port->irq;
irda_init_max_qos_capabilies(&self->qos);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 86f4f3e36f27..2a978008fd6e 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg);
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
s32 err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
/* 10000/copper and 1000/copper must autoneg
* this function does not support any duplex forcing, but can
* limit the advertising of the adapter to only 10000 or 1000 */
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
} else {
/* in this case we currently only support 10Gb/FULL */
if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+ (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
(ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
@@ -1829,7 +1830,6 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
break;
default:
wol->supported = 0;
- retval = 0;
}
return retval;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e756e220db32..a3061aacffd8 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -563,7 +563,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
@@ -593,7 +592,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
if (!bi->skb) {
struct sk_buff *skb;
- skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ skb = netdev_alloc_skb(adapter->netdev,
+ (rx_ring->rx_buf_len +
+ NET_IP_ALIGN));
if (!skb) {
adapter->alloc_rx_buff_failed++;
@@ -608,7 +609,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
skb_reserve(skb, NET_IP_ALIGN);
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data, bufsz,
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
@@ -732,6 +734,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
pci_unmap_single(pdev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
skb_put(skb, len);
}
@@ -2694,16 +2697,23 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
/*
* For hot-pluggable SFP+ devices, a new SFP+ module may have
- * arrived before interrupts were enabled. We need to kick off
- * the SFP+ module setup first, then try to bring up link.
+ * arrived before interrupts were enabled but after probe. Such
+ * devices wouldn't have their type identified yet. We need to
+ * kick off the SFP+ module setup first, then try to bring up link.
* If we're not hot-pluggable SFP+, we just need to configure link
* and bring it up.
*/
- err = hw->phy.ops.identify(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
- ixgbe_down(adapter);
- return err;
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ err = hw->phy.ops.identify(hw);
+ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ /*
+ * Take the device down and schedule the sfp tasklet
+ * which will unregister_netdev and log it.
+ */
+ ixgbe_down(adapter);
+ schedule_work(&adapter->sfp_config_module_task);
+ return err;
+ }
}
if (ixgbe_is_sfp(hw)) {
@@ -2812,9 +2822,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- rx_buffer_info->page_dma = 0;
+ if (rx_buffer_info->page_dma) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ }
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
rx_buffer_info->page_offset = 0;
@@ -3716,14 +3728,15 @@ static void ixgbe_sfp_task(struct work_struct *work)
if ((hw->phy.type == ixgbe_phy_nl) &&
(hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
s32 ret = hw->phy.ops.identify_sfp(hw);
- if (ret)
+ if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
goto reschedule;
ret = hw->phy.ops.reset(hw);
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "failed to initialize because an "
- "unsupported SFP+ module type was detected.\n"
- "Reload the driver after installing a "
- "supported module.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize "
+ "because an unsupported SFP+ module type "
+ "was detected.\n"
+ "Reload the driver after installing a "
+ "supported module.\n");
unregister_netdev(adapter->netdev);
} else {
DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -4502,7 +4515,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
u32 autoneg;
adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
- if (hw->mac.ops.get_link_capabilities)
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg,
&hw->mac.autoneg);
if (hw->mac.ops.setup_link_speed)
@@ -4524,10 +4538,17 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
u32 err;
adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
+
+ /* Time for electrical oscillations to settle down */
+ msleep(100);
err = hw->phy.ops.identify_sfp(hw);
+
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
- ixgbe_down(adapter);
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
+ unregister_netdev(adapter->netdev);
return;
}
hw->mac.ops.setup_sfp(hw);
@@ -5513,8 +5534,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
round_jiffies(jiffies + (2 * HZ)));
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- dev_err(&adapter->pdev->dev, "failed to load because an "
- "unsupported SFP+ module type was detected.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
goto err_sw_init;
} else if (err) {
dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 453e966762f0..9ecad17522c3 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -60,6 +60,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
if (hw->phy.type == ixgbe_phy_unknown) {
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ hw->phy.mdio.prtad = phy_addr;
if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
ixgbe_get_phy_id(hw);
hw->phy.type =
@@ -68,6 +69,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
break;
}
}
+ /* clear value if nothing found */
+ hw->phy.mdio.prtad = 0;
} else {
status = 0;
}
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index dc45e9856c35..6851bdb2ce29 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -14,6 +14,10 @@
#include <linux/mdio.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers");
+MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc.");
+MODULE_LICENSE("GPL");
+
/**
* mdio45_probe - probe for an MDIO (clause 45) device
* @mdio: MDIO interface
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 970cedeb5f37..e1cdba752e09 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -60,7 +60,18 @@
#define _NETXEN_NIC_LINUX_SUBVERSION 30
#define NETXEN_NIC_LINUX_VERSIONID "4.0.30"
-#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define NETXEN_DECODE_VERSION(v) \
+ NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
#define NETXEN_NUM_FLASH_SECTORS (64)
#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
@@ -614,6 +625,7 @@ struct netxen_new_user_info {
#define NX_P2_MN_ROMIMAGE 0
#define NX_P3_CT_ROMIMAGE 1
#define NX_P3_MN_ROMIMAGE 2
+#define NX_FLASH_ROMIMAGE 3
#define NETXEN_USER_START_OLD NETXEN_PXE_START /* for backward compatibility */
@@ -1243,7 +1255,7 @@ struct netxen_adapter {
u32 resv3;
u8 has_link_events;
- u8 resv1;
+ u8 fw_type;
u16 tx_context_id;
u16 mtu;
u16 is_up;
@@ -1387,6 +1399,7 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter);
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
int netxen_load_firmware(struct netxen_adapter *adapter);
+int netxen_need_fw_reset(struct netxen_adapter *adapter);
void netxen_request_firmware(struct netxen_adapter *adapter);
void netxen_release_firmware(struct netxen_adapter *adapter);
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
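
The comment added to netxen_nic.h above spells out the mismatch NETXEN_DECODE_VERSION() papers over: the word stored in the firmware image keeps major in bits 7:0, minor in 15:8 and build in 31:16, whereas NETXEN_VERSION_CODE() packs major<<24 | minor<<16 | build for comparisons such as the 4.0.216 minimum in netxen_validate_firmware(). A small stand-alone check, not part of the patch (the macros are copied from the hunk above), makes the remapping concrete:

#include <assert.h>
#include <stdint.h>

#define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
#define NETXEN_DECODE_VERSION(v) \
	NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))

int main(void)
{
	/* Raw image word for firmware 4.0.216: major=4 in bits 7:0,
	 * minor=0 in bits 15:8, build=216 in bits 31:16. */
	uint32_t raw = 4u | (0u << 8) | (216u << 16);

	/* Decoding the raw word must equal packing 4.0.216 directly. */
	assert(NETXEN_DECODE_VERSION(raw) == NETXEN_VERSION_CODE(4, 0, 216));
	return 0;
}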
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 3cc047844af3..824103675648 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -853,6 +853,7 @@ enum {
#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c))
#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
+#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0))
#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 055bb61d6e77..b899bd51fcd8 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -684,11 +684,84 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
}
int
+netxen_need_fw_reset(struct netxen_adapter *adapter)
+{
+ u32 count, old_count;
+ u32 val, version, major, minor, build;
+ int i, timeout;
+ u8 fw_type;
+
+ /* NX2031 firmware doesn't support heartbeat */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 1;
+
+ /* last attempt had failed */
+ if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+ return 1;
+
+ old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ NXWR32(adapter, CRB_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+ return -EINTR;
+ }
+
+ count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ break;
+ }
+
+ /* firmware is dead */
+ if (count == old_count)
+ return 1;
+
+ /* check if we have a newer or different firmware file */
+ if (adapter->fw) {
+
+ const struct firmware *fw = adapter->fw;
+
+ val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+ version = NETXEN_DECODE_VERSION(val);
+
+ major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+
+ if (version > NETXEN_VERSION_CODE(major, minor, build))
+ return 1;
+
+ if (version == NETXEN_VERSION_CODE(major, minor, build)) {
+
+ val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
+ fw_type = (val & 0x4) ?
+ NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
+
+ if (adapter->fw_type != fw_type)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static char *fw_name[] = {
+ "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash",
+};
+
+int
netxen_load_firmware(struct netxen_adapter *adapter)
{
u64 *ptr64;
u32 i, flashaddr, size;
const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
@@ -756,7 +829,7 @@ static int
netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
{
__le32 val;
- u32 major, minor, build, ver, min_ver, bios;
+ u32 ver, min_ver, bios;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
@@ -768,21 +841,18 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
return -EINVAL;
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
- major = (__force u32)val & 0xff;
- minor = ((__force u32)val >> 8) & 0xff;
- build = (__force u32)val >> 16;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
min_ver = NETXEN_VERSION_CODE(4, 0, 216);
else
min_ver = NETXEN_VERSION_CODE(3, 4, 216);
- ver = NETXEN_VERSION_CODE(major, minor, build);
+ ver = NETXEN_DECODE_VERSION(val);
- if ((major > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
+ if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
dev_err(&pdev->dev,
"%s: firmware version %d.%d.%d unsupported\n",
- fwname, major, minor, build);
+ fwname, _major(ver), _minor(ver), _build(ver));
return -EINVAL;
}
@@ -798,22 +868,21 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
if (netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&val))
return -EIO;
- major = (__force u32)val & 0xff;
- minor = ((__force u32)val >> 8) & 0xff;
- build = (__force u32)val >> 16;
- if (NETXEN_VERSION_CODE(major, minor, build) > ver)
+ val = NETXEN_DECODE_VERSION(val);
+ if (val > ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fwname);
return -EINVAL;
+ }
NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
return 0;
}
-static char *fw_name[] = { "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin" };
-
void netxen_request_firmware(struct netxen_adapter *adapter)
{
u32 capability, flashed_ver;
- int fw_type;
+ u8 fw_type;
struct pci_dev *pdev = adapter->pdev;
int rc = 0;
@@ -830,6 +899,8 @@ request_mn:
netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
if (capability & NX_PEG_TUNE_MN_PRESENT) {
@@ -838,6 +909,10 @@ request_mn:
}
}
+ fw_type = NX_FLASH_ROMIMAGE;
+ adapter->fw = NULL;
+ goto done;
+
request_fw:
rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev);
if (rc != 0) {
@@ -846,6 +921,7 @@ request_fw:
goto request_mn;
}
+ fw_type = NX_FLASH_ROMIMAGE;
adapter->fw = NULL;
goto done;
}
@@ -859,16 +935,13 @@ request_fw:
goto request_mn;
}
+ fw_type = NX_FLASH_ROMIMAGE;
adapter->fw = NULL;
goto done;
}
done:
- if (adapter->fw)
- dev_info(&pdev->dev, "loading firmware from file %s\n",
- fw_name[fw_type]);
- else
- dev_info(&pdev->dev, "loading firmware from flash\n");
+ adapter->fw_type = fw_type;
}
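The reset decision above hinges on polling NETXEN_PEG_ALIVE_COUNTER and declaring the firmware dead when it stops advancing. A minimal userspace sketch of that polling pattern follows; the counter read is a stub standing in for the register access, so this sketch always reports a dead firmware.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); live
 * firmware would increment this in the background. */
static unsigned int alive_counter;

static unsigned int read_alive_counter(void)
{
        return alive_counter;
}

/* Mirrors the polling loop in netxen_need_fw_reset(): sample the
 * counter, sleep, and report a dead firmware if it never advances. */
static bool firmware_is_dead(void)
{
        unsigned int old = read_alive_counter();
        int i;

        for (i = 0; i < 10; i++) {
                usleep(200 * 1000);             /* roughly msleep(200) */
                if (read_alive_counter() != old)
                        return false;           /* heartbeat advanced */
        }
        return true;                            /* counter stuck */
}

int main(void)
{
        printf("firmware dead: %d\n", firmware_is_dead());
        return 0;
}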
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 2919a2d12bf4..27539ddf94c4 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -718,6 +718,10 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
if (request_fw)
netxen_request_firmware(adapter);
+ err = netxen_need_fw_reset(adapter);
+ if (err <= 0)
+ return err;
+
if (first_boot != 0x55555555) {
NXWR32(adapter, CRB_CMDPEG_STATE, 0);
netxen_pinit_from_rom(adapter, 0);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 61755cbd978e..eda94fcd4065 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -928,13 +928,32 @@ static void phy_state_machine(struct work_struct *work)
* Otherwise, it's 0, and we're
* still waiting for AN */
if (err > 0) {
- phydev->state = PHY_RUNNING;
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else
+ phydev->state = PHY_NOLINK;
+ phydev->adjust_link(phydev->attached_dev);
} else {
phydev->state = PHY_AN;
phydev->link_timeout = PHY_AN_TIMEOUT;
}
- } else
- phydev->state = PHY_RUNNING;
+ } else {
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else
+ phydev->state = PHY_NOLINK;
+ phydev->adjust_link(phydev->attached_dev);
+ }
break;
}
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 156e02e8905d..6ed5317ab1c0 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1607,6 +1607,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_mb_about_fw(struct ql_adapter *qdev);
+void ql_link_on(struct ql_adapter *qdev);
+void ql_link_off(struct ql_adapter *qdev);
#if 1
#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 37c99fe79770..eb6a9ee640ed 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -59,7 +59,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cqicb->pkt_delay =
cpu_to_le16(qdev->tx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
+ status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
QPRINTK(qdev, IFUP, ERR,
@@ -82,7 +82,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cqicb->pkt_delay =
cpu_to_le16(qdev->rx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
+ status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
QPRINTK(qdev, IFUP, ERR,
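The sizeof(cqicb) -> sizeof(*cqicb) changes here (and the similar ones in qlge_main.c below) matter because sizeof applied to a pointer yields the pointer size, not the size of the control block it points to, so only the first few bytes were being written. A standalone sketch with a hypothetical 64-byte block:

#include <stdio.h>

struct cqicb {
        unsigned char data[64];         /* hypothetical control block */
};

int main(void)
{
        struct cqicb block;
        struct cqicb *cqicb = &block;

        /* sizeof(cqicb) is the pointer size; sizeof(*cqicb) is the block size */
        printf("sizeof(cqicb)  = %zu\n", sizeof(cqicb));
        printf("sizeof(*cqicb) = %zu\n", sizeof(*cqicb));
        return 0;
}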
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 90d1f76c0e8b..5768af17f168 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -214,6 +214,10 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
return -ENOMEM;
}
+ status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+ if (status)
+ return status;
+
status = ql_wait_cfg(qdev, bit);
if (status) {
QPRINTK(qdev, IFUP, ERR,
@@ -221,12 +225,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
goto exit;
}
- status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
- if (status)
- goto exit;
ql_write32(qdev, ICB_L, (u32) map);
ql_write32(qdev, ICB_H, (u32) (map >> 32));
- ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
mask = CFG_Q_MASK | (bit << 16);
value = bit | (q_id << CFG_Q_SHIFT);
@@ -237,6 +237,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
*/
status = ql_wait_cfg(qdev, bit);
exit:
+ ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
pci_unmap_single(qdev->pdev, map, size, direction);
return status;
}
@@ -412,6 +413,57 @@ exit:
return status;
}
+/* Set or clear MAC address in hardware. We sometimes
+ * have to clear it to prevent wrong frame routing,
+ * especially in a bonding environment.
+ */
+static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
+{
+ int status;
+ char zero_mac_addr[ETH_ALEN];
+ char *addr;
+
+ if (set) {
+ addr = &qdev->ndev->dev_addr[0];
+ QPRINTK(qdev, IFUP, DEBUG,
+ "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+ addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5]);
+ } else {
+ memset(zero_mac_addr, 0, ETH_ALEN);
+ addr = &zero_mac_addr[0];
+ QPRINTK(qdev, IFUP, DEBUG,
+ "Clearing MAC address on %s\n",
+ qdev->ndev->name);
+ }
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+ status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
+ MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
+ "address.\n");
+ return status;
+}
+
+void ql_link_on(struct ql_adapter *qdev)
+{
+ QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
+ qdev->ndev->name);
+ netif_carrier_on(qdev->ndev);
+ ql_set_mac_addr(qdev, 1);
+}
+
+void ql_link_off(struct ql_adapter *qdev)
+{
+ QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
+ qdev->ndev->name);
+ netif_carrier_off(qdev->ndev);
+ ql_set_mac_addr(qdev, 0);
+}
+
/* Get a specific frame routing value from the CAM.
* Used for debug and reg dump.
*/
@@ -1628,7 +1680,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
tx_ring_desc = &tx_ring->q[mac_rsp->tid];
ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
- qdev->stats.tx_bytes += tx_ring_desc->map_cnt;
+ qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
qdev->stats.tx_packets++;
dev_kfree_skb(tx_ring_desc->skb);
tx_ring_desc->skb = NULL;
@@ -1660,13 +1712,13 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
void ql_queue_asic_error(struct ql_adapter *qdev)
{
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
ql_disable_interrupts(qdev);
/* Clear adapter up bit to signal the recovery
* process that it shouldn't kill the reset worker
@@ -2104,7 +2156,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
}
tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
mac_iocb_ptr = tx_ring_desc->queue_entry;
- memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
+ memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -2743,7 +2795,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
ql_init_tx_ring(qdev, tx_ring);
- err = ql_write_cfg(qdev, wqicb, sizeof(wqicb), CFG_LRQ,
+ err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
(u16) tx_ring->wq_id);
if (err) {
QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
@@ -3008,7 +3060,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
int i;
u8 *hash_id = (u8 *) ricb->hash_cq_id;
- memset((void *)ricb, 0, sizeof(ricb));
+ memset((void *)ricb, 0, sizeof(*ricb));
ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
ricb->flags =
@@ -3030,7 +3082,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
- status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0);
+ status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
return status;
@@ -3039,25 +3091,40 @@ static int ql_start_rss(struct ql_adapter *qdev)
return status;
}
-/* Initialize the frame-to-queue routing. */
-static int ql_route_initialize(struct ql_adapter *qdev)
+static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
- int status = 0;
- int i;
+ int i, status = 0;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return status;
-
/* Clear all the entries in the routing table. */
for (i = 0; i < 16; i++) {
status = ql_set_routing_reg(qdev, i, 0, 0);
if (status) {
QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM packets.\n");
- goto exit;
+ "Failed to init routing register for CAM "
+ "packets.\n");
+ break;
}
}
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Initialize the frame-to-queue routing. */
+static int ql_route_initialize(struct ql_adapter *qdev)
+{
+ int status = 0;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ /* Clear all the entries in the routing table. */
+ status = ql_clear_routing_entries(qdev);
+ if (status)
+ goto exit;
status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
if (status) {
@@ -3096,14 +3163,15 @@ exit:
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
- int status;
+ int status, set;
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ /* Check if the link is up and use that to
+ * determine whether we are setting or clearing
+ * the MAC address in the CAM.
+ */
+ set = ql_read32(qdev, STS);
+ set &= qdev->port_link_up;
+ status = ql_set_mac_addr(qdev, set);
if (status) {
QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
return status;
@@ -3210,9 +3278,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
{
u32 value;
int status = 0;
- unsigned long end_jiffies = jiffies +
- max((unsigned long)1, usecs_to_jiffies(30));
+ unsigned long end_jiffies;
+ /* Clear all the entries in the routing table. */
+ status = ql_clear_routing_entries(qdev);
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
+ return status;
+ }
+
+ end_jiffies = jiffies +
+ max((unsigned long)1, usecs_to_jiffies(30));
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
do {
@@ -3252,7 +3328,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
int i, status = 0;
struct rx_ring *rx_ring;
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
/* Don't kill the reset worker thread if we
* are in the process of recovery.
@@ -3319,8 +3395,12 @@ static int ql_adapter_up(struct ql_adapter *qdev)
}
set_bit(QL_ADAPTER_UP, &qdev->flags);
ql_alloc_rx_buffers(qdev);
- if ((ql_read32(qdev, STS) & qdev->port_init))
- netif_carrier_on(qdev->ndev);
+ /* If the port is initialized and the
+ * link is up, then turn on the carrier.
+ */
+ if ((ql_read32(qdev, STS) & qdev->port_init) &&
+ (ql_read32(qdev, STS) & qdev->port_link_up))
+ ql_link_on(qdev);
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
@@ -3346,11 +3426,6 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
return -ENOMEM;
}
status = ql_request_irq(qdev);
- if (status)
- goto err_irq;
- return status;
-err_irq:
- ql_free_mem_resources(qdev);
return status;
}
@@ -3414,7 +3489,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
for (i = 0; i < qdev->tx_ring_count; i++) {
tx_ring = &qdev->tx_ring[i];
- memset((void *)tx_ring, 0, sizeof(tx_ring));
+ memset((void *)tx_ring, 0, sizeof(*tx_ring));
tx_ring->qdev = qdev;
tx_ring->wq_id = i;
tx_ring->wq_len = qdev->tx_ring_size;
@@ -3430,7 +3505,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
rx_ring = &qdev->rx_ring[i];
- memset((void *)rx_ring, 0, sizeof(rx_ring));
+ memset((void *)rx_ring, 0, sizeof(*rx_ring));
rx_ring->qdev = qdev;
rx_ring->cq_id = i;
rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
@@ -3789,7 +3864,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
int pos, err = 0;
u16 val16;
- memset((void *)qdev, 0, sizeof(qdev));
+ memset((void *)qdev, 0, sizeof(*qdev));
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "PCI device enable failed.\n");
@@ -3976,7 +4051,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
pci_disable_device(pdev);
return err;
}
- netif_carrier_off(ndev);
+ ql_link_off(qdev);
ql_display_dev_info(ndev);
cards_found++;
return 0;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 71afbf8b9c50..6685bd97da91 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -238,7 +238,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
&qdev->mpi_port_cfg_work, 0);
}
- netif_carrier_on(qdev->ndev);
+ ql_link_on(qdev);
}
static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
@@ -251,7 +251,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
if (status)
QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
}
static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
@@ -849,7 +849,7 @@ void ql_mpi_idc_work(struct work_struct *work)
case MB_CMD_PORT_RESET:
case MB_CMD_SET_PORT_CFG:
case MB_CMD_STOP_FW:
- netif_carrier_off(qdev->ndev);
+ ql_link_off(qdev);
/* Signal the resulting link up AEN
* that the frame routing and mac addr
* needs to be set.
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 343e8da1fa30..1f1757118a5c 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
int count;
int cpu;
- if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+ if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
"sfc: RSS disabled due to allocation failure\n");
return 1;
}
- cpumask_clear(core_mask);
count = 0;
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, core_mask)) {
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 341882f959f3..a2d82ddb3b4d 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
- u32 ioaddr, boguscnt = RX_RING_SIZE;
- u32 intr_status = 0;
+ u32 ioaddr, intr_status = 0;
ioaddr = ndev->base_addr;
spin_lock(&mdp->lock);
@@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
if (intr_status & cd->eesr_err_check)
sh_eth_error(ndev, intr_status);
- if (--boguscnt < 0) {
- printk(KERN_WARNING
- "%s: Too much work at interrupt, status=0x%4.4x.\n",
- ndev->name, intr_status);
- }
-
other_irq:
spin_unlock(&mdp->lock);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7681d28c53d7..daf961ab68bc 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2495,7 +2495,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
if (likely(status >> 16 == (status & 0xffff))) {
skb = sky2->rx_ring[sky2->rx_next].skb;
skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = status & 0xffff;
+ skb->csum = le16_to_cpu(status);
} else {
printk(KERN_NOTICE PFX "%s: hardware receive "
"checksum problem (status = %#x)\n",
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 5c61d5fad908..527c2ce59150 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -616,6 +616,14 @@ static void sl_uninit(struct net_device *dev)
sl_free_bufs(sl);
}
+/* Hook the destructor so we can free slip devices at the right point in time */
+static void sl_free_netdev(struct net_device *dev)
+{
+ int i = dev->base_addr;
+ free_netdev(dev);
+ slip_devs[i] = NULL;
+}
+
static const struct net_device_ops sl_netdev_ops = {
.ndo_init = sl_init,
.ndo_uninit = sl_uninit,
@@ -634,7 +642,7 @@ static const struct net_device_ops sl_netdev_ops = {
static void sl_setup(struct net_device *dev)
{
dev->netdev_ops = &sl_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = sl_free_netdev;
dev->hard_header_len = 0;
dev->addr_len = 0;
@@ -712,8 +720,6 @@ static void sl_sync(void)
static struct slip *sl_alloc(dev_t line)
{
int i;
- int sel = -1;
- int score = -1;
struct net_device *dev = NULL;
struct slip *sl;
@@ -724,55 +730,7 @@ static struct slip *sl_alloc(dev_t line)
dev = slip_devs[i];
if (dev == NULL)
break;
-
- sl = netdev_priv(dev);
- if (sl->leased) {
- if (sl->line != line)
- continue;
- if (sl->tty)
- return NULL;
-
- /* Clear ESCAPE & ERROR flags */
- sl->flags &= (1 << SLF_INUSE);
- return sl;
- }
-
- if (sl->tty)
- continue;
-
- if (current->pid == sl->pid) {
- if (sl->line == line && score < 3) {
- sel = i;
- score = 3;
- continue;
- }
- if (score < 2) {
- sel = i;
- score = 2;
- }
- continue;
- }
- if (sl->line == line && score < 1) {
- sel = i;
- score = 1;
- continue;
- }
- if (score < 0) {
- sel = i;
- score = 0;
- }
- }
-
- if (sel >= 0) {
- i = sel;
- dev = slip_devs[i];
- if (score > 1) {
- sl = netdev_priv(dev);
- sl->flags &= (1 << SLF_INUSE);
- return sl;
- }
}
-
/* Sorry, too many, all slots in use */
if (i >= slip_maxdev)
return NULL;
@@ -909,30 +867,13 @@ err_exit:
}
/*
-
- FIXME: 1,2 are fixed 3 was never true anyway.
-
- Let me to blame a bit.
- 1. TTY module calls this funstion on soft interrupt.
- 2. TTY module calls this function WITH MASKED INTERRUPTS!
- 3. TTY module does not notify us about line discipline
- shutdown,
-
- Seems, now it is clean. The solution is to consider netdevice and
- line discipline sides as two independent threads.
-
- By-product (not desired): sl? does not feel hangups and remains open.
- It is supposed, that user level program (dip, diald, slattach...)
- will catch SIGHUP and make the rest of work.
-
- I see no way to make more with current tty code. --ANK
- */
-
-/*
* Close down a SLIP channel.
* This means flushing out any pending queues, and then returning. This
* call is serialized against other ldisc functions.
+ *
+ * We also use this method for a hangup event.
*/
+
static void slip_close(struct tty_struct *tty)
{
struct slip *sl = tty->disc_data;
@@ -951,10 +892,16 @@ static void slip_close(struct tty_struct *tty)
del_timer_sync(&sl->keepalive_timer);
del_timer_sync(&sl->outfill_timer);
#endif
-
- /* Count references from TTY module */
+ /* Flush network side */
+ unregister_netdev(sl->dev);
+ /* This will complete via sl_free_netdev */
}
+static int slip_hangup(struct tty_struct *tty)
+{
+ slip_close(tty);
+ return 0;
+}
/************************************************************************
* STANDARD SLIP ENCAPSULATION *
************************************************************************/
@@ -1311,6 +1258,7 @@ static struct tty_ldisc_ops sl_ldisc = {
.name = "slip",
.open = slip_open,
.close = slip_close,
+ .hangup = slip_hangup,
.ioctl = slip_ioctl,
.receive_buf = slip_receive_buf,
.write_wakeup = slip_write_wakeup,
@@ -1384,6 +1332,8 @@ static void __exit slip_exit(void)
}
} while (busy && time_before(jiffies, timeout));
+ /* FIXME: hangup is async so we should wait when doing this second
+ phase */
for (i = 0; i < slip_maxdev; i++) {
dev = slip_devs[i];
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 80e01778dd3b..cd35d50e46d4 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return crc == crc2;
if (unlikely(crc != crc2)) {
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
dev_kfree_skb_any(skb2);
} else
usbnet_skb_return(dev, skb2);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 7ae82446b93a..1d3730d6690f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
if (unlikely(status & 0xbf)) {
- if (status & 0x01) dev->stats.rx_fifo_errors++;
- if (status & 0x02) dev->stats.rx_crc_errors++;
- if (status & 0x04) dev->stats.rx_frame_errors++;
- if (status & 0x20) dev->stats.rx_missed_errors++;
- if (status & 0x90) dev->stats.rx_length_errors++;
+ if (status & 0x01) dev->net->stats.rx_fifo_errors++;
+ if (status & 0x02) dev->net->stats.rx_crc_errors++;
+ if (status & 0x04) dev->net->stats.rx_frame_errors++;
+ if (status & 0x20) dev->net->stats.rx_missed_errors++;
+ if (status & 0x90) dev->net->stats.rx_length_errors++;
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 034e8a73ca6b..aeb1ab03a9ee 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
dbg("rx framesize %d range %d..%d mtu %d", skb->len,
net->hard_header_len, dev->hard_mtu, net->mtu);
#endif
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
nc_ensure_sync(dev);
return 0;
}
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
hdr_len = le16_to_cpup(&header->hdr_len);
packet_len = le16_to_cpup(&header->packet_len);
if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("packet too big, %d", packet_len);
nc_ensure_sync(dev);
return 0;
} else if (hdr_len < MIN_HEADER) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("header too short, %d", hdr_len);
nc_ensure_sync(dev);
return 0;
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if ((packet_len & 0x01) == 0) {
if (skb->data [packet_len] != PAD_BYTE) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad pad");
return 0;
}
skb_trim(skb, skb->len - 1);
}
if (skb->len != packet_len) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad packet len %d (expected %d)",
skb->len, packet_len);
nc_ensure_sync(dev);
return 0;
}
if (header->packet_id != get_unaligned(&trailer->packet_id)) {
- dev->stats.rx_fifo_errors++;
+ dev->net->stats.rx_fifo_errors++;
dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
le16_to_cpu(header->packet_id),
le16_to_cpu(trailer->packet_id));
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 1bf243ef950e..2232232b7989 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
|| skb->len < msg_len
|| (data_offset + data_len + 8) > msg_len)) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
le32_to_cpu(hdr->msg_type),
msg_len, data_offset, data_len, skb->len);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 89a91f8c22de..fe045896406b 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(header & RX_STS_ES_)) {
if (netif_msg_rx_err(dev))
devdbg(dev, "Error header=0x%08x", header);
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_dropped++;
if (header & RX_STS_CRC_) {
- dev->stats.rx_crc_errors++;
+ dev->net->stats.rx_crc_errors++;
} else {
if (header & (RX_STS_TL_ | RX_STS_RF_))
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
if ((header & RX_STS_LE_) &&
(!(header & RX_STS_FT_)))
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_length_errors++;
}
} else {
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 22c0585a0319..edfd9e10ceba 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
int status;
skb->protocol = eth_type_trans (skb, dev->net);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
if (netif_msg_rx_status (dev))
devdbg (dev, "< rx, len %zu, type 0x%x",
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
if (netif_msg_rx_err (dev))
devdbg (dev, "drop");
error:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
skb_queue_tail (&dev->done, skb);
}
}
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb)
case 0:
if (skb->len < dev->net->hard_header_len) {
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx length %d", skb->len);
}
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb)
* storm, recovering as needed.
*/
case -EPIPE:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
usbnet_defer_kevent (dev, EVENT_RX_HALT);
// FALLTHROUGH
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb)
case -EPROTO:
case -ETIME:
case -EILSEQ:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
if (netif_msg_link (dev))
@@ -465,12 +465,12 @@ block:
/* data overrun ... flush fifo? */
case -EOVERFLOW:
- dev->stats.rx_over_errors++;
+ dev->net->stats.rx_over_errors++;
// FALLTHROUGH
default:
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx status %d", urb_status);
break;
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net)
if (netif_msg_ifdown (dev))
devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
- dev->stats.rx_packets, dev->stats.tx_packets,
- dev->stats.rx_errors, dev->stats.tx_errors
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors
);
// ensure there are no more active urbs
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += entry->length;
+ dev->net->stats.tx_packets++;
+ dev->net->stats.tx_bytes += entry->length;
} else {
- dev->stats.tx_errors++;
+ dev->net->stats.tx_errors++;
switch (urb->status) {
case -EPIPE:
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
devdbg (dev, "drop, code %d", retval);
drop:
retval = NET_XMIT_SUCCESS;
- dev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
if (skb)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 87197dd9c788..1097c72e44d5 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -208,11 +208,14 @@ rx_drop:
static struct net_device_stats *veth_get_stats(struct net_device *dev)
{
- struct veth_priv *priv = netdev_priv(dev);
- struct net_device_stats *dev_stats = &dev->stats;
- unsigned int cpu;
+ struct veth_priv *priv;
+ struct net_device_stats *dev_stats;
+ int cpu;
struct veth_net_stats *stats;
+ priv = netdev_priv(dev);
+ dev_stats = &dev->stats;
+
dev_stats->rx_packets = 0;
dev_stats->tx_packets = 0;
dev_stats->rx_bytes = 0;
@@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
dev_stats->tx_dropped = 0;
dev_stats->rx_dropped = 0;
- if (priv->stats)
- for_each_online_cpu(cpu) {
- stats = per_cpu_ptr(priv->stats, cpu);
+ for_each_online_cpu(cpu) {
+ stats = per_cpu_ptr(priv->stats, cpu);
- dev_stats->rx_packets += stats->rx_packets;
- dev_stats->tx_packets += stats->tx_packets;
- dev_stats->rx_bytes += stats->rx_bytes;
- dev_stats->tx_bytes += stats->tx_bytes;
- dev_stats->tx_dropped += stats->tx_dropped;
- dev_stats->rx_dropped += stats->rx_dropped;
- }
+ dev_stats->rx_packets += stats->rx_packets;
+ dev_stats->tx_packets += stats->tx_packets;
+ dev_stats->rx_bytes += stats->rx_bytes;
+ dev_stats->tx_bytes += stats->tx_bytes;
+ dev_stats->tx_dropped += stats->tx_dropped;
+ dev_stats->rx_dropped += stats->rx_dropped;
+ }
return dev_stats;
}
@@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev)
netif_carrier_off(dev);
netif_carrier_off(priv->peer);
- free_percpu(priv->stats);
- priv->stats = NULL;
return 0;
}
@@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev)
return 0;
}
+static void veth_dev_free(struct net_device *dev)
+{
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ free_percpu(priv->stats);
+ free_netdev(dev);
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev)
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
- dev->destructor = free_netdev;
+ dev->destructor = veth_dev_free;
}
/*
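With this change the veth per-CPU counters live for the lifetime of the device (freed in veth_dev_free) and are simply summed on every query. A userspace sketch of that aggregation, with plain arrays standing in for per_cpu_ptr() and an arbitrary CPU count:

#include <stdio.h>

#define NR_CPUS 4                       /* arbitrary example value */

struct cpu_stats {
        unsigned long rx_packets;
        unsigned long tx_packets;
};

int main(void)
{
        struct cpu_stats percpu[NR_CPUS] = {
                { 10, 5 }, { 3, 7 }, { 0, 0 }, { 8, 2 },
        };
        struct cpu_stats total = { 0, 0 };
        int cpu;

        /* sum every CPU's counters into one device-wide total */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                total.rx_packets += percpu[cpu].rx_packets;
                total.tx_packets += percpu[cpu].tx_packets;
        }
        printf("rx %lu tx %lu\n", total.rx_packets, total.tx_packets);
        return 0;
}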
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8574622e36a5..c9e2ae90f195 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -154,9 +154,8 @@ int sync_start(void)
{
int err;
- if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- cpumask_clear(marked_cpus);
start_cpu_work();
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 242257b19441..a7aae24f2889 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/oprofile.h>
-#include <linux/vmalloc.h>
#include <linux/errno.h>
#include "event_buffer.h"
@@ -407,6 +406,21 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val)
return op_cpu_buffer_add_data(entry, val);
}
+int oprofile_add_data64(struct op_entry *entry, u64 val)
+{
+ if (!entry->event)
+ return 0;
+ if (op_cpu_buffer_get_size(entry) < 2)
+ /*
+ * the function returns 0 to indicate that the buffer
+ * is too small, even if there is some space left
+ */
+ return 0;
+ if (!op_cpu_buffer_add_data(entry, (u32)val))
+ return 0;
+ return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
+}
+
int oprofile_write_commit(struct op_entry *entry)
{
if (!entry->event)
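oprofile_add_data64() above stores a 64-bit value as two consecutive 32-bit entries, low word first, and only when the buffer has room for both. A small sketch of the split and the matching reassembly; the two-slot layout is the only detail carried over, the rest is local to this example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t val = 0x1122334455667788ULL;

        /* split: low word first, then high word */
        uint32_t slot[2] = { (uint32_t)val, (uint32_t)(val >> 32) };

        /* reassemble on the consumer side */
        uint64_t back = (uint64_t)slot[0] | ((uint64_t)slot[1] << 32);

        printf("%llx\n", (unsigned long long)back);
        return 0;
}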
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index e1f6ce03705e..efefae539a59 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -33,6 +33,7 @@ void oprofile_reset_stats(void)
atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
atomic_set(&oprofile_stats.event_lost_overflow, 0);
+ atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
}
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 5d610cbcfe80..0f0e0b919ef4 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1134,7 +1134,7 @@ static const struct file_operations ccio_proc_bitmap_fops = {
.llseek = seq_lseek,
.release = single_release,
};
-#endif
+#endif /* CONFIG_PROC_FS */
/**
* ccio_find_ioc - Find the ioc in the ioc_list
@@ -1568,14 +1568,15 @@ static int __init ccio_probe(struct parisc_device *dev)
/* if this fails, no I/O cards will work, so may as well bug */
BUG_ON(dev->dev.platform_data == NULL);
HBA_DATA(dev->dev.platform_data)->iommu = ioc;
-
+
+#ifdef CONFIG_PROC_FS
if (ioc_count == 0) {
proc_create(MODULE_NAME, 0, proc_runway_root,
&ccio_proc_info_fops);
proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
&ccio_proc_bitmap_fops);
}
-
+#endif
ioc_count++;
parisc_has_iommu();
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 52ae0b1d470c..c590974e9815 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -353,7 +353,7 @@ static unsigned int dino_startup_irq(unsigned int irq)
return 0;
}
-static struct hw_interrupt_type dino_interrupt_type = {
+static struct irq_chip dino_interrupt_type = {
.typename = "GSC-PCI",
.startup = dino_startup_irq,
.shutdown = dino_disable_irq,
@@ -1019,22 +1019,22 @@ static int __init dino_probe(struct parisc_device *dev)
** It's not used to avoid chicken/egg problems
** with configuration accessor functions.
*/
- bus = pci_scan_bus_parented(&dev->dev, dino_current_bus,
- &dino_cfg_ops, NULL);
+ dino_dev->hba.hba_bus = bus = pci_scan_bus_parented(&dev->dev,
+ dino_current_bus, &dino_cfg_ops, NULL);
+
if(bus) {
- pci_bus_add_devices(bus);
/* This code *depends* on scanning being single threaded
* if it isn't, this global bus number count will fail
*/
dino_current_bus = bus->subordinate + 1;
pci_bus_assign_resources(bus);
+ pci_bus_add_devices(bus);
} else {
- printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (probably duplicate bus number %d)\n",
+ printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (duplicate bus number %d?)\n",
dev_name(&dev->dev), dino_current_bus);
/* increment the bus number in case of duplicates */
dino_current_bus++;
}
- dino_dev->hba.hba_bus = bus;
return 0;
}
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 5b89f404e668..51220749cb65 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -188,7 +188,7 @@ static unsigned int eisa_startup_irq(unsigned int irq)
return 0;
}
-static struct hw_interrupt_type eisa_interrupt_type = {
+static struct irq_chip eisa_interrupt_type = {
.typename = "EISA",
.startup = eisa_startup_irq,
.shutdown = eisa_disable_irq,
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index d33632917696..647adc9f85ad 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -148,7 +148,7 @@ static unsigned int gsc_asic_startup_irq(unsigned int irq)
return 0;
}
-static struct hw_interrupt_type gsc_asic_interrupt_type = {
+static struct irq_chip gsc_asic_interrupt_type = {
.typename = "GSC-ASIC",
.startup = gsc_asic_startup_irq,
.shutdown = gsc_asic_disable_irq,
@@ -158,7 +158,7 @@ static struct hw_interrupt_type gsc_asic_interrupt_type = {
.end = no_end_irq,
};
-int gsc_assign_irq(struct hw_interrupt_type *type, void *data)
+int gsc_assign_irq(struct irq_chip *type, void *data)
{
static int irq = GSC_IRQ_BASE;
struct irq_desc *desc;
diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h
index 762a1babad60..b9d7bfb68e24 100644
--- a/drivers/parisc/gsc.h
+++ b/drivers/parisc/gsc.h
@@ -38,7 +38,7 @@ struct gsc_asic {
int gsc_common_setup(struct parisc_device *parent, struct gsc_asic *gsc_asic);
int gsc_alloc_irq(struct gsc_irq *dev); /* dev needs an irq */
int gsc_claim_irq(struct gsc_irq *dev, int irq); /* dev needs this irq */
-int gsc_assign_irq(struct hw_interrupt_type *type, void *data);
+int gsc_assign_irq(struct irq_chip *type, void *data);
int gsc_find_local_irq(unsigned int irq, int *global_irq, int limit);
void gsc_fixup_irqs(struct parisc_device *parent, void *ctrl,
void (*choose)(struct parisc_device *child, void *ctrl));
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 4a9cc92d4d18..88e333553212 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -729,7 +729,7 @@ static int iosapic_set_affinity_irq(unsigned int irq,
}
#endif
-static struct hw_interrupt_type iosapic_interrupt_type = {
+static struct irq_chip iosapic_interrupt_type = {
.typename = "IO-SAPIC-level",
.startup = iosapic_startup_irq,
.shutdown = iosapic_disable_irq,
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 59fbbf128365..ede614616f8e 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -980,28 +980,38 @@ static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
unsigned long bytecnt;
- pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; /* PA_VIEW */
- pdc_pat_cell_mod_maddr_block_t io_pdc_cell; /* IO_VIEW */
long io_count;
long status; /* PDC return status */
long pa_count;
+ pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; /* PA_VIEW */
+ pdc_pat_cell_mod_maddr_block_t *io_pdc_cell; /* IO_VIEW */
int i;
+ pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
+ if (!pa_pdc_cell)
+ return;
+
+ io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
+ if (!io_pdc_cell) {
+ kfree(pa_pdc_cell);
+ return;
+ }
+
/* return cell module (IO view) */
status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
- PA_VIEW, & pa_pdc_cell);
- pa_count = pa_pdc_cell.mod[1];
+ PA_VIEW, pa_pdc_cell);
+ pa_count = pa_pdc_cell->mod[1];
status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
- IO_VIEW, &io_pdc_cell);
- io_count = io_pdc_cell.mod[1];
+ IO_VIEW, io_pdc_cell);
+ io_count = io_pdc_cell->mod[1];
/* We've already done this once for device discovery...*/
if (status != PDC_OK) {
panic("pdc_pat_cell_module() call failed for LBA!\n");
}
- if (PAT_GET_ENTITY(pa_pdc_cell.mod_info) != PAT_ENTITY_LBA) {
+ if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
}
@@ -1016,8 +1026,8 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
} *p, *io;
struct resource *r;
- p = (void *) &(pa_pdc_cell.mod[2+i*3]);
- io = (void *) &(io_pdc_cell.mod[2+i*3]);
+ p = (void *) &(pa_pdc_cell->mod[2+i*3]);
+ io = (void *) &(io_pdc_cell->mod[2+i*3]);
/* Convert the PAT range data to PCI "struct resource" */
switch(p->type & 0xff) {
@@ -1096,6 +1106,9 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
break;
}
}
+
+ kfree(pa_pdc_cell);
+ kfree(io_pdc_cell);
}
#else
/* keep compiler from complaining about missing declarations */
@@ -1509,10 +1522,6 @@ lba_driver_probe(struct parisc_device *dev)
lba_bus = lba_dev->hba.hba_bus =
pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
cfg_ops, NULL);
- if (lba_bus) {
- lba_next_bus = lba_bus->subordinate + 1;
- pci_bus_add_devices(lba_bus);
- }
/* This is in lieu of calling pci_assign_unassigned_resources() */
if (is_pdc_pat()) {
@@ -1533,7 +1542,6 @@ lba_driver_probe(struct parisc_device *dev)
}
pci_enable_bridges(lba_bus);
-
/*
** Once PCI register ops has walked the bus, access to config
** space is restricted. Avoids master aborts on config cycles.
@@ -1543,6 +1551,11 @@ lba_driver_probe(struct parisc_device *dev)
lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
}
+ if (lba_bus) {
+ lba_next_bus = lba_bus->subordinate + 1;
+ pci_bus_add_devices(lba_bus);
+ }
+
/* Whew! Finally done! Tell services we got this one covered. */
return 0;
}
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index d46dd57450ac..123d8fe3427d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -2057,6 +2057,7 @@ void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
r->start = (base & ~1UL) | PCI_F_EXTEND;
size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
r->end = r->start + size;
+ r->flags = IORESOURCE_MEM;
}
}
@@ -2093,4 +2094,5 @@ void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
r->start += rope * (size + 1); /* adjust base for this rope */
r->end = r->start + size;
+ r->flags = IORESOURCE_MEM;
}
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 33e5ade774ca..675f04e6597a 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -325,7 +325,7 @@ static unsigned int superio_startup_irq(unsigned int irq)
return 0;
}
-static struct hw_interrupt_type superio_interrupt_type = {
+static struct irq_chip superio_interrupt_type = {
.typename = SUPERIO,
.startup = superio_startup_irq,
.shutdown = superio_disable_irq,
@@ -434,8 +434,8 @@ static void __init superio_parport_init(void)
0 /*base_hi*/,
PAR_IRQ,
PARPORT_DMA_NONE /* dma */,
- NULL /*struct pci_dev* */),
- 0 /* shared irq flags */ )
+ NULL /*struct pci_dev* */,
+ 0 /* shared irq flags */))
printk(KERN_WARNING PFX "Probing parallel port failed.\n");
#endif /* CONFIG_PARPORT_PC */
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1032d5fdbd42..2597145a066e 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2907,6 +2907,7 @@ enum parport_pc_pci_cards {
netmos_9755,
netmos_9805,
netmos_9815,
+ netmos_9901,
quatech_sppxp100,
};
@@ -2987,7 +2988,7 @@ static struct parport_pc_pci {
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9805 */ { 1, { { 0, -1 }, } },
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
-
+ /* netmos_9901 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -3089,6 +3090,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x2000, 0, 0, netmos_9901 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 844580489d4d..5c5043f239cf 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -555,6 +555,8 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
* @slot: pointer to the &struct hotplug_slot to register
* @devnr: device number
* @name: name registered with kobject core
+ * @owner: caller module owner
+ * @mod_name: caller module name
*
* Registers a hotplug slot with the pci hotplug subsystem, which will allow
* userspace interaction to the slot.
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e53eacd75c8d..360fb67a30d7 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -39,7 +39,6 @@
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
-#include <asm/e820.h>
#include "pci.h"
#define ROOT_SIZE VTD_PAGE_SIZE
@@ -57,14 +56,32 @@
#define MAX_AGAW_WIDTH 64
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
+#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
-#ifndef PHYSICAL_PAGE_MASK
-#define PHYSICAL_PAGE_MASK PAGE_MASK
-#endif
+
+/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+ are never going to work. */
+static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
+{
+ return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+
+static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
+{
+ return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+static inline unsigned long page_to_dma_pfn(struct page *pg)
+{
+ return mm_to_dma_pfn(page_to_pfn(pg));
+}
+static inline unsigned long virt_to_dma_pfn(void *p)
+{
+ return page_to_dma_pfn(virt_to_page(p));
+}
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
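The helpers just added convert between MM and VT-d page frame numbers by shifting with (PAGE_SHIFT - VTD_PAGE_SHIFT), which is why VT-d pages must not be larger than MM pages. A sketch with assumed 4KiB VT-d pages and 16KiB MM pages (example shift values only):

#include <stdio.h>

#define VTD_PAGE_SHIFT 12               /* assumed 4KiB VT-d page */
#define PAGE_SHIFT     14               /* assumed 16KiB MM page */

static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
        unsigned long mm_pfn = 5;

        /* one 16KiB MM page covers four 4KiB VT-d pages */
        printf("mm %lu -> dma %lu -> mm %lu\n",
               mm_pfn, mm_to_dma_pfn(mm_pfn),
               dma_to_mm_pfn(mm_to_dma_pfn(mm_pfn)));
        return 0;
}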
@@ -205,12 +222,17 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
- return (pte->val & VTD_PAGE_MASK);
+#ifdef CONFIG_64BIT
+ return pte->val & VTD_PAGE_MASK;
+#else
+ /* Must have a full atomic 64-bit read */
+ return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
}
-static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
+static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
- pte->val |= (addr & VTD_PAGE_MASK);
+ pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}
static inline bool dma_pte_present(struct dma_pte *pte)
@@ -218,6 +240,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
return (pte->val & 3) != 0;
}
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+ return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
/*
* This domain is a statically identity mapping domain.
* 1. This domain creats a static 1:1 mapping to all usable memory.
@@ -245,7 +272,6 @@ struct dmar_domain {
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
- spinlock_t mapping_lock; /* page table lock */
int gaw; /* max guest address width */
/* adjusted guest address width, 0 is level 2 30-bit */
@@ -649,80 +675,78 @@ static inline int width_to_agaw(int width)
static inline unsigned int level_to_offset_bits(int level)
{
- return (12 + (level - 1) * LEVEL_STRIDE);
+ return (level - 1) * LEVEL_STRIDE;
}
-static inline int address_level_offset(u64 addr, int level)
+static inline int pfn_level_offset(unsigned long pfn, int level)
{
- return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
+ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
-static inline u64 level_mask(int level)
+static inline unsigned long level_mask(int level)
{
- return ((u64)-1 << level_to_offset_bits(level));
+ return -1UL << level_to_offset_bits(level);
}
-static inline u64 level_size(int level)
+static inline unsigned long level_size(int level)
{
- return ((u64)1 << level_to_offset_bits(level));
+ return 1UL << level_to_offset_bits(level);
}
-static inline u64 align_to_level(u64 addr, int level)
+static inline unsigned long align_to_level(unsigned long pfn, int level)
{
- return ((addr + level_size(level) - 1) & level_mask(level));
+ return (pfn + level_size(level) - 1) & level_mask(level);
}
-static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
+static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+ unsigned long pfn)
{
- int addr_width = agaw_to_width(domain->agaw);
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
int offset;
- unsigned long flags;
BUG_ON(!domain->pgd);
-
- addr &= (((u64)1) << addr_width) - 1;
+ BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
parent = domain->pgd;
- spin_lock_irqsave(&domain->mapping_lock, flags);
while (level > 0) {
void *tmp_page;
- offset = address_level_offset(addr, level);
+ offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
if (level == 1)
break;
if (!dma_pte_present(pte)) {
+ uint64_t pteval;
+
tmp_page = alloc_pgtable_page();
- if (!tmp_page) {
- spin_unlock_irqrestore(&domain->mapping_lock,
- flags);
+ if (!tmp_page)
return NULL;
+
+ domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+ pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ if (cmpxchg64(&pte->val, 0ULL, pteval)) {
+ /* Someone else set it while we were thinking; use theirs. */
+ free_pgtable_page(tmp_page);
+ } else {
+ dma_pte_addr(pte);
+ domain_flush_cache(domain, pte, sizeof(*pte));
}
- domain_flush_cache(domain, tmp_page, PAGE_SIZE);
- dma_set_pte_addr(pte, virt_to_phys(tmp_page));
- /*
- * high level table always sets r/w, last level page
- * table control read/write
- */
- dma_set_pte_readable(pte);
- dma_set_pte_writable(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
}
parent = phys_to_virt(dma_pte_addr(pte));
level--;
}
- spin_unlock_irqrestore(&domain->mapping_lock, flags);
return pte;
}
/* return address's pte at specific level */
-static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
- int level)
+static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
+ unsigned long pfn,
+ int level)
{
struct dma_pte *parent, *pte = NULL;
int total = agaw_to_level(domain->agaw);
@@ -730,7 +754,7 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
parent = domain->pgd;
while (level <= total) {
- offset = address_level_offset(addr, total);
+ offset = pfn_level_offset(pfn, total);
pte = &parent[offset];
if (level == total)
return pte;
@@ -743,74 +767,82 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
return NULL;
}
-/* clear one page's page table */
-static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
-{
- struct dma_pte *pte = NULL;
-
- /* get last level pte */
- pte = dma_addr_level_pte(domain, addr, 1);
-
- if (pte) {
- dma_clear_pte(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- }
-}
-
/* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
+static void dma_pte_clear_range(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long last_pfn)
{
- int addr_width = agaw_to_width(domain->agaw);
- int npages;
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ struct dma_pte *first_pte, *pte;
- start &= (((u64)1) << addr_width) - 1;
- end &= (((u64)1) << addr_width) - 1;
- /* in case it's partial page */
- start &= PAGE_MASK;
- end = PAGE_ALIGN(end);
- npages = (end - start) / VTD_PAGE_SIZE;
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
- /* we don't need lock here, nobody else touches the iova range */
- while (npages--) {
- dma_pte_clear_one(domain, start);
- start += VTD_PAGE_SIZE;
+ /* we don't need lock here; nobody else touches the iova range */
+ while (start_pfn <= last_pfn) {
+ first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
+ if (!pte) {
+ start_pfn = align_to_level(start_pfn + 1, 2);
+ continue;
+ }
+ do {
+ dma_clear_pte(pte);
+ start_pfn++;
+ pte++;
+ } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
+
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
}
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
- u64 start, u64 end)
+ unsigned long start_pfn,
+ unsigned long last_pfn)
{
- int addr_width = agaw_to_width(domain->agaw);
- struct dma_pte *pte;
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ struct dma_pte *first_pte, *pte;
int total = agaw_to_level(domain->agaw);
int level;
- u64 tmp;
+ unsigned long tmp;
- start &= (((u64)1) << addr_width) - 1;
- end &= (((u64)1) << addr_width) - 1;
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
- /* we don't need lock here, nobody else touches the iova range */
+ /* We don't need lock here; nobody else touches the iova range */
level = 2;
while (level <= total) {
- tmp = align_to_level(start, level);
- if (tmp >= end || (tmp + level_size(level) > end))
+ tmp = align_to_level(start_pfn, level);
+
+ /* If we can't even clear one PTE at this level, we're done */
+ if (tmp + level_size(level) - 1 > last_pfn)
return;
- while (tmp < end) {
- pte = dma_addr_level_pte(domain, tmp, level);
- if (pte) {
- free_pgtable_page(
- phys_to_virt(dma_pte_addr(pte)));
- dma_clear_pte(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
+ while (tmp + level_size(level) - 1 <= last_pfn) {
+ first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
+ if (!pte) {
+ tmp = align_to_level(tmp + 1, level + 1);
+ continue;
}
- tmp += level_size(level);
+ do {
+ if (dma_pte_present(pte)) {
+ free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
+ dma_clear_pte(pte);
+ }
+ pte++;
+ tmp += level_size(level);
+ } while (!first_pte_in_page(pte) &&
+ tmp + level_size(level) - 1 <= last_pfn);
+
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+
}
level++;
}
/* free pgd */
- if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
+ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
free_pgtable_page(domain->pgd);
domain->pgd = NULL;
}
@@ -1036,11 +1068,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int pages)
+ unsigned long pfn, unsigned int pages)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
+ uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
- BUG_ON(addr & (~VTD_PAGE_MASK));
BUG_ON(pages == 0);
/*
@@ -1055,7 +1087,12 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
else
iommu->flush.flush_iotlb(iommu, did, addr, mask,
DMA_TLB_PSI_FLUSH);
- if (did)
+
+ /*
+ * In caching mode, domain ID 0 is reserved for non-present to present
+ * mapping flush. Device IOTLB doesn't need to be flushed in this case.
+ */
+ if (!cap_caching_mode(iommu->cap) || did)
iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
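
As a minimal sketch (not part of the patch itself): the mask handed to the IOTLB flush above is the base-2 log of the page count rounded up to a power of two. The program below models that computation; the roundup_pow_of_two() and ilog2() helpers here are simplified stand-ins for the kernel's versions.

/* Editor's standalone model of the psi mask computation. */
#include <stdio.h>
#include <stdint.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int pages = 5;			/* e.g. a 5-page mapping */
	unsigned int mask = ilog2(roundup_pow_of_two(pages));
	uint64_t pfn = 0x12345;
	uint64_t addr = pfn << 12;		/* VTD_PAGE_SHIFT == 12 */

	/* 5 pages round up to 8, so the IOTLB is flushed with mask 3 */
	printf("pages=%u mask=%u addr=0x%llx\n",
	       pages, mask, (unsigned long long)addr);
	return 0;
}
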
@@ -1280,7 +1317,6 @@ static void dmar_init_reserved_ranges(void)
struct pci_dev *pdev = NULL;
struct iova *iova;
int i;
- u64 addr, size;
init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
@@ -1303,12 +1339,9 @@ static void dmar_init_reserved_ranges(void)
r = &pdev->resource[i];
if (!r->flags || !(r->flags & IORESOURCE_MEM))
continue;
- addr = r->start;
- addr &= PHYSICAL_PAGE_MASK;
- size = r->end - addr;
- size = PAGE_ALIGN(size);
- iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
- IOVA_PFN(size + addr) - 1);
+ iova = reserve_iova(&reserved_iova_list,
+ IOVA_PFN(r->start),
+ IOVA_PFN(r->end));
if (!iova)
printk(KERN_ERR "Reserve iova failed\n");
}
@@ -1342,7 +1375,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
unsigned long sagaw;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->mapping_lock);
spin_lock_init(&domain->iommu_lock);
domain_reserve_special_ranges(domain);
@@ -1389,7 +1421,6 @@ static void domain_exit(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- u64 end;
/* Domain 0 is reserved, so dont process it */
if (!domain)
@@ -1398,14 +1429,12 @@ static void domain_exit(struct dmar_domain *domain)
domain_remove_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
- end = DOMAIN_MAX_ADDR(domain->gaw);
- end = end & (~PAGE_MASK);
/* clear ptes */
- dma_pte_clear_range(domain, 0, end);
+ dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
/* free page tables */
- dma_pte_free_pagetable(domain, 0, end);
+ dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
for_each_active_iommu(iommu, drhd)
if (test_bit(iommu->seq_id, &domain->iommu_bmp))
@@ -1619,42 +1648,86 @@ static int domain_context_mapped(struct pci_dev *pdev)
tmp->devfn);
}
-static int
-domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
- u64 hpa, size_t size, int prot)
+static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ struct scatterlist *sg, unsigned long phys_pfn,
+ unsigned long nr_pages, int prot)
{
- u64 start_pfn, end_pfn;
- struct dma_pte *pte;
- int index;
- int addr_width = agaw_to_width(domain->agaw);
+ struct dma_pte *first_pte = NULL, *pte = NULL;
+ phys_addr_t uninitialized_var(pteval);
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ unsigned long sg_res;
- hpa &= (((u64)1) << addr_width) - 1;
+ BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
return -EINVAL;
- iova &= PAGE_MASK;
- start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
- end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
- index = 0;
- while (start_pfn < end_pfn) {
- pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
- if (!pte)
- return -ENOMEM;
+
+ prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+
+ if (sg)
+ sg_res = 0;
+ else {
+ sg_res = nr_pages + 1;
+ pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+ }
+
+ while (nr_pages--) {
+ uint64_t tmp;
+
+ if (!sg_res) {
+ sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+ sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+ sg->dma_length = sg->length;
+ pteval = page_to_phys(sg_page(sg)) | prot;
+ }
+ if (!pte) {
+ first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
+ if (!pte)
+ return -ENOMEM;
+ }
/* We don't need lock here, nobody else
* touches the iova range
*/
- BUG_ON(dma_pte_addr(pte));
- dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
- dma_set_pte_prot(pte, prot);
- if (prot & DMA_PTE_SNP)
- dma_set_pte_snp(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- start_pfn++;
- index++;
+ tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
+ if (tmp) {
+ static int dumps = 5;
+ printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+ iov_pfn, tmp, (unsigned long long)pteval);
+ if (dumps) {
+ dumps--;
+ debug_dma_dump_mappings(NULL);
+ }
+ WARN_ON(1);
+ }
+ pte++;
+ if (!nr_pages || first_pte_in_page(pte)) {
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+ pte = NULL;
+ }
+ iov_pfn++;
+ pteval += VTD_PAGE_SIZE;
+ sg_res--;
+ if (!sg_res)
+ sg = sg_next(sg);
}
return 0;
}
+static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ struct scatterlist *sg, unsigned long nr_pages,
+ int prot)
+{
+ return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+}
+
+static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phys_pfn, unsigned long nr_pages,
+ int prot)
+{
+ return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+}
+
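
As a minimal sketch (not part of the patch itself): __domain_mapping() serves both wrappers from one loop, consuming either a scatterlist or a contiguous pfn range. The toy program below models only the per-entry bookkeeping, pages per entry and the resulting dma_address; struct toy_sg is an invented stand-in for struct scatterlist.

/* Editor's toy model of the sg bookkeeping in __domain_mapping(). */
#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT	12
#define VTD_PAGE_SIZE	(1UL << VTD_PAGE_SHIFT)

struct toy_sg {
	unsigned long offset;	/* offset into the first page */
	unsigned long length;	/* bytes in this entry */
};

int main(void)
{
	struct toy_sg sg[] = { { 0x100, 0x2100 }, { 0, 0x1000 } };
	unsigned long iov_pfn = 0x1000;
	unsigned int i;

	for (i = 0; i < sizeof(sg) / sizeof(sg[0]); i++) {
		/* same rounding as __domain_mapping(): offset + length,
		 * rounded up to whole VT-d pages */
		unsigned long sg_res = (sg[i].offset + sg[i].length +
					VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
		uint64_t dma = ((uint64_t)iov_pfn << VTD_PAGE_SHIFT) +
			       sg[i].offset;

		printf("sg[%u]: %lu page(s), dma_address=0x%llx\n",
		       i, sg_res, (unsigned long long)dma);
		iov_pfn += sg_res;
	}
	return 0;
}
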
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
if (!iommu)
@@ -1845,58 +1918,61 @@ error:
static int iommu_identity_mapping;
+static int iommu_domain_identity_map(struct dmar_domain *domain,
+ unsigned long long start,
+ unsigned long long end)
+{
+ unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
+ unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
+
+ if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
+ dma_to_mm_pfn(last_vpfn))) {
+ printk(KERN_ERR "IOMMU: reserve iova failed\n");
+ return -ENOMEM;
+ }
+
+ pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
+ start, end, domain->id);
+ /*
+ * RMRR range might have overlap with physical memory range,
+ * clear it first
+ */
+ dma_pte_clear_range(domain, first_vpfn, last_vpfn);
+
+ return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
+ last_vpfn - first_vpfn + 1,
+ DMA_PTE_READ|DMA_PTE_WRITE);
+}
+
static int iommu_prepare_identity_map(struct pci_dev *pdev,
unsigned long long start,
unsigned long long end)
{
struct dmar_domain *domain;
- unsigned long size;
- unsigned long long base;
int ret;
printk(KERN_INFO
- "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- pci_name(pdev), start, end);
- if (iommu_identity_mapping)
- domain = si_domain;
- else
- /* page table init */
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+ pci_name(pdev), start, end);
+
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
return -ENOMEM;
- /* The address might not be aligned */
- base = start & PAGE_MASK;
- size = end - base;
- size = PAGE_ALIGN(size);
- if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
- IOVA_PFN(base + size) - 1)) {
- printk(KERN_ERR "IOMMU: reserve iova failed\n");
- ret = -ENOMEM;
- goto error;
- }
-
- pr_debug("Mapping reserved region %lx@%llx for %s\n",
- size, base, pci_name(pdev));
- /*
- * RMRR range might have overlap with physical memory range,
- * clear it first
- */
- dma_pte_clear_range(domain, base, base + size);
-
- ret = domain_page_mapping(domain, base, base, size,
- DMA_PTE_READ|DMA_PTE_WRITE);
+ ret = iommu_domain_identity_map(domain, start, end);
if (ret)
goto error;
/* context entry init */
ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
- if (!ret)
- return 0;
-error:
+ if (ret)
+ goto error;
+
+ return 0;
+
+ error:
domain_exit(domain);
return ret;
-
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
@@ -1908,64 +1984,6 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
rmrr->end_address + 1);
}
-#ifdef CONFIG_DMAR_GFX_WA
-struct iommu_prepare_data {
- struct pci_dev *pdev;
- int ret;
-};
-
-static int __init iommu_prepare_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- struct iommu_prepare_data *data;
-
- data = (struct iommu_prepare_data *)datax;
-
- data->ret = iommu_prepare_identity_map(data->pdev,
- start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
- return data->ret;
-
-}
-
-static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
-{
- int nid;
- struct iommu_prepare_data data;
-
- data.pdev = pdev;
- data.ret = 0;
-
- for_each_online_node(nid) {
- work_with_active_regions(nid, iommu_prepare_work_fn, &data);
- if (data.ret)
- return data.ret;
- }
- return data.ret;
-}
-
-static void __init iommu_prepare_gfx_mapping(void)
-{
- struct pci_dev *pdev = NULL;
- int ret;
-
- for_each_pci_dev(pdev) {
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
- !IS_GFX_DEVICE(pdev))
- continue;
- printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
- pci_name(pdev));
- ret = iommu_prepare_with_active_regions(pdev);
- if (ret)
- printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
- }
-}
-#else /* !CONFIG_DMAR_GFX_WA */
-static inline void iommu_prepare_gfx_mapping(void)
-{
- return;
-}
-#endif
-
#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
@@ -1976,12 +1994,12 @@ static inline void iommu_prepare_isa(void)
if (!pdev)
return;
- printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
+ printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
if (ret)
- printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, "
- "floppy might not work\n");
+ printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
+ "floppy might not work\n");
}
#else
@@ -2009,16 +2027,30 @@ static int __init init_context_pass_through(void)
}
static int md_domain_init(struct dmar_domain *domain, int guest_width);
+
+static int __init si_domain_work_fn(unsigned long start_pfn,
+ unsigned long end_pfn, void *datax)
+{
+ int *ret = datax;
+
+ *ret = iommu_domain_identity_map(si_domain,
+ (uint64_t)start_pfn << PAGE_SHIFT,
+ (uint64_t)end_pfn << PAGE_SHIFT);
+ return *ret;
+
+}
+
static int si_domain_init(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int ret = 0;
+ int nid, ret = 0;
si_domain = alloc_domain();
if (!si_domain)
return -EFAULT;
+ pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
@@ -2035,6 +2067,12 @@ static int si_domain_init(void)
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+ for_each_online_node(nid) {
+ work_with_active_regions(nid, si_domain_work_fn, &ret);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -2079,9 +2117,49 @@ static int domain_add_dev_info(struct dmar_domain *domain,
return 0;
}
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+ if (iommu_identity_mapping == 2)
+ return IS_GFX_DEVICE(pdev);
+
+ /*
+ * We want to start off with all devices in the 1:1 domain, and
+ * take them out later if we find they can't access all of memory.
+ *
+ * However, we can't do this for PCI devices behind bridges,
+ * because all PCI devices behind the same bridge will end up
+ * with the same source-id on their transactions.
+ *
+ * Practically speaking, we can't change things around for these
+ * devices at run-time, because we can't be sure there'll be no
+ * DMA transactions in flight for any of their siblings.
+ *
+ * So PCI devices (unless they're on the root bus) as well as
+ * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+ * the 1:1 domain, just in _case_ one of their siblings turns out
+ * not to be able to map all of memory.
+ */
+ if (!pdev->is_pcie) {
+ if (!pci_is_root_bus(pdev->bus))
+ return 0;
+ if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+ return 0;
+ } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ /*
+ * At boot time, we don't yet know if devices will be 64-bit capable.
+ * Assume that they will -- if they turn out not to be, then we can
+ * take them out of the 1:1 domain later.
+ */
+ if (!startup)
+ return pdev->dma_mask > DMA_BIT_MASK(32);
+
+ return 1;
+}
+
static int iommu_prepare_static_identity_mapping(void)
{
- int i;
struct pci_dev *pdev = NULL;
int ret;
@@ -2089,23 +2167,19 @@ static int iommu_prepare_static_identity_mapping(void)
if (ret)
return -EFAULT;
- printk(KERN_INFO "IOMMU: Setting identity map:\n");
for_each_pci_dev(pdev) {
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
-
- if (ei->type == E820_RAM) {
- ret = iommu_prepare_identity_map(pdev,
- ei->addr, ei->addr + ei->size);
- if (ret) {
- printk(KERN_INFO "1:1 mapping to one domain failed.\n");
- return -EFAULT;
- }
- }
+ if (iommu_should_identity_map(pdev, 1)) {
+ printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+ pci_name(pdev));
+
+ ret = domain_context_mapping(si_domain, pdev,
+ CONTEXT_TT_MULTI_LEVEL);
+ if (ret)
+ return ret;
+ ret = domain_add_dev_info(si_domain, pdev);
+ if (ret)
+ return ret;
}
- ret = domain_add_dev_info(si_domain, pdev);
- if (ret)
- return ret;
}
return 0;
@@ -2260,6 +2334,10 @@ int __init init_dmars(void)
* identity mapping if iommu_identity_mapping is set.
*/
if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+ if (!iommu_identity_mapping)
+ iommu_identity_mapping = 2;
+#endif
if (iommu_identity_mapping)
iommu_prepare_static_identity_mapping();
/*
@@ -2293,8 +2371,6 @@ int __init init_dmars(void)
}
}
- iommu_prepare_gfx_mapping();
-
iommu_prepare_isa();
}
@@ -2339,50 +2415,40 @@ error:
return ret;
}
-static inline u64 aligned_size(u64 host_addr, size_t size)
-{
- u64 addr;
- addr = (host_addr & (~PAGE_MASK)) + size;
- return PAGE_ALIGN(addr);
-}
-
-struct iova *
-iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+ size_t size)
{
- struct iova *piova;
-
- /* Make sure it's in range */
- end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
- if (!size || (IOVA_START_ADDR + size > end))
- return NULL;
-
- piova = alloc_iova(&domain->iovad,
- size >> PAGE_SHIFT, IOVA_PFN(end), 1);
- return piova;
+ host_addr &= ~PAGE_MASK;
+ return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
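
As a minimal sketch (not part of the patch itself): a standalone model of the aligned_nrpages() rounding, assuming 4KiB for both the MM and VT-d page sizes as on x86, showing why a buffer that straddles a page boundary needs an extra page.

/* Editor's standalone model of aligned_nrpages(); PAGE_SIZE and
 * VTD_PAGE_SIZE are both assumed to be 4KiB. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT	12

static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;	/* keep only the in-page offset */
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* 0x1000 bytes starting 0x300 into a page spill onto a second page */
	printf("%lu\n", aligned_nrpages(0x12340300UL, 0x1000));	/* 2 */
	/* a page-aligned 0x1000-byte buffer needs exactly one page */
	printf("%lu\n", aligned_nrpages(0x12340000UL, 0x1000));	/* 1 */
	return 0;
}
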
-static struct iova *
-__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
- size_t size, u64 dma_mask)
+/* This takes a number of _MM_ pages, not VTD pages */
+static struct iova *intel_alloc_iova(struct device *dev,
+ struct dmar_domain *domain,
+ unsigned long nrpages, uint64_t dma_mask)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct iova *iova = NULL;
- if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
- iova = iommu_alloc_iova(domain, size, dma_mask);
- else {
+ /* Restrict dma_mask to the width that the iommu can handle */
+ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+
+ if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
/*
* First try to allocate an io virtual address in
* DMA_BIT_MASK(32) and if that fails then try allocating
* from higher range
*/
- iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
- if (!iova)
- iova = iommu_alloc_iova(domain, size, dma_mask);
- }
-
- if (!iova) {
- printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
+ iova = alloc_iova(&domain->iovad, nrpages,
+ IOVA_PFN(DMA_BIT_MASK(32)), 1);
+ if (iova)
+ return iova;
+ }
+ iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
+ if (unlikely(!iova)) {
+ printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
+ nrpages, pci_name(pdev));
return NULL;
}
@@ -2424,16 +2490,24 @@ static int iommu_dummy(struct pci_dev *pdev)
}
/* Check if the pdev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
{
+ struct pci_dev *pdev;
int found;
+ if (unlikely(dev->bus != &pci_bus_type))
+ return 1;
+
+ pdev = to_pci_dev(dev);
+ if (iommu_dummy(pdev))
+ return 1;
+
if (!iommu_identity_mapping)
- return iommu_dummy(pdev);
+ return 0;
found = identity_mapping(pdev);
if (found) {
- if (pdev->dma_mask > DMA_BIT_MASK(32))
+ if (iommu_should_identity_map(pdev, 0))
return 1;
else {
/*
@@ -2450,9 +2524,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
* In case of a detached 64 bit DMA device from vm, the device
* is put into si_domain for identity mapping.
*/
- if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+ if (iommu_should_identity_map(pdev, 0)) {
int ret;
ret = domain_add_dev_info(si_domain, pdev);
+ if (ret)
+ return 0;
+ ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
if (!ret) {
printk(KERN_INFO "64bit %s uses identity mapping\n",
pci_name(pdev));
@@ -2461,7 +2538,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
}
}
- return iommu_dummy(pdev);
+ return 0;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
@@ -2477,7 +2554,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(hwdev))
return paddr;
domain = get_valid_domain_for_dev(pdev);
@@ -2485,14 +2562,13 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
return 0;
iommu = domain_get_iommu(domain);
- size = aligned_size((u64)paddr, size);
+ size = aligned_nrpages(paddr, size);
- iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+ pdev->dma_mask);
if (!iova)
goto error;
- start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
-
/*
* Check if DMAR supports zero-length reads on write only
* mappings..
@@ -2508,20 +2584,20 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
* might have two guest_addr mapping to the same host paddr, but this
* is not a big problem
*/
- ret = domain_page_mapping(domain, start_paddr,
- ((u64)paddr) & PHYSICAL_PAGE_MASK,
- size, prot);
+ ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
+ paddr >> VTD_PAGE_SHIFT, size, prot);
if (ret)
goto error;
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, start_paddr,
- size >> VTD_PAGE_SHIFT);
+ iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
else
iommu_flush_write_buffer(iommu);
- return start_paddr + ((u64)paddr & (~PAGE_MASK));
+ start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
+ start_paddr += paddr & ~PAGE_MASK;
+ return start_paddr;
error:
if (iova)
@@ -2614,11 +2690,11 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
{
struct pci_dev *pdev = to_pci_dev(dev);
struct dmar_domain *domain;
- unsigned long start_addr;
+ unsigned long start_pfn, last_pfn;
struct iova *iova;
struct intel_iommu *iommu;
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(dev))
return;
domain = find_domain(pdev);
@@ -2627,22 +2703,25 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
iommu = domain_get_iommu(domain);
iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
- if (!iova)
+ if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
+ (unsigned long long)dev_addr))
return;
- start_addr = iova->pfn_lo << PAGE_SHIFT;
- size = aligned_size((u64)dev_addr, size);
+ start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+ last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
- pr_debug("Device %s unmapping: %zx@%llx\n",
- pci_name(pdev), size, (unsigned long long)start_addr);
+ pr_debug("Device %s unmapping: pfn %lx-%lx\n",
+ pci_name(pdev), start_pfn, last_pfn);
/* clear the whole page */
- dma_pte_clear_range(domain, start_addr, start_addr + size);
+ dma_pte_clear_range(domain, start_pfn, last_pfn);
+
/* free page tables */
- dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+ dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+
if (intel_iommu_strict) {
- iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
- size >> VTD_PAGE_SHIFT);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ last_pfn - start_pfn + 1);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2700,17 +2779,13 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
- unsigned long start_addr;
+ unsigned long start_pfn, last_pfn;
struct iova *iova;
- size_t size = 0;
- phys_addr_t addr;
- struct scatterlist *sg;
struct intel_iommu *iommu;
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(hwdev))
return;
domain = find_domain(pdev);
@@ -2719,22 +2794,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
iommu = domain_get_iommu(domain);
iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
- if (!iova)
+ if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
+ (unsigned long long)sglist[0].dma_address))
return;
- for_each_sg(sglist, sg, nelems, i) {
- addr = page_to_phys(sg_page(sg)) + sg->offset;
- size += aligned_size((u64)addr, sg->length);
- }
- start_addr = iova->pfn_lo << PAGE_SHIFT;
+ start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+ last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
/* clear the whole page */
- dma_pte_clear_range(domain, start_addr, start_addr + size);
+ dma_pte_clear_range(domain, start_pfn, last_pfn);
+
/* free page tables */
- dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+ dma_pte_free_pagetable(domain, start_pfn, last_pfn);
- iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
- size >> VTD_PAGE_SHIFT);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ (last_pfn - start_pfn + 1));
/* free iova */
__free_iova(&domain->iovad, iova);
@@ -2757,21 +2831,20 @@ static int intel_nontranslate_map_sg(struct device *hddev,
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
- phys_addr_t addr;
int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
- size_t offset = 0;
+ size_t offset_pfn = 0;
struct iova *iova = NULL;
int ret;
struct scatterlist *sg;
- unsigned long start_addr;
+ unsigned long start_vpfn;
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(hwdev))
return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
domain = get_valid_domain_for_dev(pdev);
@@ -2780,12 +2853,11 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
iommu = domain_get_iommu(domain);
- for_each_sg(sglist, sg, nelems, i) {
- addr = page_to_phys(sg_page(sg)) + sg->offset;
- size += aligned_size((u64)addr, sg->length);
- }
+ for_each_sg(sglist, sg, nelems, i)
+ size += aligned_nrpages(sg->offset, sg->length);
- iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+ pdev->dma_mask);
if (!iova) {
sglist->dma_length = 0;
return 0;
@@ -2801,35 +2873,24 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
prot |= DMA_PTE_WRITE;
- start_addr = iova->pfn_lo << PAGE_SHIFT;
- offset = 0;
- for_each_sg(sglist, sg, nelems, i) {
- addr = page_to_phys(sg_page(sg)) + sg->offset;
- size = aligned_size((u64)addr, sg->length);
- ret = domain_page_mapping(domain, start_addr + offset,
- ((u64)addr) & PHYSICAL_PAGE_MASK,
- size, prot);
- if (ret) {
- /* clear the page */
- dma_pte_clear_range(domain, start_addr,
- start_addr + offset);
- /* free page tables */
- dma_pte_free_pagetable(domain, start_addr,
- start_addr + offset);
- /* free iova */
- __free_iova(&domain->iovad, iova);
- return 0;
- }
- sg->dma_address = start_addr + offset +
- ((u64)addr & (~PAGE_MASK));
- sg->dma_length = sg->length;
- offset += size;
+ start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
+
+ ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+ if (unlikely(ret)) {
+ /* clear the page */
+ dma_pte_clear_range(domain, start_vpfn,
+ start_vpfn + size - 1);
+ /* free page tables */
+ dma_pte_free_pagetable(domain, start_vpfn,
+ start_vpfn + size - 1);
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
+ return 0;
}
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, start_addr,
- offset >> VTD_PAGE_SHIFT);
+ iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
else
iommu_flush_write_buffer(iommu);
@@ -3334,7 +3395,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
int adjust_width;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->mapping_lock);
spin_lock_init(&domain->iommu_lock);
domain_reserve_special_ranges(domain);
@@ -3388,8 +3448,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
static void vm_domain_exit(struct dmar_domain *domain)
{
- u64 end;
-
/* Domain 0 is reserved, so dont process it */
if (!domain)
return;
@@ -3397,14 +3455,12 @@ static void vm_domain_exit(struct dmar_domain *domain)
vm_domain_remove_all_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
- end = DOMAIN_MAX_ADDR(domain->gaw);
- end = end & (~VTD_PAGE_MASK);
/* clear ptes */
- dma_pte_clear_range(domain, 0, end);
+ dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
/* free page tables */
- dma_pte_free_pagetable(domain, 0, end);
+ dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
iommu_free_vm_domain(domain);
free_domain_mem(domain);
@@ -3513,7 +3569,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
- max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
+ max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
int min_agaw;
u64 end;
@@ -3531,8 +3587,11 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
}
dmar_domain->max_addr = max_addr;
}
-
- ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
+ /* Round up size to next multiple of PAGE_SIZE, if it and
+ the low bits of hpa would take us onto the next page */
+ size = aligned_nrpages(hpa, size);
+ ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ hpa >> VTD_PAGE_SHIFT, size, prot);
return ret;
}
@@ -3540,15 +3599,12 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- dma_addr_t base;
- /* The address might not be aligned */
- base = iova & VTD_PAGE_MASK;
- size = VTD_PAGE_ALIGN(size);
- dma_pte_clear_range(dmar_domain, base, base + size);
+ dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ (iova + size - 1) >> VTD_PAGE_SHIFT);
- if (dmar_domain->max_addr == base + size)
- dmar_domain->max_addr = base;
+ if (dmar_domain->max_addr == iova + size)
+ dmar_domain->max_addr = iova;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3558,7 +3614,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
struct dma_pte *pte;
u64 phys = 0;
- pte = addr_to_dma_pte(dmar_domain, iova);
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
if (pte)
phys = dma_pte_addr(pte);
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 2287116e9822..46dd440e2315 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -1,9 +1,19 @@
/*
- * Copyright (c) 2006, Intel Corporation.
+ * Copyright © 2006-2009, Intel Corporation.
*
- * This file is released under the GPLv2.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
*
- * Copyright (C) 2006-2008 Intel Corporation
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*/
@@ -123,7 +133,15 @@ move_left:
/* Insert the new_iova into domain rbtree by holding writer lock */
/* Add new node and rebalance tree. */
{
- struct rb_node **entry = &((prev)), *parent = NULL;
+ struct rb_node **entry, *parent = NULL;
+
+ /* If we have 'prev', it's a valid place to start the
+ insertion. Otherwise, start from the root. */
+ if (prev)
+ entry = &prev;
+ else
+ entry = &iovad->rbroot.rb_node;
+
/* Figure out where to put new node */
while (*entry) {
struct iova *this = container_of(*entry,
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d9f06fbfa0bf..d986afb7032b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -127,17 +127,23 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
u32 mask_bits = desc->masked;
if (!desc->msi_attrib.maskbit)
- return;
+ return 0;
mask_bits &= ~mask;
mask_bits |= flag;
pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
- desc->masked = mask_bits;
+
+ return mask_bits;
+}
+
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+ desc->masked = __msi_mask_irq(desc, mask, flag);
}
/*
@@ -147,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
* file. This saves a few milliseconds when initialising devices with lots
* of MSI-X interrupts.
*/
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
{
u32 mask_bits = desc->masked;
unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
mask_bits &= ~1;
mask_bits |= flag;
writel(mask_bits, desc->mask_base + offset);
- desc->masked = mask_bits;
+
+ return mask_bits;
+}
+
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+ desc->masked = __msix_mask_irq(desc, flag);
}
static void msi_set_mask_bit(unsigned irq, u32 flag)
@@ -188,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
void __iomem *base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
- msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+ msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+ msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
@@ -225,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
- writel(msg->address_lo,
- base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(msg->address_hi,
- base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+ writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+ writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
@@ -385,6 +395,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
/* Configure MSI capability structure */
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
msi_free_irqs(dev);
return ret;
}
@@ -439,8 +450,14 @@ static int msix_capability_init(struct pci_dev *dev,
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry(dev);
- if (!entry)
- break;
+ if (!entry) {
+ if (!i)
+ iounmap(base);
+ else
+ msi_free_irqs(dev);
+ /* Not enough memory. Don't try again */
+ return -ENOMEM;
+ }
j = entries[i].entry;
entry->msi_attrib.is_msix = 1;
@@ -487,7 +504,7 @@ static int msix_capability_init(struct pci_dev *dev,
set_irq_msi(entry->irq, entry);
j = entries[i].entry;
entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ PCI_MSIX_ENTRY_VECTOR_CTRL);
msix_mask_irq(entry, 1);
i++;
}
@@ -611,9 +628,11 @@ void pci_msi_shutdown(struct pci_dev *dev)
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
+ /* Return the device with MSI unmasked, as in its initial state */
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
- msi_mask_irq(desc, mask, ~mask);
+ /* Keep cached state to be restored */
+ __msi_mask_irq(desc, mask, ~mask);
/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
@@ -653,7 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev)
list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
if (entry->msi_attrib.is_msix) {
- msix_mask_irq(entry, 1);
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
@@ -741,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev)
void pci_msix_shutdown(struct pci_dev* dev)
{
+ struct msi_desc *entry;
+
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
+ /* Return the device with MSI-X masked, as in its initial state */
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ /* Keep cached states to be restored */
+ __msix_mask_irq(entry, 1);
+ }
+
msix_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msix_enabled = 0;
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index a0662842550b..de27c1cb5a2b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,11 +6,11 @@
#ifndef MSI_H
#define MSI_H
-#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0
-#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 4
-#define PCI_MSIX_ENTRY_DATA_OFFSET 8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET 12
+#define PCI_MSIX_ENTRY_SIZE 16
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0
+#define PCI_MSIX_ENTRY_UPPER_ADDR 4
+#define PCI_MSIX_ENTRY_DATA 8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6c93af5ced18..dbd0f947f497 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1517,11 +1517,20 @@ void pci_enable_ari(struct pci_dev *dev)
*
* Perform INTx swizzling for a device behind one level of bridge. This is
* required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards.
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
*/
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
- return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
+ int slot;
+
+ if (pci_ari_enabled(dev->bus))
+ slot = 0;
+ else
+ slot = PCI_SLOT(dev->devfn);
+
+ return (((pin - 1) + slot) % 4) + 1;
}
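
As a worked example (not part of the patch itself): with ARI the slot term drops out of the swizzle, so only the pin rotates. Assuming INTB (pin 2) on device 5; the 'ari' flag stands in for pci_ari_enabled(dev->bus).

/* Editor's standalone model of the INTx swizzle above. */
#include <stdio.h>

static unsigned char swizzle(unsigned char pin, int slot, int ari)
{
	if (ari)
		slot = 0;	/* ARI devices always report slot 0 */
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* INTB (pin 2) on slot 5: ((1 + 5) % 4) + 1 = 3 (INTC) */
	printf("conventional: %u\n", swizzle(2, 5, 0));
	/* same pin with ARI: slot forced to 0, so INTB stays INTB */
	printf("ari:          %u\n", swizzle(2, 5, 1));
	return 0;
}
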
int
@@ -2171,7 +2180,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
u16 ctrl;
struct pci_dev *pdev;
- if (dev->subordinate)
+ if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index ece97df4df6d..a928d8ab6bda 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -106,7 +106,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
disable_ecrc_checking(dev);
break;
case ECRC_POLICY_ON:
- enable_ecrc_checking(dev);;
+ enable_ecrc_checking(dev);
break;
default:
return;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 40e75f6a5056..b9d4e95aafba 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1061,8 +1061,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
if (dev && !dev->is_added) /* new device? */
nr++;
- if ((dev && dev->multifunction) ||
- (!dev && pcibios_scan_all_fns(bus, devfn))) {
+ if (dev && dev->multifunction) {
for (fn = 1; fn < 8; fn++) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 56552d74abea..06b965623962 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1058,6 +1058,11 @@ static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
+/* ALi loses some register settings that we cannot then restore */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3);
+/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
+   occur during mode detection */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3);
/* This was originally an Alpha specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index b711fb7181e2..ec80b88bbfec 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -119,6 +119,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
return err;
}
+EXPORT_SYMBOL(pci_claim_resource);
#ifdef CONFIG_PCI_QUIRKS
void pci_disable_bridge_window(struct pci_dev *dev)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index eddb0748b0ea..8c02b6c53bdb 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -311,7 +311,7 @@ EXPORT_SYMBOL_GPL(pci_destroy_slot);
#include <linux/pci_hotplug.h>
/**
* pci_hp_create_link - create symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
*
* Helper function for pci_hotplug_core.c to create symbolic link to
* the hotplug driver module.
@@ -334,7 +334,7 @@ EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
/**
* pci_hp_remove_link - remove symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
*
* Helper function for pci_hotplug_core.c to remove symbolic link to
* the hotplug driver module.
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 659421d0ca46..d4ad50d737b0 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -1,7 +1,7 @@
/*
* vrc4171_card.c, NEC VRC4171 Card Controller driver for Socket Services.
*
- * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,7 +32,7 @@
#include "i82365.h"
MODULE_DESCRIPTION("NEC VRC4171 Card Controllers driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_LICENSE("GPL");
#define CARD_MAX_SLOTS 2
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
index 812f038e9bda..9b3c15827e5c 100644
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ b/drivers/pcmcia/vrc4173_cardu.c
@@ -6,7 +6,7 @@
* NEC VRC4173 CARDU driver for Socket Services
* (This device doesn't support CardBus. it is supporting only 16bit PC Card.)
*
- * Copyright 2002,2003 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright 2002,2003 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -41,7 +41,7 @@
#include "vrc4173_cardu.h"
MODULE_DESCRIPTION("NEC VRC4173 CARDU driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_LICENSE("GPL");
static int vrc4173_cardu_slots;
diff --git a/drivers/pcmcia/vrc4173_cardu.h b/drivers/pcmcia/vrc4173_cardu.h
index 7d77c74120c1..a7d96018ed8d 100644
--- a/drivers/pcmcia/vrc4173_cardu.h
+++ b/drivers/pcmcia/vrc4173_cardu.h
@@ -5,7 +5,7 @@
* BRIEF MODULE DESCRIPTION
* Include file for NEC VRC4173 CARDU.
*
- * Copyright 2002 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright 2002 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 3ecd7c99d8eb..737fe5d87c40 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -622,11 +622,12 @@ static int yenta_search_res(struct yenta_socket *socket, struct resource *res,
static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type, int addr_start, int addr_end)
{
- struct resource *root, *res;
+ struct pci_dev *dev = socket->dev;
+ struct resource *res;
struct pci_bus_region region;
unsigned mask;
- res = socket->dev->resource + PCI_BRIDGE_RESOURCES + nr;
+ res = dev->resource + PCI_BRIDGE_RESOURCES + nr;
/* Already allocated? */
if (res->parent)
return 0;
@@ -636,17 +637,16 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
if (type & IORESOURCE_IO)
mask = ~3;
- res->name = socket->dev->subordinate->name;
+ res->name = dev->subordinate->name;
res->flags = type;
region.start = config_readl(socket, addr_start) & mask;
region.end = config_readl(socket, addr_end) | ~mask;
if (region.start && region.end > region.start && !override_bios) {
- pcibios_bus_to_resource(socket->dev, res, &region);
- root = pci_find_parent_resource(socket->dev, res);
- if (root && (request_resource(root, res) == 0))
+ pcibios_bus_to_resource(dev, res, &region);
+ if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0)
return 0;
- dev_printk(KERN_INFO, &socket->dev->dev,
+ dev_printk(KERN_INFO, &dev->dev,
"Preassigned resource %d busy or not available, "
"reconfiguring...\n",
nr);
@@ -672,7 +672,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
return 1;
}
- dev_printk(KERN_INFO, &socket->dev->dev,
+ dev_printk(KERN_INFO, &dev->dev,
"no resource of type %x available, trying to continue...\n",
type);
res->start = res->end = res->flags = 0;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7232fe7104aa..46dad12f952f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -355,6 +355,7 @@ config EEEPC_LAPTOP
depends on INPUT
depends on EXPERIMENTAL
depends on RFKILL || RFKILL = n
+ depends on HOTPLUG_PCI
select BACKLIGHT_CLASS_DEVICE
select HWMON
---help---
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 4207b26ff990..ec560f16d720 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -16,6 +16,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -31,6 +33,7 @@
#include <linux/input.h>
#include <linux/rfkill.h>
#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
#define EEEPC_LAPTOP_VERSION "0.1"
@@ -40,11 +43,6 @@
#define EEEPC_HOTK_DEVICE_NAME "Hotkey"
#define EEEPC_HOTK_HID "ASUS010"
-#define EEEPC_LOG EEEPC_HOTK_FILE ": "
-#define EEEPC_ERR KERN_ERR EEEPC_LOG
-#define EEEPC_WARNING KERN_WARNING EEEPC_LOG
-#define EEEPC_NOTICE KERN_NOTICE EEEPC_LOG
-#define EEEPC_INFO KERN_INFO EEEPC_LOG
/*
* Definitions for Asus EeePC
@@ -141,8 +139,10 @@ struct eeepc_hotk {
u16 event_count[128]; /* count for each event */
struct input_dev *inputdev;
u16 *keycode_map;
- struct rfkill *eeepc_wlan_rfkill;
- struct rfkill *eeepc_bluetooth_rfkill;
+ struct rfkill *wlan_rfkill;
+ struct rfkill *bluetooth_rfkill;
+ struct rfkill *wwan3g_rfkill;
+ struct hotplug_slot *hotplug_slot;
};
/* The actual device the driver binds to */
@@ -213,6 +213,15 @@ static struct acpi_driver eeepc_hotk_driver = {
},
};
+/* PCI hotplug ops */
+static int eeepc_get_adapter_status(struct hotplug_slot *slot, u8 *value);
+
+static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
+ .owner = THIS_MODULE,
+ .get_adapter_status = eeepc_get_adapter_status,
+ .get_power_status = eeepc_get_adapter_status,
+};
+
/* The backlight device /sys/class/backlight */
static struct backlight_device *eeepc_backlight_device;
@@ -274,20 +283,20 @@ static int set_acpi(int cm, int value)
if (method == NULL)
return -ENODEV;
if (write_acpi_int(ehotk->handle, method, value, NULL))
- printk(EEEPC_WARNING "Error writing %s\n", method);
+ pr_warning("Error writing %s\n", method);
}
return 0;
}
static int get_acpi(int cm)
{
- int value = -1;
+ int value = -ENODEV;
if ((ehotk->cm_supported & (0x1 << cm))) {
const char *method = cm_getv[cm];
if (method == NULL)
return -ENODEV;
if (read_acpi_int(ehotk->handle, method, &value))
- printk(EEEPC_WARNING "Error reading %s\n", method);
+ pr_warning("Error reading %s\n", method);
}
return value;
}
@@ -359,13 +368,19 @@ static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_acpi(cm, value);
+ value = set_acpi(cm, value);
+ if (value < 0)
+ return value;
return rv;
}
static ssize_t show_sys_acpi(int cm, char *buf)
{
- return sprintf(buf, "%d\n", get_acpi(cm));
+ int value = get_acpi(cm);
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "%d\n", value);
}
#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \
@@ -539,6 +554,28 @@ static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
return -EINVAL;
}
+static void cmsg_quirk(int cm, const char *name)
+{
+ int dummy;
+
+ /* Some BIOSes do not report cm although it is available.
+ Check if cm_getv[cm] works and, if yes, assume cm should be set. */
+ if (!(ehotk->cm_supported & (1 << cm))
+ && !read_acpi_int(ehotk->handle, cm_getv[cm], &dummy)) {
+ pr_info("%s (%x) not reported by BIOS,"
+ " enabling anyway\n", name, 1 << cm);
+ ehotk->cm_supported |= 1 << cm;
+ }
+}
+
+static void cmsg_quirks(void)
+{
+ cmsg_quirk(CM_ASL_LID, "LID");
+ cmsg_quirk(CM_ASL_TYPE, "TYPE");
+ cmsg_quirk(CM_ASL_PANELPOWER, "PANELPOWER");
+ cmsg_quirk(CM_ASL_TPD, "TPD");
+}
+
static int eeepc_hotk_check(void)
{
const struct key_entry *key;
@@ -551,26 +588,24 @@ static int eeepc_hotk_check(void)
if (ehotk->device->status.present) {
if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
&buffer)) {
- printk(EEEPC_ERR "Hotkey initialization failed\n");
+ pr_err("Hotkey initialization failed\n");
return -ENODEV;
} else {
- printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n",
- ehotk->init_flag);
+ pr_notice("Hotkey init flags 0x%x\n", ehotk->init_flag);
}
/* get control methods supported */
if (read_acpi_int(ehotk->handle, "CMSG"
, &ehotk->cm_supported)) {
- printk(EEEPC_ERR
- "Get control methods supported failed\n");
+ pr_err("Get control methods supported failed\n");
return -ENODEV;
} else {
- printk(EEEPC_INFO
- "Get control methods supported: 0x%x\n",
- ehotk->cm_supported);
+ cmsg_quirks();
+ pr_info("Get control methods supported: 0x%x\n",
+ ehotk->cm_supported);
}
ehotk->inputdev = input_allocate_device();
if (!ehotk->inputdev) {
- printk(EEEPC_INFO "Unable to allocate input device\n");
+ pr_info("Unable to allocate input device\n");
return 0;
}
ehotk->inputdev->name = "Asus EeePC extra buttons";
@@ -589,12 +624,12 @@ static int eeepc_hotk_check(void)
}
result = input_register_device(ehotk->inputdev);
if (result) {
- printk(EEEPC_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
input_free_device(ehotk->inputdev);
return 0;
}
} else {
- printk(EEEPC_ERR "Hotkey device not present, aborting\n");
+ pr_err("Hotkey device not present, aborting\n");
return -EINVAL;
}
return 0;
@@ -612,6 +647,19 @@ static int notify_brn(void)
return -1;
}
+static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
+ u8 *value)
+{
+ int val = get_acpi(CM_ASL_WLAN);
+
+ if (val == 1 || val == 0)
+ *value = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
static void eeepc_rfkill_hotplug(void)
{
struct pci_dev *dev;
@@ -619,7 +667,7 @@ static void eeepc_rfkill_hotplug(void)
bool blocked;
if (!bus) {
- printk(EEEPC_WARNING "Unable to find PCI bus 1?\n");
+ pr_warning("Unable to find PCI bus 1?\n");
return;
}
@@ -635,7 +683,7 @@ static void eeepc_rfkill_hotplug(void)
if (dev) {
pci_bus_assign_resources(bus);
if (pci_bus_add_device(dev))
- printk(EEEPC_ERR "Unable to hotplug wifi\n");
+ pr_err("Unable to hotplug wifi\n");
}
} else {
dev = pci_get_slot(bus, 0);
@@ -645,7 +693,7 @@ static void eeepc_rfkill_hotplug(void)
}
}
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked);
+ rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
}
static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
@@ -718,8 +766,7 @@ static int eeepc_register_rfkill_notifier(char *node)
eeepc_rfkill_notify,
NULL);
if (ACPI_FAILURE(status))
- printk(EEEPC_WARNING
- "Failed to register notify on %s\n", node);
+ pr_warning("Failed to register notify on %s\n", node);
} else
return -ENODEV;
@@ -738,19 +785,66 @@ static void eeepc_unregister_rfkill_notifier(char *node)
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify);
if (ACPI_FAILURE(status))
- printk(EEEPC_ERR
- "Error removing rfkill notify handler %s\n",
+ pr_err("Error removing rfkill notify handler %s\n",
node);
}
}
+static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
+{
+ kfree(hotplug_slot->info);
+ kfree(hotplug_slot);
+}
+
+static int eeepc_setup_pci_hotplug(void)
+{
+ int ret = -ENOMEM;
+ struct pci_bus *bus = pci_find_bus(0, 1);
+
+ if (!bus) {
+ pr_err("Unable to find wifi PCI bus\n");
+ return -ENODEV;
+ }
+
+ ehotk->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
+ if (!ehotk->hotplug_slot)
+ goto error_slot;
+
+ ehotk->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
+ GFP_KERNEL);
+ if (!ehotk->hotplug_slot->info)
+ goto error_info;
+
+ ehotk->hotplug_slot->private = ehotk;
+ ehotk->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
+ ehotk->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
+ eeepc_get_adapter_status(ehotk->hotplug_slot,
+ &ehotk->hotplug_slot->info->adapter_status);
+
+ ret = pci_hp_register(ehotk->hotplug_slot, bus, 0, "eeepc-wifi");
+ if (ret) {
+ pr_err("Unable to register hotplug slot - %d\n", ret);
+ goto error_register;
+ }
+
+ return 0;
+
+error_register:
+ kfree(ehotk->hotplug_slot->info);
+error_info:
+ kfree(ehotk->hotplug_slot);
+ ehotk->hotplug_slot = NULL;
+error_slot:
+ return ret;
+}
+
static int eeepc_hotk_add(struct acpi_device *device)
{
int result;
if (!device)
return -EINVAL;
- printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n");
+ pr_notice(EEEPC_HOTK_NAME "\n");
ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
if (!ehotk)
return -ENOMEM;
@@ -764,53 +858,8 @@ static int eeepc_hotk_add(struct acpi_device *device)
if (result)
goto ehotk_fail;
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
-
- if (get_acpi(CM_ASL_WLAN) != -1) {
- ehotk->eeepc_wlan_rfkill = rfkill_alloc("eeepc-wlan",
- &device->dev,
- RFKILL_TYPE_WLAN,
- &eeepc_rfkill_ops,
- (void *)CM_ASL_WLAN);
-
- if (!ehotk->eeepc_wlan_rfkill)
- goto wlan_fail;
-
- rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill,
- get_acpi(CM_ASL_WLAN) != 1);
- result = rfkill_register(ehotk->eeepc_wlan_rfkill);
- if (result)
- goto wlan_fail;
- }
-
- if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
- ehotk->eeepc_bluetooth_rfkill =
- rfkill_alloc("eeepc-bluetooth",
- &device->dev,
- RFKILL_TYPE_BLUETOOTH,
- &eeepc_rfkill_ops,
- (void *)CM_ASL_BLUETOOTH);
-
- if (!ehotk->eeepc_bluetooth_rfkill)
- goto bluetooth_fail;
-
- rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill,
- get_acpi(CM_ASL_BLUETOOTH) != 1);
- result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
- if (result)
- goto bluetooth_fail;
- }
-
return 0;
- bluetooth_fail:
- rfkill_destroy(ehotk->eeepc_bluetooth_rfkill);
- rfkill_unregister(ehotk->eeepc_wlan_rfkill);
- wlan_fail:
- rfkill_destroy(ehotk->eeepc_wlan_rfkill);
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
ehotk_fail:
kfree(ehotk);
ehotk = NULL;
@@ -823,16 +872,13 @@ static int eeepc_hotk_remove(struct acpi_device *device, int type)
if (!device || !acpi_driver_data(device))
return -EINVAL;
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
-
kfree(ehotk);
return 0;
}
static int eeepc_hotk_resume(struct acpi_device *device)
{
- if (ehotk->eeepc_wlan_rfkill) {
+ if (ehotk->wlan_rfkill) {
bool wlan;
/* Workaround - it seems that _PTS disables the wireless
@@ -844,14 +890,13 @@ static int eeepc_hotk_resume(struct acpi_device *device)
wlan = get_acpi(CM_ASL_WLAN);
set_acpi(CM_ASL_WLAN, wlan);
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
- wlan != 1);
+ rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1);
eeepc_rfkill_hotplug();
}
- if (ehotk->eeepc_bluetooth_rfkill)
- rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
+ if (ehotk->bluetooth_rfkill)
+ rfkill_set_sw_state(ehotk->bluetooth_rfkill,
get_acpi(CM_ASL_BLUETOOTH) != 1);
return 0;
@@ -973,10 +1018,16 @@ static void eeepc_backlight_exit(void)
static void eeepc_rfkill_exit(void)
{
- if (ehotk->eeepc_wlan_rfkill)
- rfkill_unregister(ehotk->eeepc_wlan_rfkill);
- if (ehotk->eeepc_bluetooth_rfkill)
- rfkill_unregister(ehotk->eeepc_bluetooth_rfkill);
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
+ if (ehotk->wlan_rfkill)
+ rfkill_unregister(ehotk->wlan_rfkill);
+ if (ehotk->bluetooth_rfkill)
+ rfkill_unregister(ehotk->bluetooth_rfkill);
+ if (ehotk->wwan3g_rfkill)
+ rfkill_unregister(ehotk->wwan3g_rfkill);
+ if (ehotk->hotplug_slot)
+ pci_hp_deregister(ehotk->hotplug_slot);
}
static void eeepc_input_exit(void)
@@ -1011,6 +1062,75 @@ static void __exit eeepc_laptop_exit(void)
platform_driver_unregister(&platform_driver);
}
+static int eeepc_new_rfkill(struct rfkill **rfkill,
+ const char *name, struct device *dev,
+ enum rfkill_type type, int cm)
+{
+ int result;
+
+ result = get_acpi(cm);
+ if (result < 0)
+ return result;
+
+ *rfkill = rfkill_alloc(name, dev, type,
+ &eeepc_rfkill_ops, (void *)(unsigned long)cm);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ rfkill_init_sw_state(*rfkill, get_acpi(cm) != 1);
+ result = rfkill_register(*rfkill);
+ if (result) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return result;
+ }
+ return 0;
+}
+
+
+static int eeepc_rfkill_init(struct device *dev)
+{
+ int result = 0;
+
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
+
+ result = eeepc_new_rfkill(&ehotk->wlan_rfkill,
+ "eeepc-wlan", dev,
+ RFKILL_TYPE_WLAN, CM_ASL_WLAN);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(&ehotk->bluetooth_rfkill,
+ "eeepc-bluetooth", dev,
+ RFKILL_TYPE_BLUETOOTH, CM_ASL_BLUETOOTH);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(&ehotk->wwan3g_rfkill,
+ "eeepc-wwan3g", dev,
+ RFKILL_TYPE_WWAN, CM_ASL_3G);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_setup_pci_hotplug();
+ /*
+ * If we get -EBUSY then something else is handling the PCI hotplug -
+ * don't fail in this case
+ */
+ if (result == -EBUSY)
+ result = 0;
+
+exit:
+ if (result && result != -ENODEV)
+ eeepc_rfkill_exit();
+ return result;
+}
+
static int eeepc_backlight_init(struct device *dev)
{
struct backlight_device *bd;
@@ -1018,8 +1138,7 @@ static int eeepc_backlight_init(struct device *dev)
bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
NULL, &eeepcbl_ops);
if (IS_ERR(bd)) {
- printk(EEEPC_ERR
- "Could not register eeepc backlight device\n");
+ pr_err("Could not register eeepc backlight device\n");
eeepc_backlight_device = NULL;
return PTR_ERR(bd);
}
@@ -1038,8 +1157,7 @@ static int eeepc_hwmon_init(struct device *dev)
hwmon = hwmon_device_register(dev);
if (IS_ERR(hwmon)) {
- printk(EEEPC_ERR
- "Could not register eeepc hwmon device\n");
+ pr_err("Could not register eeepc hwmon device\n");
eeepc_hwmon_device = NULL;
return PTR_ERR(hwmon);
}
@@ -1065,19 +1183,6 @@ static int __init eeepc_laptop_init(void)
acpi_bus_unregister_driver(&eeepc_hotk_driver);
return -ENODEV;
}
- dev = acpi_get_physical_device(ehotk->device->handle);
-
- if (!acpi_video_backlight_support()) {
- result = eeepc_backlight_init(dev);
- if (result)
- goto fail_backlight;
- } else
- printk(EEEPC_INFO "Backlight controlled by ACPI video "
- "driver\n");
-
- result = eeepc_hwmon_init(dev);
- if (result)
- goto fail_hwmon;
eeepc_enable_camera();
@@ -1097,7 +1202,33 @@ static int __init eeepc_laptop_init(void)
&platform_attribute_group);
if (result)
goto fail_sysfs;
+
+ dev = &platform_device->dev;
+
+ if (!acpi_video_backlight_support()) {
+ result = eeepc_backlight_init(dev);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Backlight controlled by ACPI video "
+ "driver\n");
+
+ result = eeepc_hwmon_init(dev);
+ if (result)
+ goto fail_hwmon;
+
+ result = eeepc_rfkill_init(dev);
+ if (result)
+ goto fail_rfkill;
+
return 0;
+fail_rfkill:
+ eeepc_hwmon_exit();
+fail_hwmon:
+ eeepc_backlight_exit();
+fail_backlight:
+ sysfs_remove_group(&platform_device->dev.kobj,
+ &platform_attribute_group);
fail_sysfs:
platform_device_del(platform_device);
fail_platform_device2:
@@ -1105,12 +1236,7 @@ fail_platform_device2:
fail_platform_device1:
platform_driver_unregister(&platform_driver);
fail_platform_driver:
- eeepc_hwmon_exit();
-fail_hwmon:
- eeepc_backlight_exit();
-fail_backlight:
eeepc_input_exit();
- eeepc_rfkill_exit();
return result;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 7eda34838bfe..bdbc4f73fcdc 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -43,6 +43,13 @@ config BATTERY_DS2760
help
Say Y here to enable support for batteries with ds2760 chip.
+config BATTERY_DS2782
+ tristate "DS2782 standalone gas-gauge"
+ depends on I2C
+ help
+ Say Y here to enable support for the DS2782 standalone battery
+ gas-gauge.
+
config BATTERY_PMU
tristate "Apple PMU battery"
depends on PPC32 && ADB_PMU
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index daf3179689aa..380d17c9ae29 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_APM_POWER) += apm_power.o
obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
+obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
new file mode 100644
index 000000000000..da14f374cb60
--- /dev/null
+++ b/drivers/power/ds2782_battery.c
@@ -0,0 +1,330 @@
+/*
+ * I2C client/driver for the Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC
+ *
+ * Copyright (C) 2009 Bluewater Systems Ltd
+ *
+ * Author: Ryan Mallon <ryan@bluewatersys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/swab.h>
+#include <linux/i2c.h>
+#include <linux/idr.h>
+#include <linux/power_supply.h>
+
+#define DS2782_REG_RARC 0x06 /* Remaining active relative capacity */
+
+#define DS2782_REG_VOLT_MSB 0x0c
+#define DS2782_REG_TEMP_MSB 0x0a
+#define DS2782_REG_CURRENT_MSB 0x0e
+
+/* EEPROM Block */
+#define DS2782_REG_RSNSP 0x69 /* Sense resistor value */
+
+/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
+#define DS2782_CURRENT_UNITS 1563
+
+#define to_ds2782_info(x) container_of(x, struct ds2782_info, battery)
+
+struct ds2782_info {
+ struct i2c_client *client;
+ struct power_supply battery;
+ int id;
+};
+
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_lock);
+
+static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(info->client, reg);
+ if (ret < 0) {
+ dev_err(&info->client->dev, "register read failed\n");
+ return ret;
+ }
+
+ *val = ret;
+ return 0;
+}
+
+static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb,
+ s16 *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(info->client, reg_msb);
+ if (ret < 0) {
+ dev_err(&info->client->dev, "register read failed\n");
+ return ret;
+ }
+
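+ /* The chip returns the MSB first, but SMBus word reads are LSB first, so byte-swap */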
+ *val = swab16(ret);
+ return 0;
+}
+
+static int ds2782_get_temp(struct ds2782_info *info, int *temp)
+{
+ s16 raw;
+ int err;
+
+ /*
+ * Temperature is measured in units of 0.125 degrees Celsius; the
+ * power_supply class reports temperature in tenths of a degree
+ * Celsius. The temperature value is stored as a 10-bit number, plus
+ * sign, in the upper bits of a 16-bit register.
+ */
+ err = ds2782_read_reg16(info, DS2782_REG_TEMP_MSB, &raw);
+ if (err)
+ return err;
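+ /* Discard the 5 unused low bits, then scale 0.125 C steps to tenths of a degree */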
+ *temp = ((raw / 32) * 125) / 100;
+ return 0;
+}
+
+static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
+{
+ int sense_res;
+ int err;
+ u8 sense_res_raw;
+ s16 raw;
+
+ /*
+ * The units of measurement for current are dependent on the value of
+ * the sense resistor.
+ */
+ err = ds2782_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw);
+ if (err)
+ return err;
+ if (sense_res_raw == 0) {
+ dev_err(&info->client->dev, "sense resistor value is 0\n");
+ return -ENXIO;
+ }
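+ /* RSNSP holds 1/Rsns, so this yields the sense resistance in milli-ohms */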
+ sense_res = 1000 / sense_res_raw;
+
+ dev_dbg(&info->client->dev, "sense resistor = %d milli-ohms\n",
+ sense_res);
+ err = ds2782_read_reg16(info, DS2782_REG_CURRENT_MSB, &raw);
+ if (err)
+ return err;
+ *current_uA = raw * (DS2782_CURRENT_UNITS / sense_res);
+ return 0;
+}
+
+static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uV)
+{
+ s16 raw;
+ int err;
+
+ /*
+ * Voltage is measured in units of 4.88mV. The voltage is stored as
+ * a 10-bit number plus sign, in the upper bits of a 16-bit register.
+ */
+ err = ds2782_read_reg16(info, DS2782_REG_VOLT_MSB, &raw);
+ if (err)
+ return err;
+ *voltage_uV = (raw / 32) * 4880;
+ return 0;
+}
+
+static int ds2782_get_capacity(struct ds2782_info *info, int *capacity)
+{
+ int err;
+ u8 raw;
+
+ err = ds2782_read_reg(info, DS2782_REG_RARC, &raw);
+ if (err)
+ return err;
+ *capacity = raw;
+ return 0;
+}
+
+static int ds2782_get_status(struct ds2782_info *info, int *status)
+{
+ int err;
+ int current_uA;
+ int capacity;
+
+ err = ds2782_get_current(info, &current_uA);
+ if (err)
+ return err;
+
+ err = ds2782_get_capacity(info, &capacity);
+ if (err)
+ return err;
+
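+ /* Report full at 100%; otherwise the sign of the current selects charging, discharging or idle */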
+ if (capacity == 100)
+ *status = POWER_SUPPLY_STATUS_FULL;
+ else if (current_uA == 0)
+ *status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (current_uA < 0)
+ *status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ *status = POWER_SUPPLY_STATUS_CHARGING;
+
+ return 0;
+}
+
+static int ds2782_battery_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct ds2782_info *info = to_ds2782_info(psy);
+ int ret;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = ds2782_get_status(info, &val->intval);
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = ds2782_get_capacity(info, &val->intval);
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = ds2782_get_voltage(info, &val->intval);
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = ds2782_get_current(info, &val->intval);
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = ds2782_get_temp(info, &val->intval);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property ds2782_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static void ds2782_power_supply_init(struct power_supply *battery)
+{
+ battery->type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->properties = ds2782_battery_props;
+ battery->num_properties = ARRAY_SIZE(ds2782_battery_props);
+ battery->get_property = ds2782_battery_get_property;
+ battery->external_power_changed = NULL;
+}
+
+static int ds2782_battery_remove(struct i2c_client *client)
+{
+ struct ds2782_info *info = i2c_get_clientdata(client);
+
+ power_supply_unregister(&info->battery);
+ kfree(info->battery.name);
+
+ mutex_lock(&battery_lock);
+ idr_remove(&battery_id, info->id);
+ mutex_unlock(&battery_lock);
+
+ i2c_set_clientdata(client, info);
+
+ kfree(info);
+ return 0;
+}
+
+static int ds2782_battery_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ds2782_info *info;
+ int ret;
+ int num;
+
+ /* Get an ID for this battery */
+ ret = idr_pre_get(&battery_id, GFP_KERNEL);
+ if (ret == 0) {
+ ret = -ENOMEM;
+ goto fail_id;
+ }
+
+ mutex_lock(&battery_lock);
+ ret = idr_get_new(&battery_id, client, &num);
+ mutex_unlock(&battery_lock);
+ if (ret < 0)
+ goto fail_id;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto fail_info;
+ }
+
+ info->battery.name = kasprintf(GFP_KERNEL, "ds2782-%d", num);
+ if (!info->battery.name) {
+ ret = -ENOMEM;
+ goto fail_name;
+ }
+
+ i2c_set_clientdata(client, info);
+ info->client = client;
+ ds2782_power_supply_init(&info->battery);
+
+ ret = power_supply_register(&client->dev, &info->battery);
+ if (ret) {
+ dev_err(&client->dev, "failed to register battery\n");
+ goto fail_register;
+ }
+
+ return 0;
+
+fail_register:
+ kfree(info->battery.name);
+fail_name:
+ i2c_set_clientdata(client, info);
+ kfree(info);
+fail_info:
+ mutex_lock(&battery_lock);
+ idr_remove(&battery_id, num);
+ mutex_unlock(&battery_lock);
+fail_id:
+ return ret;
+}
+
+static const struct i2c_device_id ds2782_id[] = {
+ {"ds2782", 0},
+ {},
+};
+
+static struct i2c_driver ds2782_battery_driver = {
+ .driver = {
+ .name = "ds2782-battery",
+ },
+ .probe = ds2782_battery_probe,
+ .remove = ds2782_battery_remove,
+ .id_table = ds2782_id,
+};
+
+static int __init ds2782_init(void)
+{
+ return i2c_add_driver(&ds2782_battery_driver);
+}
+module_init(ds2782_init);
+
+static void __exit ds2782_exit(void)
+{
+ i2c_del_driver(&ds2782_battery_driver);
+}
+module_exit(ds2782_exit);
+
+MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>");
+MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 5fbca2681baa..8fefe5a73558 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -8,8 +8,11 @@
* published by the Free Software Foundation.
*/
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/types.h>
#include <linux/err.h>
+#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/jiffies.h>
@@ -35,6 +38,7 @@
#define BAT_STAT_AC 0x10
#define BAT_STAT_CHARGING 0x20
#define BAT_STAT_DISCHARGING 0x40
+#define BAT_STAT_TRICKLE 0x80
#define BAT_ERR_INFOFAIL 0x02
#define BAT_ERR_OVERVOLTAGE 0x04
@@ -89,7 +93,7 @@ static char bat_serial[17]; /* Ick */
static int olpc_bat_get_status(union power_supply_propval *val, uint8_t ec_byte)
{
if (olpc_platform_info.ecver > 0x44) {
- if (ec_byte & BAT_STAT_CHARGING)
+ if (ec_byte & (BAT_STAT_CHARGING | BAT_STAT_TRICKLE))
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (ec_byte & BAT_STAT_DISCHARGING)
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
@@ -219,7 +223,8 @@ static int olpc_bat_get_property(struct power_supply *psy,
It doesn't matter though -- the EC will return the last-known
information, and it's as if we just ran that _little_ bit faster
and managed to read it out before the battery went away. */
- if (!(ec_byte & BAT_STAT_PRESENT) && psp != POWER_SUPPLY_PROP_PRESENT)
+ if (!(ec_byte & (BAT_STAT_PRESENT | BAT_STAT_TRICKLE)) &&
+ psp != POWER_SUPPLY_PROP_PRESENT)
return -ENODEV;
switch (psp) {
@@ -228,8 +233,17 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ if (ec_byte & BAT_STAT_TRICKLE)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ else if (ec_byte & BAT_STAT_CHARGING)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
case POWER_SUPPLY_PROP_PRESENT:
- val->intval = !!(ec_byte & BAT_STAT_PRESENT);
+ val->intval = !!(ec_byte & (BAT_STAT_PRESENT |
+ BAT_STAT_TRICKLE));
break;
case POWER_SUPPLY_PROP_HEALTH:
@@ -272,6 +286,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
return ret;
val->intval = ec_byte;
break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ if (ec_byte & BAT_STAT_FULL)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (ec_byte & BAT_STAT_LOW)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ break;
case POWER_SUPPLY_PROP_TEMP:
ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2);
if (ret)
@@ -311,12 +333,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
static enum power_supply_property olpc_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_AMBIENT,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -334,21 +358,21 @@ static ssize_t olpc_bat_eeprom_read(struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
uint8_t ec_byte;
- int ret, end;
+ int ret;
+ int i;
if (off >= EEPROM_SIZE)
return 0;
if (off + count > EEPROM_SIZE)
count = EEPROM_SIZE - off;
- end = EEPROM_START + off + count;
- for (ec_byte = EEPROM_START + off; ec_byte < end; ec_byte++) {
- ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1,
- &buf[ec_byte - EEPROM_START], 1);
+ for (i = 0; i < count; i++) {
+ ec_byte = EEPROM_START + off + i;
+ ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &buf[i], 1);
if (ret) {
- printk(KERN_ERR "olpc-battery: EC command "
- "EC_BAT_EEPROM @ 0x%x failed -"
- " %d!\n", ec_byte, ret);
+ pr_err("olpc-battery: "
+ "EC_BAT_EEPROM cmd @ 0x%x failed - %d!\n",
+ ec_byte, ret);
return -EIO;
}
}
@@ -366,6 +390,29 @@ static struct bin_attribute olpc_bat_eeprom = {
.read = olpc_bat_eeprom_read,
};
+/* Allow userspace to see the specific error value pulled from the EC */
+
+static ssize_t olpc_bat_error_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ uint8_t ec_byte;
+ ssize_t ret;
+
+ ret = olpc_ec_cmd(EC_BAT_ERRCODE, NULL, 0, &ec_byte, 1);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ec_byte);
+}
+
+static struct device_attribute olpc_bat_error = {
+ .attr = {
+ .name = "error",
+ .mode = S_IRUGO,
+ },
+ .show = olpc_bat_error_read,
+};
+
/*********************************************************************
* Initialisation
*********************************************************************/
@@ -429,8 +476,14 @@ static int __init olpc_bat_init(void)
if (ret)
goto eeprom_failed;
+ ret = device_create_file(olpc_bat.dev, &olpc_bat_error);
+ if (ret)
+ goto error_failed;
+
goto success;
+error_failed:
+ device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
eeprom_failed:
power_supply_unregister(&olpc_bat);
battery_failed:
@@ -443,6 +496,7 @@ success:
static void __exit olpc_bat_exit(void)
{
+ device_remove_file(olpc_bat.dev, &olpc_bat_error);
device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
power_supply_unregister(&olpc_bat);
power_supply_unregister(&olpc_ac);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index da73591017f9..08144393d64b 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -43,6 +43,9 @@ static ssize_t power_supply_show_property(struct device *dev,
static char *status_text[] = {
"Unknown", "Charging", "Discharging", "Not charging", "Full"
};
+ static char *charge_type[] = {
+ "Unknown", "N/A", "Trickle", "Fast"
+ };
static char *health_text[] = {
"Unknown", "Good", "Overheat", "Dead", "Over voltage",
"Unspecified failure", "Cold",
@@ -51,6 +54,9 @@ static ssize_t power_supply_show_property(struct device *dev,
"Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
"LiMn"
};
+ static char *capacity_level_text[] = {
+ "Unknown", "Critical", "Low", "Normal", "High", "Full"
+ };
ssize_t ret;
struct power_supply *psy = dev_get_drvdata(dev);
const ptrdiff_t off = attr - power_supply_attrs;
@@ -67,10 +73,14 @@ static ssize_t power_supply_show_property(struct device *dev,
if (off == POWER_SUPPLY_PROP_STATUS)
return sprintf(buf, "%s\n", status_text[value.intval]);
+ else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE)
+ return sprintf(buf, "%s\n", charge_type[value.intval]);
else if (off == POWER_SUPPLY_PROP_HEALTH)
return sprintf(buf, "%s\n", health_text[value.intval]);
else if (off == POWER_SUPPLY_PROP_TECHNOLOGY)
return sprintf(buf, "%s\n", technology_text[value.intval]);
+ else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
+ return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
return sprintf(buf, "%s\n", value.strval);
@@ -81,6 +91,7 @@ static ssize_t power_supply_show_property(struct device *dev,
static struct device_attribute power_supply_attrs[] = {
/* Properties of type `int' */
POWER_SUPPLY_ATTR(status),
+ POWER_SUPPLY_ATTR(charge_type),
POWER_SUPPLY_ATTR(health),
POWER_SUPPLY_ATTR(present),
POWER_SUPPLY_ATTR(online),
@@ -109,6 +120,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(energy_now),
POWER_SUPPLY_ATTR(energy_avg),
POWER_SUPPLY_ATTR(capacity),
+ POWER_SUPPLY_ATTR(capacity_level),
POWER_SUPPLY_ATTR(temp),
POWER_SUPPLY_ATTR(temp_ambient),
POWER_SUPPLY_ATTR(time_to_empty_now),
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index f4317798e47c..da7483ef32b3 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -117,4 +117,11 @@ config REGULATOR_LP3971
Say Y here to support the voltage regulators and convertors
on National Semiconductors LP3971 PMIC
+config REGULATOR_PCAP
+ tristate "PCAP2 regulator driver"
+ depends on EZX_PCAP
+ help
+ This driver provides support for the voltage regulators of the
+ PCAP2 PMIC.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 4d762c4cccfd..3a9748ff860b 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -16,5 +16,6 @@ obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
+obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 98c3a74e9949..eed62ad73e09 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -37,7 +37,7 @@ static int has_full_constraints;
*/
struct regulator_map {
struct list_head list;
- struct device *dev;
+ const char *dev_name; /* The dev_name() for the consumer */
const char *supply;
struct regulator_dev *regulator;
};
@@ -857,23 +857,33 @@ out:
* set_consumer_device_supply: Bind a regulator to a symbolic supply
* @rdev: regulator source
* @consumer_dev: device the supply applies to
+ * @consumer_dev_name: dev_name() string for device supply applies to
* @supply: symbolic name for supply
*
* Allows platform initialisation code to map physical regulator
* sources to symbolic names for supplies for use by devices. Devices
* should use these symbolic names to request regulators, avoiding the
* need to provide board-specific regulator names as platform data.
+ *
+ * Only one of consumer_dev and consumer_dev_name may be specified.
*/
static int set_consumer_device_supply(struct regulator_dev *rdev,
- struct device *consumer_dev, const char *supply)
+ struct device *consumer_dev, const char *consumer_dev_name,
+ const char *supply)
{
struct regulator_map *node;
+ if (consumer_dev && consumer_dev_name)
+ return -EINVAL;
+
+ if (!consumer_dev_name && consumer_dev)
+ consumer_dev_name = dev_name(consumer_dev);
+
if (supply == NULL)
return -EINVAL;
list_for_each_entry(node, &regulator_map_list, list) {
- if (consumer_dev != node->dev)
+ if (consumer_dev_name != node->dev_name)
continue;
if (strcmp(node->supply, supply) != 0)
continue;
@@ -891,25 +901,38 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
return -ENOMEM;
node->regulator = rdev;
- node->dev = consumer_dev;
+ node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
node->supply = supply;
+ if (node->dev_name == NULL) {
+ kfree(node);
+ return -ENOMEM;
+ }
+
list_add(&node->list, &regulator_map_list);
return 0;
}
static void unset_consumer_device_supply(struct regulator_dev *rdev,
- struct device *consumer_dev)
+ const char *consumer_dev_name, struct device *consumer_dev)
{
struct regulator_map *node, *n;
+ if (consumer_dev && !consumer_dev_name)
+ consumer_dev_name = dev_name(consumer_dev);
+
list_for_each_entry_safe(node, n, &regulator_map_list, list) {
- if (rdev == node->regulator &&
- consumer_dev == node->dev) {
- list_del(&node->list);
- kfree(node);
- return;
- }
+ if (rdev != node->regulator)
+ continue;
+
+ if (consumer_dev_name && node->dev_name &&
+ strcmp(consumer_dev_name, node->dev_name))
+ continue;
+
+ list_del(&node->list);
+ kfree(node->dev_name);
+ kfree(node);
+ return;
}
}
@@ -920,6 +943,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
list_for_each_entry_safe(node, n, &regulator_map_list, list) {
if (rdev == node->regulator) {
list_del(&node->list);
+ kfree(node->dev_name);
kfree(node);
return;
}
@@ -1019,17 +1043,25 @@ struct regulator *regulator_get(struct device *dev, const char *id)
struct regulator_dev *rdev;
struct regulator_map *map;
struct regulator *regulator = ERR_PTR(-ENODEV);
+ const char *devname = NULL;
if (id == NULL) {
printk(KERN_ERR "regulator: get() with no identifier\n");
return regulator;
}
+ if (dev)
+ devname = dev_name(dev);
+
mutex_lock(&regulator_list_mutex);
list_for_each_entry(map, &regulator_map_list, list) {
- if (dev == map->dev &&
- strcmp(map->supply, id) == 0) {
+ /* If the mapping has a device set up it must match */
+ if (map->dev_name &&
+ (!devname || strcmp(map->dev_name, devname)))
+ continue;
+
+ if (strcmp(map->supply, id) == 0) {
rdev = map->regulator;
goto found;
}
@@ -2067,11 +2099,13 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
for (i = 0; i < init_data->num_consumer_supplies; i++) {
ret = set_consumer_device_supply(rdev,
init_data->consumer_supplies[i].dev,
+ init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
if (ret < 0) {
for (--i; i >= 0; i--)
unset_consumer_device_supply(rdev,
- init_data->consumer_supplies[i].dev);
+ init_data->consumer_supplies[i].dev_name,
+ init_data->consumer_supplies[i].dev);
goto scrub;
}
}
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
new file mode 100644
index 000000000000..fdb90940ef0f
--- /dev/null
+++ b/drivers/regulator/pcap-regulator.c
@@ -0,0 +1,318 @@
+/*
+ * PCAP2 Regulator Driver
+ *
+ * Copyright (c) 2009 Daniel Ribeiro <drwyrm@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/ezx-pcap.h>
+
+static const u16 V1_table[] = {
+ 2775, 1275, 1600, 1725, 1825, 1925, 2075, 2275,
+};
+
+static const u16 V2_table[] = {
+ 2500, 2775,
+};
+
+static const u16 V3_table[] = {
+ 1075, 1275, 1550, 1725, 1875, 1950, 2075, 2275,
+};
+
+static const u16 V4_table[] = {
+ 1275, 1550, 1725, 1875, 1950, 2075, 2275, 2775,
+};
+
+static const u16 V5_table[] = {
+ 1875, 2275, 2475, 2775,
+};
+
+static const u16 V6_table[] = {
+ 2475, 2775,
+};
+
+static const u16 V7_table[] = {
+ 1875, 2775,
+};
+
+#define V8_table V4_table
+
+static const u16 V9_table[] = {
+ 1575, 1875, 2475, 2775,
+};
+
+static const u16 V10_table[] = {
+ 5000,
+};
+
+static const u16 VAUX1_table[] = {
+ 1875, 2475, 2775, 3000,
+};
+
+#define VAUX2_table VAUX1_table
+
+static const u16 VAUX3_table[] = {
+ 1200, 1200, 1200, 1200, 1400, 1600, 1800, 2000,
+ 2200, 2400, 2600, 2800, 3000, 3200, 3400, 3600,
+};
+
+static const u16 VAUX4_table[] = {
+ 1800, 1800, 3000, 5000,
+};
+
+static const u16 VSIM_table[] = {
+ 1875, 3000,
+};
+
+static const u16 VSIM2_table[] = {
+ 1875,
+};
+
+static const u16 VVIB_table[] = {
+ 1300, 1800, 2000, 3000,
+};
+
+static const u16 SW1_table[] = {
+ 900, 950, 1000, 1050, 1100, 1150, 1200, 1250,
+ 1300, 1350, 1400, 1450, 1500, 1600, 1875, 2250,
+};
+
+#define SW2_table SW1_table
+
+static const u16 SW3_table[] = {
+ 4000, 4500, 5000, 5500,
+};
+
+struct pcap_regulator {
+ const u8 reg;
+ const u8 en;
+ const u8 index;
+ const u8 stby;
+ const u8 lowpwr;
+ const u8 n_voltages;
+ const u16 *voltage_table;
+};
+
+#define NA 0xff
+
+#define VREG_INFO(_vreg, _reg, _en, _index, _stby, _lowpwr) \
+ [_vreg] = { \
+ .reg = _reg, \
+ .en = _en, \
+ .index = _index, \
+ .stby = _stby, \
+ .lowpwr = _lowpwr, \
+ .n_voltages = ARRAY_SIZE(_vreg##_table), \
+ .voltage_table = _vreg##_table, \
+ }
+
+static struct pcap_regulator vreg_table[] = {
+ VREG_INFO(V1, PCAP_REG_VREG1, 1, 2, 18, 0),
+ VREG_INFO(V2, PCAP_REG_VREG1, 5, 6, 19, 22),
+ VREG_INFO(V3, PCAP_REG_VREG1, 7, 8, 20, 23),
+ VREG_INFO(V4, PCAP_REG_VREG1, 11, 12, 21, 24),
+ /* V5 STBY and LOWPWR are on PCAP_REG_VREG2 */
+ VREG_INFO(V5, PCAP_REG_VREG1, 15, 16, 12, 19),
+
+ VREG_INFO(V6, PCAP_REG_VREG2, 1, 2, 14, 20),
+ VREG_INFO(V7, PCAP_REG_VREG2, 3, 4, 15, 21),
+ VREG_INFO(V8, PCAP_REG_VREG2, 5, 6, 16, 22),
+ VREG_INFO(V9, PCAP_REG_VREG2, 9, 10, 17, 23),
+ VREG_INFO(V10, PCAP_REG_VREG2, 10, NA, 18, 24),
+
+ VREG_INFO(VAUX1, PCAP_REG_AUXVREG, 1, 2, 22, 23),
+ /* VAUX2 ... VSIM2 STBY and LOWPWR are on PCAP_REG_LOWPWR */
+ VREG_INFO(VAUX2, PCAP_REG_AUXVREG, 4, 5, 0, 1),
+ VREG_INFO(VAUX3, PCAP_REG_AUXVREG, 7, 8, 2, 3),
+ VREG_INFO(VAUX4, PCAP_REG_AUXVREG, 12, 13, 4, 5),
+ VREG_INFO(VSIM, PCAP_REG_AUXVREG, 17, 18, NA, 6),
+ VREG_INFO(VSIM2, PCAP_REG_AUXVREG, 16, NA, NA, 7),
+ VREG_INFO(VVIB, PCAP_REG_AUXVREG, 19, 20, NA, NA),
+
+ VREG_INFO(SW1, PCAP_REG_SWCTRL, 1, 2, NA, NA),
+ VREG_INFO(SW2, PCAP_REG_SWCTRL, 6, 7, NA, NA),
+ /* SW3 STBY is on PCAP_REG_AUXVREG */
+ VREG_INFO(SW3, PCAP_REG_SWCTRL, 11, 12, 24, NA),
+
+ /* SWxS used to control SWx voltage on standby */
+/* VREG_INFO(SW1S, PCAP_REG_LOWPWR, NA, 12, NA, NA),
+ VREG_INFO(SW2S, PCAP_REG_LOWPWR, NA, 20, NA, NA), */
+};
+
+static int pcap_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ void *pcap = rdev_get_drvdata(rdev);
+ int uV;
+ u8 i;
+
+ /* the regulator doesn't support voltage switching */
+ if (vreg->n_voltages == 1)
+ return -EINVAL;
+
+ for (i = 0; i < vreg->n_voltages; i++) {
+ /* V1's table lists its highest voltage first, so try index 0 last */
+ if (i == 0 && rdev_get_id(rdev) == V1)
+ i = 1;
+ else if (i + 1 == vreg->n_voltages && rdev_get_id(rdev) == V1)
+ i = 0;
+
+ uV = vreg->voltage_table[i] * 1000;
+ if (min_uV <= uV && uV <= max_uV)
+ return ezx_pcap_set_bits(pcap, vreg->reg,
+ (vreg->n_voltages - 1) << vreg->index,
+ i << vreg->index);
+
+ if (i == 0 && rdev_get_id(rdev) == V1)
+ i = vreg->n_voltages - 1;
+ }
+
+ /* the requested voltage range is not supported by this regulator */
+ return -EINVAL;
+}
+
+static int pcap_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ void *pcap = rdev_get_drvdata(rdev);
+ u32 tmp;
+ int mV;
+
+ if (vreg->n_voltages == 1)
+ return vreg->voltage_table[0] * 1000;
+
+ ezx_pcap_read(pcap, vreg->reg, &tmp);
+ tmp = ((tmp >> vreg->index) & (vreg->n_voltages - 1));
+ mV = vreg->voltage_table[tmp];
+
+ return mV * 1000;
+}
+
+static int pcap_regulator_enable(struct regulator_dev *rdev)
+{
+ struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ void *pcap = rdev_get_drvdata(rdev);
+
+ if (vreg->en == NA)
+ return -EINVAL;
+
+ return ezx_pcap_set_bits(pcap, vreg->reg, 1 << vreg->en, 1 << vreg->en);
+}
+
+static int pcap_regulator_disable(struct regulator_dev *rdev)
+{
+ struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ void *pcap = rdev_get_drvdata(rdev);
+
+ if (vreg->en == NA)
+ return -EINVAL;
+
+ return ezx_pcap_set_bits(pcap, vreg->reg, 1 << vreg->en, 0);
+}
+
+static int pcap_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+ void *pcap = rdev_get_drvdata(rdev);
+ u32 tmp;
+
+ if (vreg->en == NA)
+ return -EINVAL;
+
+ ezx_pcap_read(pcap, vreg->reg, &tmp);
+ return (tmp >> vreg->en) & 1;
+}
+
+static int pcap_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned int index)
+{
+ struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
+
+ return vreg->voltage_table[index] * 1000;
+}
+
+static struct regulator_ops pcap_regulator_ops = {
+ .list_voltage = pcap_regulator_list_voltage,
+ .set_voltage = pcap_regulator_set_voltage,
+ .get_voltage = pcap_regulator_get_voltage,
+ .enable = pcap_regulator_enable,
+ .disable = pcap_regulator_disable,
+ .is_enabled = pcap_regulator_is_enabled,
+};
+
+#define VREG(_vreg) \
+ [_vreg] = { \
+ .name = #_vreg, \
+ .id = _vreg, \
+ .n_voltages = ARRAY_SIZE(_vreg##_table), \
+ .ops = &pcap_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+static struct regulator_desc pcap_regulators[] = {
+ VREG(V1), VREG(V2), VREG(V3), VREG(V4), VREG(V5), VREG(V6), VREG(V7),
+ VREG(V8), VREG(V9), VREG(V10), VREG(VAUX1), VREG(VAUX2), VREG(VAUX3),
+ VREG(VAUX4), VREG(VSIM), VREG(VSIM2), VREG(VVIB), VREG(SW1), VREG(SW2),
+};
+
+static int __devinit pcap_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev;
+ void *pcap = platform_get_drvdata(pdev);
+
+ rdev = regulator_register(&pcap_regulators[pdev->id], &pdev->dev,
+ pdev->dev.platform_data, pcap);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static int __devexit pcap_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+
+ return 0;
+}
+
+static struct platform_driver pcap_regulator_driver = {
+ .driver = {
+ .name = "pcap-regulator",
+ },
+ .probe = pcap_regulator_probe,
+ .remove = __devexit_p(pcap_regulator_remove),
+};
+
+static int __init pcap_regulator_init(void)
+{
+ return platform_driver_register(&pcap_regulator_driver);
+}
+
+static void __exit pcap_regulator_exit(void)
+{
+ platform_driver_unregister(&pcap_regulator_driver);
+}
+
+module_init(pcap_regulator_init);
+module_exit(pcap_regulator_exit);
+
+MODULE_AUTHOR("Daniel Ribeiro <drwyrm@gmail.com>");
+MODULE_DESCRIPTION("PCAP2 Regulator Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index e7db5664722e..144110788fd2 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -27,71 +27,81 @@ struct virtual_consumer_data {
unsigned int mode;
};
-static void update_voltage_constraints(struct virtual_consumer_data *data)
+static void update_voltage_constraints(struct device *dev,
+ struct virtual_consumer_data *data)
{
int ret;
if (data->min_uV && data->max_uV
&& data->min_uV <= data->max_uV) {
+ dev_dbg(dev, "Requesting %d-%duV\n",
+ data->min_uV, data->max_uV);
ret = regulator_set_voltage(data->regulator,
- data->min_uV, data->max_uV);
+ data->min_uV, data->max_uV);
if (ret != 0) {
- printk(KERN_ERR "regulator_set_voltage() failed: %d\n",
- ret);
+ dev_err(dev,
+ "regulator_set_voltage() failed: %d\n", ret);
return;
}
}
if (data->min_uV && data->max_uV && !data->enabled) {
+ dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
data->enabled = 1;
else
- printk(KERN_ERR "regulator_enable() failed: %d\n",
+ dev_err(dev, "regulator_enable() failed: %d\n",
ret);
}
if (!(data->min_uV && data->max_uV) && data->enabled) {
+ dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
data->enabled = 0;
else
- printk(KERN_ERR "regulator_disable() failed: %d\n",
+ dev_err(dev, "regulator_disable() failed: %d\n",
ret);
}
}
-static void update_current_limit_constraints(struct virtual_consumer_data
- *data)
+static void update_current_limit_constraints(struct device *dev,
+ struct virtual_consumer_data *data)
{
int ret;
if (data->max_uA
&& data->min_uA <= data->max_uA) {
+ dev_dbg(dev, "Requesting %d-%duA\n",
+ data->min_uA, data->max_uA);
ret = regulator_set_current_limit(data->regulator,
data->min_uA, data->max_uA);
if (ret != 0) {
- pr_err("regulator_set_current_limit() failed: %d\n",
- ret);
+ dev_err(dev,
+ "regulator_set_current_limit() failed: %d\n",
+ ret);
return;
}
}
if (data->max_uA && !data->enabled) {
+ dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
data->enabled = 1;
else
- printk(KERN_ERR "regulator_enable() failed: %d\n",
+ dev_err(dev, "regulator_enable() failed: %d\n",
ret);
}
if (!(data->min_uA && data->max_uA) && data->enabled) {
+ dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
data->enabled = 0;
else
- printk(KERN_ERR "regulator_disable() failed: %d\n",
+ dev_err(dev, "regulator_disable() failed: %d\n",
ret);
}
}
@@ -115,7 +125,7 @@ static ssize_t set_min_uV(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->lock);
data->min_uV = val;
- update_voltage_constraints(data);
+ update_voltage_constraints(dev, data);
mutex_unlock(&data->lock);
@@ -141,7 +151,7 @@ static ssize_t set_max_uV(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->lock);
data->max_uV = val;
- update_voltage_constraints(data);
+ update_voltage_constraints(dev, data);
mutex_unlock(&data->lock);
@@ -167,7 +177,7 @@ static ssize_t set_min_uA(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->lock);
data->min_uA = val;
- update_current_limit_constraints(data);
+ update_current_limit_constraints(dev, data);
mutex_unlock(&data->lock);
@@ -193,7 +203,7 @@ static ssize_t set_max_uA(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->lock);
data->max_uA = val;
- update_current_limit_constraints(data);
+ update_current_limit_constraints(dev, data);
mutex_unlock(&data->lock);
@@ -285,6 +295,8 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
drvdata->regulator = regulator_get(&pdev->dev, reg_id);
if (IS_ERR(drvdata->regulator)) {
ret = PTR_ERR(drvdata->regulator);
+ dev_err(&pdev->dev, "Failed to obtain supply '%s': %d\n",
+ reg_id, ret);
goto err;
}
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 17a00b0fafd1..768bd0e5b48b 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1419,6 +1419,8 @@ int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
{
struct platform_device *pdev;
int ret;
+ if (reg < 0 || reg >= NUM_WM8350_REGULATORS)
+ return -EINVAL;
if (wm8350->pmic.pdev[reg])
return -EBUSY;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 81adbdbd5042..139b7834d6e3 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -500,6 +500,15 @@ config RTC_DRV_M48T59
This driver can also be built as a module, if so, the module
will be called "rtc-m48t59".
+config RTC_DRV_MSM6242
+ tristate "Oki MSM6242"
+ help
+ If you say yes here you get support for the Oki MSM6242
+ timekeeping chip. It is used in some Amiga models (e.g. A2000).
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-msm6242.
+
config RTC_DRV_BQ4802
tristate "TI BQ4802"
help
@@ -509,6 +518,16 @@ config RTC_DRV_BQ4802
This driver can also be built as a module. If so, the module
will be called rtc-bq4802.
+config RTC_DRV_RP5C01
+ tristate "Ricoh RP5C01"
+ help
+ If you say yes here you get support for the Ricoh RP5C01
+ timekeeping chip. It is used in some Amiga models (e.g. A3000
+ and A4000).
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rp5c01.
+
config RTC_DRV_V3020
tristate "EM Microelectronic V3020"
help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 3c0f2b2ac927..2a565f8e06b7 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
+obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
+obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index aafd3e6ebb0d..a118eb0f1e67 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,8 +1,8 @@
/*
* Blackfin On-Chip Real Time Clock Driver
- * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789]
+ * Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
*
- * Copyright 2004-2008 Analog Devices Inc.
+ * Copyright 2004-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -363,7 +363,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
struct bfin_rtc *rtc;
struct device *dev = &pdev->dev;
int ret = 0;
- unsigned long timeout;
+ unsigned long timeout = jiffies + HZ;
dev_dbg_stamp(dev);
@@ -374,32 +374,32 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
device_init_wakeup(dev, 1);
+ /* Register our RTC with the RTC framework */
+ rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops,
+ THIS_MODULE);
+ if (unlikely(IS_ERR(rtc->rtc_dev))) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto err;
+ }
+
/* Grab the IRQ and init the hardware */
ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
- goto err;
+ goto err_reg;
/* sometimes the bootloader touched things, but the write complete was not
* enabled, so let's just do a quick timeout here since the IRQ will not fire ...
*/
- timeout = jiffies + HZ;
while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
if (time_after(jiffies, timeout))
break;
bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
bfin_write_RTC_SWCNT(0);
- /* Register our RTC with the RTC framework */
- rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE);
- if (unlikely(IS_ERR(rtc->rtc_dev))) {
- ret = PTR_ERR(rtc->rtc_dev);
- goto err_irq;
- }
-
return 0;
- err_irq:
- free_irq(IRQ_RTC, dev);
- err:
+err_reg:
+ rtc_device_unregister(rtc->rtc_dev);
+err:
kfree(rtc);
return ret;
}
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
new file mode 100644
index 000000000000..10b9b7fb4700
--- /dev/null
+++ b/drivers/rtc/rtc-msm6242.c
@@ -0,0 +1,268 @@
+/*
+ * Oki MSM6242 RTC Driver
+ *
+ * Copyright 2009 Geert Uytterhoeven
+ *
+ * Based on the A2000 TOD code in arch/m68k/amiga/config.c
+ * Copyright (C) 1993 Hamish Macdonald
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+
+enum {
+ MSM6242_SECOND1 = 0x0, /* 1-second digit register */
+ MSM6242_SECOND10 = 0x1, /* 10-second digit register */
+ MSM6242_MINUTE1 = 0x2, /* 1-minute digit register */
+ MSM6242_MINUTE10 = 0x3, /* 10-minute digit register */
+ MSM6242_HOUR1 = 0x4, /* 1-hour digit register */
+ MSM6242_HOUR10 = 0x5, /* PM/AM, 10-hour digit register */
+ MSM6242_DAY1 = 0x6, /* 1-day digit register */
+ MSM6242_DAY10 = 0x7, /* 10-day digit register */
+ MSM6242_MONTH1 = 0x8, /* 1-month digit register */
+ MSM6242_MONTH10 = 0x9, /* 10-month digit register */
+ MSM6242_YEAR1 = 0xa, /* 1-year digit register */
+ MSM6242_YEAR10 = 0xb, /* 10-year digit register */
+ MSM6242_WEEK = 0xc, /* Week register */
+ MSM6242_CD = 0xd, /* Control Register D */
+ MSM6242_CE = 0xe, /* Control Register E */
+ MSM6242_CF = 0xf, /* Control Register F */
+};
+
+#define MSM6242_HOUR10_AM (0 << 2)
+#define MSM6242_HOUR10_PM (1 << 2)
+#define MSM6242_HOUR10_HR_MASK (3 << 0)
+
+#define MSM6242_WEEK_SUNDAY 0
+#define MSM6242_WEEK_MONDAY 1
+#define MSM6242_WEEK_TUESDAY 2
+#define MSM6242_WEEK_WEDNESDAY 3
+#define MSM6242_WEEK_THURSDAY 4
+#define MSM6242_WEEK_FRIDAY 5
+#define MSM6242_WEEK_SATURDAY 6
+
+#define MSM6242_CD_30_S_ADJ (1 << 3) /* 30-second adjustment */
+#define MSM6242_CD_IRQ_FLAG (1 << 2)
+#define MSM6242_CD_BUSY (1 << 1)
+#define MSM6242_CD_HOLD (1 << 0)
+
+#define MSM6242_CE_T_MASK (3 << 2)
+#define MSM6242_CE_T_64HZ (0 << 2) /* period 1/64 second */
+#define MSM6242_CE_T_1HZ (1 << 2) /* period 1 second */
+#define MSM6242_CE_T_1MINUTE (2 << 2) /* period 1 minute */
+#define MSM6242_CE_T_1HOUR (3 << 2) /* period 1 hour */
+
+#define MSM6242_CE_ITRPT_STND (1 << 1)
+#define MSM6242_CE_MASK (1 << 0) /* STD.P output control */
+
+#define MSM6242_CF_TEST (1 << 3)
+#define MSM6242_CF_12H (0 << 2)
+#define MSM6242_CF_24H (1 << 2)
+#define MSM6242_CF_STOP (1 << 1)
+#define MSM6242_CF_REST (1 << 0) /* reset */
+
+
+struct msm6242_priv {
+ u32 __iomem *regs;
+ struct rtc_device *rtc;
+};
+
+static inline unsigned int msm6242_read(struct msm6242_priv *priv,
+ unsigned int reg)
+{
+ return __raw_readl(&priv->regs[reg]) & 0xf;
+}
+
+static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
+ unsigned int reg)
+{
+ return __raw_writel(val, &priv->regs[reg]);
+}
+
+static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
+ unsigned int reg)
+{
+ msm6242_write(priv, msm6242_read(priv, reg) | val, reg);
+}
+
+static inline void msm6242_clear(struct msm6242_priv *priv, unsigned int val,
+ unsigned int reg)
+{
+ msm6242_write(priv, msm6242_read(priv, reg) & ~val, reg);
+}
+
+static void msm6242_lock(struct msm6242_priv *priv)
+{
+ int cnt = 5;
+
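+ /* Assert HOLD and wait for BUSY to clear; drop HOLD and retry if the chip is mid-update */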
+ msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
+
+ while ((msm6242_read(priv, MSM6242_CD) & MSM6242_CD_BUSY) && cnt--) {
+ msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ udelay(70);
+ msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ }
+
+ if (cnt < 0)
+ pr_warning("msm6242: timed out waiting for RTC (0x%x)\n",
+ msm6242_read(priv, MSM6242_CD));
+}
+
+static void msm6242_unlock(struct msm6242_priv *priv)
+{
+ msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
+}
+
+static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct msm6242_priv *priv = dev_get_drvdata(dev);
+
+ msm6242_lock(priv);
+
+ tm->tm_sec = msm6242_read(priv, MSM6242_SECOND10) * 10 +
+ msm6242_read(priv, MSM6242_SECOND1);
+ tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 +
+ msm6242_read(priv, MSM6242_MINUTE1);
+ tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10 & 3)) * 10 +
+ msm6242_read(priv, MSM6242_HOUR1);
+ tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 +
+ msm6242_read(priv, MSM6242_DAY1);
+ tm->tm_wday = msm6242_read(priv, MSM6242_WEEK);
+ tm->tm_mon = msm6242_read(priv, MSM6242_MONTH10) * 10 +
+ msm6242_read(priv, MSM6242_MONTH1) - 1;
+ tm->tm_year = msm6242_read(priv, MSM6242_YEAR10) * 10 +
+ msm6242_read(priv, MSM6242_YEAR1);
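+ /* Only two year digits are stored: treat 00-69 as 2000-2069, 70-99 as 1970-1999 */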
+ if (tm->tm_year <= 69)
+ tm->tm_year += 100;
+
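+ /* In 12-hour mode, use the AM/PM flag to fold the hour into the 0-23 range */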
+ if (!(msm6242_read(priv, MSM6242_CF) & MSM6242_CF_24H)) {
+ unsigned int pm = msm6242_read(priv, MSM6242_HOUR10) &
+ MSM6242_HOUR10_PM;
+ if (!pm && tm->tm_hour == 12)
+ tm->tm_hour = 0;
+ else if (pm && tm->tm_hour != 12)
+ tm->tm_hour += 12;
+ }
+
+ msm6242_unlock(priv);
+
+ return rtc_valid_tm(tm);
+}
+
+static int msm6242_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct msm6242_priv *priv = dev_get_drvdata(dev);
+
+ msm6242_lock(priv);
+
+ msm6242_write(priv, tm->tm_sec / 10, MSM6242_SECOND10);
+ msm6242_write(priv, tm->tm_sec % 10, MSM6242_SECOND1);
+ msm6242_write(priv, tm->tm_min / 10, MSM6242_MINUTE10);
+ msm6242_write(priv, tm->tm_min % 10, MSM6242_MINUTE1);
+ if (msm6242_read(priv, MSM6242_CF) & MSM6242_CF_24H)
+ msm6242_write(priv, tm->tm_hour / 10, MSM6242_HOUR10);
+ else if (tm->tm_hour >= 12)
+ msm6242_write(priv, MSM6242_HOUR10_PM + (tm->tm_hour - 12) / 10,
+ MSM6242_HOUR10);
+ else
+ msm6242_write(priv, tm->tm_hour / 10, MSM6242_HOUR10);
+ msm6242_write(priv, tm->tm_hour % 10, MSM6242_HOUR1);
+ msm6242_write(priv, tm->tm_mday / 10, MSM6242_DAY10);
+ msm6242_write(priv, tm->tm_mday % 10, MSM6242_DAY1);
+ if (tm->tm_wday != -1)
+ msm6242_write(priv, tm->tm_wday, MSM6242_WEEK);
+ msm6242_write(priv, (tm->tm_mon + 1) / 10, MSM6242_MONTH10);
+ msm6242_write(priv, (tm->tm_mon + 1) % 10, MSM6242_MONTH1);
+ if (tm->tm_year >= 100)
+ tm->tm_year -= 100;
+ msm6242_write(priv, tm->tm_year / 10, MSM6242_YEAR10);
+ msm6242_write(priv, tm->tm_year % 10, MSM6242_YEAR1);
+
+ msm6242_unlock(priv);
+ return 0;
+}
+
+static const struct rtc_class_ops msm6242_rtc_ops = {
+ .read_time = msm6242_read_time,
+ .set_time = msm6242_set_time,
+};
+
+static int __init msm6242_rtc_probe(struct platform_device *dev)
+{
+ struct resource *res;
+ struct msm6242_priv *priv;
+ struct rtc_device *rtc;
+ int error;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = ioremap(res->start, resource_size(res));
+ if (!priv->regs) {
+ error = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ error = PTR_ERR(rtc);
+ goto out_unmap;
+ }
+
+ priv->rtc = rtc;
+ platform_set_drvdata(dev, priv);
+ return 0;
+
+out_unmap:
+ iounmap(priv->regs);
+out_free_priv:
+ kfree(priv);
+ return error;
+}
+
+static int __exit msm6242_rtc_remove(struct platform_device *dev)
+{
+ struct msm6242_priv *priv = platform_get_drvdata(dev);
+
+ rtc_device_unregister(priv->rtc);
+ iounmap(priv->regs);
+ kfree(priv);
+ return 0;
+}
+
+static struct platform_driver msm6242_rtc_driver = {
+ .driver = {
+ .name = "rtc-msm6242",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(msm6242_rtc_remove),
+};
+
+static int __init msm6242_rtc_init(void)
+{
+ return platform_driver_probe(&msm6242_rtc_driver, msm6242_rtc_probe);
+}
+
+static void __exit msm6242_rtc_fini(void)
+{
+ platform_driver_unregister(&msm6242_rtc_driver);
+}
+
+module_init(msm6242_rtc_init);
+module_exit(msm6242_rtc_fini);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert@linux-m68k.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Oki MSM6242 RTC driver");
+MODULE_ALIAS("platform:rtc-msm6242");
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
new file mode 100644
index 000000000000..e1313feb060f
--- /dev/null
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -0,0 +1,222 @@
+/*
+ * Ricoh RP5C01 RTC Driver
+ *
+ * Copyright 2009 Geert Uytterhoeven
+ *
+ * Based on the A3000 TOD code in arch/m68k/amiga/config.c
+ * Copyright (C) 1993 Hamish Macdonald
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+
+enum {
+ RP5C01_1_SECOND = 0x0, /* MODE 00 */
+ RP5C01_10_SECOND = 0x1, /* MODE 00 */
+ RP5C01_1_MINUTE = 0x2, /* MODE 00 and MODE 01 */
+ RP5C01_10_MINUTE = 0x3, /* MODE 00 and MODE 01 */
+ RP5C01_1_HOUR = 0x4, /* MODE 00 and MODE 01 */
+ RP5C01_10_HOUR = 0x5, /* MODE 00 and MODE 01 */
+ RP5C01_DAY_OF_WEEK = 0x6, /* MODE 00 and MODE 01 */
+ RP5C01_1_DAY = 0x7, /* MODE 00 and MODE 01 */
+ RP5C01_10_DAY = 0x8, /* MODE 00 and MODE 01 */
+ RP5C01_1_MONTH = 0x9, /* MODE 00 */
+ RP5C01_10_MONTH = 0xa, /* MODE 00 */
+ RP5C01_1_YEAR = 0xb, /* MODE 00 */
+ RP5C01_10_YEAR = 0xc, /* MODE 00 */
+
+ RP5C01_12_24_SELECT = 0xa, /* MODE 01 */
+ RP5C01_LEAP_YEAR = 0xb, /* MODE 01 */
+
+ RP5C01_MODE = 0xd, /* all modes */
+ RP5C01_TEST = 0xe, /* all modes */
+ RP5C01_RESET = 0xf, /* all modes */
+};
+
+#define RP5C01_12_24_SELECT_12 (0 << 0)
+#define RP5C01_12_24_SELECT_24 (1 << 0)
+
+#define RP5C01_10_HOUR_AM (0 << 1)
+#define RP5C01_10_HOUR_PM (1 << 1)
+
+#define RP5C01_MODE_TIMER_EN (1 << 3) /* timer enable */
+#define RP5C01_MODE_ALARM_EN (1 << 2) /* alarm enable */
+
+#define RP5C01_MODE_MODE_MASK (3 << 0)
+#define RP5C01_MODE_MODE00 (0 << 0) /* time */
+#define RP5C01_MODE_MODE01 (1 << 0) /* alarm, 12h/24h, leap year */
+#define RP5C01_MODE_RAM_BLOCK10 (2 << 0) /* RAM 4 bits x 13 */
+#define RP5C01_MODE_RAM_BLOCK11 (3 << 0) /* RAM 4 bits x 13 */
+
+#define RP5C01_RESET_1HZ_PULSE (1 << 3)
+#define RP5C01_RESET_16HZ_PULSE (1 << 2)
+#define RP5C01_RESET_SECOND (1 << 1) /* reset divider stages for */
+ /* seconds or smaller units */
+#define RP5C01_RESET_ALARM (1 << 0) /* reset all alarm registers */
+
+
+struct rp5c01_priv {
+ u32 __iomem *regs;
+ struct rtc_device *rtc;
+};
+
+static inline unsigned int rp5c01_read(struct rp5c01_priv *priv,
+ unsigned int reg)
+{
+ return __raw_readl(&priv->regs[reg]) & 0xf;
+}
+
+static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val,
+ unsigned int reg)
+{
+ return __raw_writel(val, &priv->regs[reg]);
+}
+
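+/* lock/unlock: select MODE 00 with TIMER_EN dropped while accessing the time registers, then restore MODE 01 with the timer enabled */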
+static void rp5c01_lock(struct rp5c01_priv *priv)
+{
+ rp5c01_write(priv, RP5C01_MODE_MODE00, RP5C01_MODE);
+}
+
+static void rp5c01_unlock(struct rp5c01_priv *priv)
+{
+ rp5c01_write(priv, RP5C01_MODE_TIMER_EN | RP5C01_MODE_MODE01,
+ RP5C01_MODE);
+}
+
+static int rp5c01_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rp5c01_priv *priv = dev_get_drvdata(dev);
+
+ rp5c01_lock(priv);
+
+ tm->tm_sec = rp5c01_read(priv, RP5C01_10_SECOND) * 10 +
+ rp5c01_read(priv, RP5C01_1_SECOND);
+ tm->tm_min = rp5c01_read(priv, RP5C01_10_MINUTE) * 10 +
+ rp5c01_read(priv, RP5C01_1_MINUTE);
+ tm->tm_hour = rp5c01_read(priv, RP5C01_10_HOUR) * 10 +
+ rp5c01_read(priv, RP5C01_1_HOUR);
+ tm->tm_mday = rp5c01_read(priv, RP5C01_10_DAY) * 10 +
+ rp5c01_read(priv, RP5C01_1_DAY);
+ tm->tm_wday = rp5c01_read(priv, RP5C01_DAY_OF_WEEK);
+ tm->tm_mon = rp5c01_read(priv, RP5C01_10_MONTH) * 10 +
+ rp5c01_read(priv, RP5C01_1_MONTH) - 1;
+ tm->tm_year = rp5c01_read(priv, RP5C01_10_YEAR) * 10 +
+ rp5c01_read(priv, RP5C01_1_YEAR);
+ if (tm->tm_year <= 69)
+ tm->tm_year += 100;
+
+ rp5c01_unlock(priv);
+
+ return rtc_valid_tm(tm);
+}
+
+static int rp5c01_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rp5c01_priv *priv = dev_get_drvdata(dev);
+
+ rp5c01_lock(priv);
+
+ rp5c01_write(priv, tm->tm_sec / 10, RP5C01_10_SECOND);
+ rp5c01_write(priv, tm->tm_sec % 10, RP5C01_1_SECOND);
+ rp5c01_write(priv, tm->tm_min / 10, RP5C01_10_MINUTE);
+ rp5c01_write(priv, tm->tm_min % 10, RP5C01_1_MINUTE);
+ rp5c01_write(priv, tm->tm_hour / 10, RP5C01_10_HOUR);
+ rp5c01_write(priv, tm->tm_hour % 10, RP5C01_1_HOUR);
+ rp5c01_write(priv, tm->tm_mday / 10, RP5C01_10_DAY);
+ rp5c01_write(priv, tm->tm_mday % 10, RP5C01_1_DAY);
+ if (tm->tm_wday != -1)
+ rp5c01_write(priv, tm->tm_wday, RP5C01_DAY_OF_WEEK);
+ rp5c01_write(priv, (tm->tm_mon + 1) / 10, RP5C01_10_MONTH);
+ rp5c01_write(priv, (tm->tm_mon + 1) % 10, RP5C01_1_MONTH);
+ if (tm->tm_year >= 100)
+ tm->tm_year -= 100;
+ rp5c01_write(priv, tm->tm_year / 10, RP5C01_10_YEAR);
+ rp5c01_write(priv, tm->tm_year % 10, RP5C01_1_YEAR);
+
+ rp5c01_unlock(priv);
+ return 0;
+}
+
+static const struct rtc_class_ops rp5c01_rtc_ops = {
+ .read_time = rp5c01_read_time,
+ .set_time = rp5c01_set_time,
+};
+
+static int __init rp5c01_rtc_probe(struct platform_device *dev)
+{
+ struct resource *res;
+ struct rp5c01_priv *priv;
+ struct rtc_device *rtc;
+ int error;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = ioremap(res->start, resource_size(res));
+ if (!priv->regs) {
+ error = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ error = PTR_ERR(rtc);
+ goto out_unmap;
+ }
+
+ priv->rtc = rtc;
+ platform_set_drvdata(dev, priv);
+ return 0;
+
+out_unmap:
+ iounmap(priv->regs);
+out_free_priv:
+ kfree(priv);
+ return error;
+}
+
+static int __exit rp5c01_rtc_remove(struct platform_device *dev)
+{
+ struct rp5c01_priv *priv = platform_get_drvdata(dev);
+
+ rtc_device_unregister(priv->rtc);
+ iounmap(priv->regs);
+ kfree(priv);
+ return 0;
+}
+
+static struct platform_driver rp5c01_rtc_driver = {
+ .driver = {
+ .name = "rtc-rp5c01",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(rp5c01_rtc_remove),
+};
+
+static int __init rp5c01_rtc_init(void)
+{
+ return platform_driver_probe(&rp5c01_rtc_driver, rp5c01_rtc_probe);
+}
+
+static void __exit rp5c01_rtc_fini(void)
+{
+ platform_driver_unregister(&rp5c01_rtc_driver);
+}
+
+module_init(rp5c01_rtc_init);
+module_exit(rp5c01_rtc_fini);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert@linux-m68k.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ricoh RP5C01 RTC driver");
+MODULE_ALIAS("platform:rtc-rp5c01");
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index f11297aff854..2c839d0d21bd 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -1,7 +1,7 @@
/*
* Driver for NEC VR4100 series Real Time Clock unit.
*
- * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,7 +33,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index f8b1f04f26b8..447af034ac06 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1696,8 +1696,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
DBF_DEV_EVENT(DBF_ERR, device, "%s",
"unsolicited interrupt received "
"(sense available)");
- device->discipline->dump_sense_dbf(device, NULL, irb,
- "unsolicited");
+ device->discipline->dump_sense_dbf(device, irb, "unsolicited");
}
dasd_schedule_device_bh(device);
@@ -2945,38 +2944,16 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req,
struct irb *irb, char *reason)
{
u64 *sense;
- int sl;
- struct tsb *tsb;
- sense = NULL;
- tsb = NULL;
- if (req && scsw_is_tm(&req->irb.scsw)) {
- if (irb->scsw.tm.tcw)
- tsb = tcw_get_tsb(
- (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
- if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
- switch (tsb->flags & 0x07) {
- case 1: /* tsa_iostat */
- sense = (u64 *)tsb->tsa.iostat.sense;
- break;
- case 2: /* ts_ddpc */
- sense = (u64 *)tsb->tsa.ddpc.sense;
- break;
- case 3: /* tsa_intrg */
- break;
- }
- }
- } else {
- if (irb->esw.esw0.erw.cons)
- sense = (u64 *)irb->ecw;
- }
+ sense = (u64 *) dasd_get_sense(irb);
if (sense) {
- for (sl = 0; sl < 4; sl++) {
- DBF_DEV_EVENT(DBF_EMERG, device,
- "%s: %016llx %016llx %016llx %016llx",
- reason, sense[0], sense[1], sense[2],
- sense[3]);
- }
+ DBF_DEV_EVENT(DBF_EMERG, device,
+ "%s: %s %02x%02x%02x %016llx %016llx %016llx "
+ "%016llx", reason,
+ scsw_is_tm(&irb->scsw) ? "t" : "c",
+ scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
+ scsw_dstat(&irb->scsw), sense[0], sense[1],
+ sense[2], sense[3]);
} else {
DBF_DEV_EVENT(DBF_EMERG, device, "%s",
"SORRY - NO VALID SENSE AVAILABLE\n");
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d970ce2814be..cb8f9cef7429 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -172,7 +172,7 @@ dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
device = cqr->startdev;
/* dump sense data to s390 debugfeature*/
if (device->discipline && device->discipline->dump_sense_dbf)
- device->discipline->dump_sense_dbf(device, cqr, irb, "log");
+ device->discipline->dump_sense_dbf(device, irb, "log");
}
EXPORT_SYMBOL(dasd_log_sense_dbf);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index e21ee735f926..9037d108ca2c 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -241,7 +241,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
/* check for unsolicited interrupts */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"unsolicited interrupt received");
- device->discipline->dump_sense_dbf(device, NULL, irb, "unsolicited");
+ device->discipline->dump_sense_dbf(device, irb, "unsolicited");
dasd_schedule_device_bh(device);
return;
};
@@ -447,14 +447,17 @@ static void
dasd_fba_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req,
struct irb *irb, char *reason)
{
- int sl;
- if (irb->esw.esw0.erw.cons) {
- for (sl = 0; sl < 4; sl++) {
- DBF_DEV_EVENT(DBF_EMERG, device,
- "%s: %08x %08x %08x %08x",
- reason, irb->ecw[8 * 0], irb->ecw[8 * 1],
- irb->ecw[8 * 2], irb->ecw[8 * 3]);
- }
+ u64 *sense;
+
+ sense = (u64 *) dasd_get_sense(irb);
+ if (sense) {
+ DBF_DEV_EVENT(DBF_EMERG, device,
+ "%s: %s %02x%02x%02x %016llx %016llx %016llx "
+ "%016llx", reason,
+ scsw_is_tm(&irb->scsw) ? "t" : "c",
+ scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
+ scsw_dstat(&irb->scsw), sense[0], sense[1],
+ sense[2], sense[3]);
} else {
DBF_DEV_EVENT(DBF_EMERG, device, "%s",
"SORRY - NO VALID SENSE AVAILABLE\n");
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index fd63b2f2bda9..b699ca356ac5 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -284,8 +284,7 @@ struct dasd_discipline {
dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
struct irb *);
- void (*dump_sense_dbf) (struct dasd_device *, struct dasd_ccw_req *,
- struct irb *, char *);
+ void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
void (*handle_unsolicited_interrupt) (struct dasd_device *,
struct irb *);
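
The three dasd hunks above drop the now-unused request argument from dump_sense_dbf() and replace the open-coded sense extraction with a call to dasd_get_sense(). A hedged sketch of what such a helper looks like, inferred only from the removed eckd/fba branches (the name demo_get_sense and the simplification are illustrative; the real dasd_get_sense() lives in the common dasd code):

        /* Return a pointer to the sense bytes carried by an irb, or NULL. */
        static char *demo_get_sense(struct irb *irb)
        {
                /* command-mode path, as in the removed fba/eckd code */
                if (irb->esw.esw0.erw.cons)
                        return (char *) irb->ecw;
                /* the removed eckd code additionally walked the transport-mode
                 * TCW/TSB (scsw_is_tm(), tcw_get_tsb(), tsb->tsa.*.sense);
                 * the real helper folds that branch in as well */
                return NULL;
        }
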
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 7892550d7932..3234e90bd7f9 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp)
goto out_path;
}
filp->private_data = monpriv;
- dev_set_drvdata(&monreader_device, monpriv);
+ dev_set_drvdata(monreader_device, monpriv);
unlock_kernel();
return nonseekable_open(inode, filp);
@@ -463,7 +463,7 @@ static struct miscdevice mon_dev = {
*****************************************************************************/
static int monreader_freeze(struct device *dev)
{
- struct mon_private *monpriv = dev_get_drvdata(&dev);
+ struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
index 85f491ea929c..7a7bfc947d97 100644
--- a/drivers/s390/char/sclp_rw.h
+++ b/drivers/s390/char/sclp_rw.h
@@ -92,5 +92,10 @@ void sclp_set_columns(struct sclp_buffer *, unsigned short);
void sclp_set_htab(struct sclp_buffer *, unsigned short);
int sclp_chars_in_buffer(struct sclp_buffer *);
+#ifdef CONFIG_SCLP_CONSOLE
void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
+#else
+static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
+#endif
+
#endif /* __SCLP_RW_H__ */
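
The sclp_rw.h change above is the usual "stub when the option is off" idiom: callers invoke sclp_console_pm_event() unconditionally, and an empty static inline compiles away when CONFIG_SCLP_CONSOLE is not set. A generic sketch of the pattern with hypothetical names (CONFIG_FOO, foo_pm_event):

        #ifdef CONFIG_FOO
        void foo_pm_event(int event);                   /* real implementation elsewhere */
        #else
        static inline void foo_pm_event(int event) { } /* compiles to nothing */
        #endif
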
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index adb3dd301528..fa4c9662f65e 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
# Makefile for the S/390 common i/o drivers
#
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
fcx.o itcw.o crw.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 425e8f89a6c5..37aa611d4ac5 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -37,29 +37,6 @@ struct channel_path_desc {
struct channel_path;
-struct css_general_char {
- u64 : 12;
- u32 dynio : 1; /* bit 12 */
- u32 : 28;
- u32 aif : 1; /* bit 41 */
- u32 : 3;
- u32 mcss : 1; /* bit 45 */
- u32 fcs : 1; /* bit 46 */
- u32 : 1;
- u32 ext_mb : 1; /* bit 48 */
- u32 : 7;
- u32 aif_tdd : 1; /* bit 56 */
- u32 : 1;
- u32 qebsm : 1; /* bit 58 */
- u32 : 8;
- u32 aif_osa : 1; /* bit 67 */
- u32 : 14;
- u32 cib : 1; /* bit 82 */
- u32 : 5;
- u32 fcx : 1; /* bit 88 */
- u32 : 7;
-}__attribute__((packed));
-
struct css_chsc_char {
u64 res;
u64 : 20;
@@ -72,7 +49,6 @@ struct css_chsc_char {
u32 : 19;
}__attribute__((packed));
-extern struct css_general_char css_general_characteristics;
extern struct css_chsc_char css_chsc_characteristics;
struct chsc_ssd_info {
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index e79e18101f87..63abb06c4edb 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -4,8 +4,7 @@
* Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422)
* Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
* by D. Gilbert and aeb (20020609)
- * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025
- * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702
+ * Update to SPC-4 T10/1713-D Rev 20, 22 May 2009, D. Gilbert 20090624
*/
#include <linux/blkdev.h>
@@ -56,9 +55,9 @@ static const char * cdb_byte0_names[] = {
"Read Buffer",
/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)",
/* 40-41 */ "Change Definition", "Write Same(10)",
-/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support",
- "Play audio(10)", "Get configuration", "Play audio msf",
- "Play audio track/index",
+/* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP",
+ "Read density support", "Play audio(10)", "Get configuration",
+ "Play audio msf", "Play audio track/index",
/* 49-4f */ "Play track relative(10)", "Get event status notification",
"Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
NULL,
@@ -71,12 +70,13 @@ static const char * cdb_byte0_names[] = {
/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length",
+/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB",
+ "Variable length",
/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
"Receive copy results",
/* 85-89 */ "ATA command pass through(16)", "Access control in",
"Access control out", "Read(16)", "Memory Export Out(16)",
-/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes",
+/* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes",
"Write and verify(16)", "Verify(16)",
/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
"Lock/unlock cache(16)", "Write same(16)", NULL,
@@ -107,22 +107,24 @@ struct value_name_pair {
};
static const struct value_name_pair maint_in_arr[] = {
- {0x5, "Report device identifier"},
+ {0x5, "Report identifying information"},
{0xa, "Report target port groups"},
{0xb, "Report aliases"},
{0xc, "Report supported operation codes"},
{0xd, "Report supported task management functions"},
{0xe, "Report priority"},
{0xf, "Report timestamp"},
+ {0x10, "Management protocol in"},
};
#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
static const struct value_name_pair maint_out_arr[] = {
- {0x6, "Set device identifier"},
+ {0x6, "Set identifying information"},
{0xa, "Set target port groups"},
{0xb, "Change aliases"},
{0xe, "Set priority"},
- {0xe, "Set timestamp"},
+ {0xf, "Set timestamp"},
+ {0x10, "Management protocol out"},
};
#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
@@ -412,6 +414,7 @@ static const struct error_info additional[] =
{0x0004, "Beginning-of-partition/medium detected"},
{0x0005, "End-of-data detected"},
{0x0006, "I/O process terminated"},
+ {0x0007, "Programmable early warning detected"},
{0x0011, "Audio play operation in progress"},
{0x0012, "Audio play operation paused"},
{0x0013, "Audio play operation successfully completed"},
@@ -425,6 +428,7 @@ static const struct error_info additional[] =
{0x001B, "Set capacity operation in progress"},
{0x001C, "Verify operation in progress"},
{0x001D, "ATA pass through information available"},
+ {0x001E, "Conflicting SA creation request"},
{0x0100, "No index/sector signal"},
@@ -449,9 +453,12 @@ static const struct error_info additional[] =
{0x040B, "Logical unit not accessible, target port in standby state"},
{0x040C, "Logical unit not accessible, target port in unavailable "
"state"},
+ {0x040D, "Logical unit not ready, structure check required"},
{0x0410, "Logical unit not ready, auxiliary memory not accessible"},
{0x0411, "Logical unit not ready, notify (enable spinup) required"},
{0x0412, "Logical unit not ready, offline"},
+ {0x0413, "Logical unit not ready, SA creation in progress"},
+ {0x0414, "Logical unit not ready, space allocation in progress"},
{0x0500, "Logical unit does not respond to selection"},
@@ -479,6 +486,9 @@ static const struct error_info additional[] =
{0x0B03, "Warning - background self-test failed"},
{0x0B04, "Warning - background pre-scan detected medium error"},
{0x0B05, "Warning - background medium scan detected medium error"},
+ {0x0B06, "Warning - non-volatile cache now volatile"},
+ {0x0B07, "Warning - degraded power to non-volatile cache"},
+ {0x0B08, "Warning - power loss expected"},
{0x0C00, "Write error"},
{0x0C01, "Write error - recovered with auto reallocation"},
@@ -593,6 +603,7 @@ static const struct error_info additional[] =
{0x1C02, "Grown defect list not found"},
{0x1D00, "Miscompare during verify operation"},
+ {0x1D01, "Miscompare verify of unmapped LBA"},
{0x1E00, "Recovered id with ECC correction"},
@@ -626,6 +637,7 @@ static const struct error_info additional[] =
{0x2405, "Security working key frozen"},
{0x2406, "Nonce not unique"},
{0x2407, "Nonce timestamp out of range"},
+ {0x2408, "Invalid XCDB"},
{0x2500, "Logical unit not supported"},
@@ -656,10 +668,12 @@ static const struct error_info additional[] =
{0x2704, "Persistent write protect"},
{0x2705, "Permanent write protect"},
{0x2706, "Conditional write protect"},
+ {0x2707, "Space allocation failed write protect"},
{0x2800, "Not ready to ready change, medium may have changed"},
{0x2801, "Import or export element accessed"},
{0x2802, "Format-layer may have changed"},
+ {0x2803, "Import/export element accessed, medium changed"},
{0x2900, "Power on, reset, or bus device reset occurred"},
{0x2901, "Power on occurred"},
@@ -680,11 +694,16 @@ static const struct error_info additional[] =
{0x2A07, "Implicit asymmetric access state transition failed"},
{0x2A08, "Priority changed"},
{0x2A09, "Capacity data has changed"},
+ {0x2A0A, "Error history I_T nexus cleared"},
+ {0x2A0B, "Error history snapshot released"},
+ {0x2A0C, "Error recovery attributes have changed"},
+ {0x2A0D, "Data encryption capabilities changed"},
{0x2A10, "Timestamp changed"},
{0x2A11, "Data encryption parameters changed by another i_t nexus"},
{0x2A12, "Data encryption parameters changed by vendor specific "
"event"},
{0x2A13, "Data encryption key instance counter has changed"},
+ {0x2A14, "SA creation capabilities data has changed"},
{0x2B00, "Copy cannot execute since host cannot disconnect"},
@@ -723,6 +742,8 @@ static const struct error_info additional[] =
{0x300C, "WORM medium - overwrite attempted"},
{0x300D, "WORM medium - integrity check"},
{0x3010, "Medium not formatted"},
+ {0x3011, "Incompatible volume type"},
+ {0x3012, "Incompatible volume qualifier"},
{0x3100, "Medium format corrupted"},
{0x3101, "Format command failed"},
@@ -782,6 +803,10 @@ static const struct error_info additional[] =
{0x3B15, "Medium magazine unlocked"},
{0x3B16, "Mechanical positioning or changer error"},
{0x3B17, "Read past end of user object"},
+ {0x3B18, "Element disabled"},
+ {0x3B19, "Element enabled"},
+ {0x3B1A, "Data transfer device removed"},
+ {0x3B1B, "Data transfer device inserted"},
{0x3D00, "Invalid bits in identify message"},
@@ -882,6 +907,8 @@ static const struct error_info additional[] =
{0x5506, "Auxiliary memory out of space"},
{0x5507, "Quota error"},
{0x5508, "Maximum number of supplemental decryption keys exceeded"},
+ {0x5509, "Medium auxiliary memory not accessible"},
+ {0x550A, "Data currently unavailable"},
{0x5700, "Unable to recover table-of-contents"},
@@ -993,6 +1020,12 @@ static const struct error_info additional[] =
{0x5E02, "Standby condition activated by timer"},
{0x5E03, "Idle condition activated by command"},
{0x5E04, "Standby condition activated by command"},
+ {0x5E05, "Idle_b condition activated by timer"},
+ {0x5E06, "Idle_b condition activated by command"},
+ {0x5E07, "Idle_c condition activated by timer"},
+ {0x5E08, "Idle_c condition activated by command"},
+ {0x5E09, "Standby_y condition activated by timer"},
+ {0x5E0A, "Standby_y condition activated by command"},
{0x5E41, "Power state change to active"},
{0x5E42, "Power state change to idle"},
{0x5E43, "Power state change to standby"},
@@ -1091,7 +1124,28 @@ static const struct error_info additional[] =
{0x7403, "Incorrect data encryption key"},
{0x7404, "Cryptographic integrity validation failed"},
{0x7405, "Error decrypting data"},
+ {0x7406, "Unknown signature verification key"},
+ {0x7407, "Encryption parameters not useable"},
+ {0x7408, "Digital signature validation failure"},
+ {0x7409, "Encryption mode mismatch on read"},
+ {0x740A, "Encrypted block not raw read enabled"},
+ {0x740B, "Incorrect Encryption parameters"},
+ {0x740C, "Unable to decrypt parameter list"},
+ {0x740D, "Encryption algorithm disabled"},
+ {0x7410, "SA creation parameter value invalid"},
+ {0x7411, "SA creation parameter value rejected"},
+ {0x7412, "Invalid SA usage"},
+ {0x7421, "Data Encryption configuration prevented"},
+ {0x7430, "SA creation parameter not supported"},
+ {0x7440, "Authentication failed"},
+ {0x7461, "External data encryption key manager access error"},
+ {0x7462, "External data encryption key manager error"},
+ {0x7463, "External data encryption key not found"},
+ {0x7464, "External data encryption request not authorized"},
+ {0x746E, "External data encryption control timeout"},
+ {0x746F, "External data encryption control error"},
{0x7471, "Logical unit access not authorized"},
+ {0x7479, "Security conflict in translated device"},
{0, NULL}
};
@@ -1103,12 +1157,12 @@ struct error_info2 {
static const struct error_info2 additional2[] =
{
- {0x40,0x00,0x7f,"Ram failure (%x)"},
- {0x40,0x80,0xff,"Diagnostic failure on component (%x)"},
- {0x41,0x00,0xff,"Data path failure (%x)"},
- {0x42,0x00,0xff,"Power-on or self-test failure (%x)"},
- {0x4D,0x00,0xff,"Tagged overlapped commands (queue tag %x)"},
- {0x70,0x00,0xff,"Decompression exception short algorithm id of %x"},
+ {0x40, 0x00, 0x7f, "Ram failure (%x)"},
+ {0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"},
+ {0x41, 0x00, 0xff, "Data path failure (%x)"},
+ {0x42, 0x00, 0xff, "Power-on or self-test failure (%x)"},
+ {0x4D, 0x00, 0xff, "Tagged overlapped commands (task tag %x)"},
+ {0x70, 0x00, 0xff, "Decompression exception short algorithm id of %x"},
{0, 0, 0, NULL}
};
@@ -1157,14 +1211,15 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq) {
int i;
unsigned short code = ((asc << 8) | ascq);
- for (i=0; additional[i].text; i++)
+ for (i = 0; additional[i].text; i++)
if (additional[i].code12 == code)
return additional[i].text;
- for (i=0; additional2[i].fmt; i++)
+ for (i = 0; additional2[i].fmt; i++) {
if (additional2[i].code1 == asc &&
- additional2[i].code2_min >= ascq &&
- additional2[i].code2_max <= ascq)
+ ascq >= additional2[i].code2_min &&
+ ascq <= additional2[i].code2_max)
return additional2[i].fmt;
+ }
#endif
return NULL;
}
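
The constants.c hunk above also fixes an inverted range test: the old comparison (code2_min >= ascq && code2_max <= ascq) could only ever match when ascq equalled both bounds, while the new form checks that ascq falls inside [min, max]. A small standalone sketch of the corrected lookup (the two table entries mirror additional2[] above; everything else is illustrative):

        #include <stdio.h>

        struct range_fmt {
                unsigned char asc, ascq_min, ascq_max;
                const char *fmt;
        };

        static const struct range_fmt table[] = {
                {0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"},
                {0x4D, 0x00, 0xff, "Tagged overlapped commands (task tag %x)"},
                {0, 0, 0, NULL}
        };

        static const char *lookup(unsigned char asc, unsigned char ascq)
        {
                int i;

                for (i = 0; table[i].fmt; i++)
                        if (table[i].asc == asc &&
                            ascq >= table[i].ascq_min &&
                            ascq <= table[i].ascq_max)
                                return table[i].fmt;
                return NULL;
        }

        int main(void)
        {
                printf("%s\n", lookup(0x40, 0x85)); /* inside 0x80..0xff */
                return 0;
        }
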
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild
index 25a2032bfa26..70d060b7ff4f 100644
--- a/drivers/scsi/cxgb3i/Kbuild
+++ b/drivers/scsi/cxgb3i/Kbuild
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3
+EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3
cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 74369a3f963b..c399f485aa7d 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
#include <linux/inet.h>
#include <linux/crypto.h>
+#include <linux/if_vlan.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
@@ -184,6 +185,9 @@ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
struct cxgb3i_adapter *snic;
int i;
+ if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ ndev = vlan_dev_real_dev(ndev);
+
read_lock(&cxgb3i_snic_rwlock);
list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
for (i = 0; i < snic->hba_cnt; i++) {
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index a518f2eff19a..66a65de5f5c3 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -292,18 +292,15 @@ static int scsi_dh_notifier(struct notifier_block *nb,
sdev = to_scsi_device(dev);
if (action == BUS_NOTIFY_ADD_DEVICE) {
+ err = device_create_file(dev, &scsi_dh_state_attr);
+ /* don't care about err */
devinfo = device_handler_match(NULL, sdev);
- if (!devinfo)
- goto out;
-
- err = scsi_dh_handler_attach(sdev, devinfo);
- if (!err)
- err = device_create_file(dev, &scsi_dh_state_attr);
+ if (devinfo)
+ err = scsi_dh_handler_attach(sdev, devinfo);
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
device_remove_file(dev, &scsi_dh_state_attr);
scsi_dh_handler_detach(sdev, NULL);
}
-out:
return err;
}
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index a84072865fc2..2c266c01dc5a 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -473,16 +473,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
* limitation for the device. Try 40-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"No usable DMA configuration "
"aborting\n");
goto err_out_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 32-bit DMA "
@@ -490,7 +490,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 40-bit DMA "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index eabf36502856..bfc996971b81 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -245,7 +245,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct vnic_wq_copy *wq,
struct fnic_io_req *io_req,
struct scsi_cmnd *sc,
- u32 sg_count)
+ int sg_count)
{
struct scatterlist *sg;
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
@@ -260,9 +260,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
char msg[2];
if (sg_count) {
- BUG_ON(sg_count < 0);
- BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
-
/* For each SGE, create a device desc entry */
desc = io_req->sgl_list;
for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
@@ -344,7 +341,7 @@ int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
struct fnic *fnic;
struct vnic_wq_copy *wq;
int ret;
- u32 sg_count;
+ int sg_count;
unsigned long flags;
unsigned long ptr;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 869a11bdccbd..9928704e235f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1095,9 +1095,14 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
MAX_INDIRECT_BUFS);
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
+
+ if (hostdata->madapter_info.os_type == 3) {
+ enable_fast_fail(hostdata);
+ return;
+ }
}
- enable_fast_fail(hostdata);
+ send_srp_login(hostdata);
}
/**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f3c40898fc7d..90c94da8baa4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -897,8 +897,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (driver_byte(result) & DRIVER_SENSE)
scsi_print_sense("", cmd);
}
- blk_end_request_all(req, -EIO);
- scsi_next_command(cmd);
+ if (blk_end_request_err(req, -EIO))
+ scsi_requeue_command(q, cmd);
+ else
+ scsi_next_command(cmd);
break;
case ACTION_REPREP:
/* Unprep the request and put it back at the head of the queue.
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2eee9e6e4fe8..292c02f810d0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3670,13 +3670,14 @@ static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
int flagset;
+ unsigned long flags;
if (!rport->rqst_q)
return;
get_device(&rport->dev);
- spin_lock(rport->rqst_q->queue_lock);
+ spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
if (flagset)
@@ -3684,7 +3685,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
__blk_run_queue(rport->rqst_q);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
- spin_unlock(rport->rqst_q->queue_lock);
+ spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
put_device(&rport->dev);
}
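
The fc_bsg_goose_queue() fix above switches to the IRQ-saving lock variants because the request-queue lock can also be taken from interrupt context. A hedged kernel-context sketch of the idiom (demo_lock and the function are hypothetical, not scsi_transport_fc code, and the snippet is not compilable standalone):

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(demo_lock);

        static void demo_update_shared_state(void)
        {
                unsigned long flags;

                /* disable local interrupts and remember their prior state */
                spin_lock_irqsave(&demo_lock, flags);
                /* ... touch state that an interrupt handler also locks ... */
                spin_unlock_irqrestore(&demo_lock, flags);
        }
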
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8201387b4daa..ef142fd47a83 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -210,13 +210,11 @@ static void sg_put_dev(Sg_device *sdp);
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
- struct request_queue *q = sfp->parentdp->device->request_queue;
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
- return blk_verify_command(&q->cmd_filter,
- cmd, filp->f_mode & FMODE_WRITE);
+ return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
static int
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 97f3158fa7b5..27e84e4b1fa9 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -134,7 +134,7 @@ zalon_probe(struct parisc_device *dev)
host = ncr_attach(&zalon7xx_template, unit, &device);
if (!host)
- goto fail;
+ return -ENODEV;
if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index fb867a9f55e9..cf5de314e745 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1337,14 +1337,12 @@ static void serial8250_start_tx(struct uart_port *port)
serial_out(up, UART_IER, up->ier);
if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr, iir;
+ unsigned char lsr;
lsr = serial_in(up, UART_LSR);
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
- iir = serial_in(up, UART_IIR) & 0x0f;
if ((up->port.type == PORT_RM9000) ?
- (lsr & UART_LSR_THRE &&
- (iir == UART_IIR_NO_INT || iir == UART_IIR_THRI)) :
- (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT))
+ (lsr & UART_LSR_THRE) :
+ (lsr & UART_LSR_TEMT))
transmit_chars(up);
}
}
@@ -1677,7 +1675,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
INIT_LIST_HEAD(&up->list);
i->head = &up->list;
spin_unlock_irq(&i->lock);
-
+ irq_flags |= up->port.irqflags;
ret = request_irq(up->port.irq, serial8250_interrupt,
irq_flags, "serial", i);
if (ret < 0)
@@ -2026,7 +2024,7 @@ static int serial8250_startup(struct uart_port *port)
* allow register changes to become visible.
*/
spin_lock_irqsave(&up->port.lock, flags);
- if (up->port.flags & UPF_SHARE_IRQ)
+ if (up->port.irqflags & IRQF_SHARED)
disable_irq_nosync(up->port.irq);
wait_for_xmitr(up, UART_LSR_THRE);
@@ -2039,7 +2037,7 @@ static int serial8250_startup(struct uart_port *port)
iir = serial_in(up, UART_IIR);
serial_out(up, UART_IER, 0);
- if (up->port.flags & UPF_SHARE_IRQ)
+ if (up->port.irqflags & IRQF_SHARED)
enable_irq(up->port.irq);
spin_unlock_irqrestore(&up->port.lock, flags);
@@ -2671,6 +2669,7 @@ static void __init serial8250_isa_init_ports(void)
i++, up++) {
up->port.iobase = old_serial_port[i].port;
up->port.irq = irq_canonicalize(old_serial_port[i].irq);
+ up->port.irqflags = old_serial_port[i].irqflags;
up->port.uartclk = old_serial_port[i].baud_base * 16;
up->port.flags = old_serial_port[i].flags;
up->port.hub6 = old_serial_port[i].hub6;
@@ -2679,7 +2678,7 @@ static void __init serial8250_isa_init_ports(void)
up->port.regshift = old_serial_port[i].iomem_reg_shift;
set_io_from_upio(&up->port);
if (share_irqs)
- up->port.flags |= UPF_SHARE_IRQ;
+ up->port.irqflags |= IRQF_SHARED;
}
}
@@ -2869,6 +2868,7 @@ int __init early_serial_setup(struct uart_port *port)
p->iobase = port->iobase;
p->membase = port->membase;
p->irq = port->irq;
+ p->irqflags = port->irqflags;
p->uartclk = port->uartclk;
p->fifosize = port->fifosize;
p->regshift = port->regshift;
@@ -2942,6 +2942,7 @@ static int __devinit serial8250_probe(struct platform_device *dev)
port.iobase = p->iobase;
port.membase = p->membase;
port.irq = p->irq;
+ port.irqflags = p->irqflags;
port.uartclk = p->uartclk;
port.regshift = p->regshift;
port.iotype = p->iotype;
@@ -2954,7 +2955,7 @@ static int __devinit serial8250_probe(struct platform_device *dev)
port.serial_out = p->serial_out;
port.dev = &dev->dev;
if (share_irqs)
- port.flags |= UPF_SHARE_IRQ;
+ port.irqflags |= IRQF_SHARED;
ret = serial8250_register_port(&port);
if (ret < 0) {
dev_err(&dev->dev, "unable to register port at index %d "
@@ -3096,6 +3097,7 @@ int serial8250_register_port(struct uart_port *port)
uart->port.iobase = port->iobase;
uart->port.membase = port->membase;
uart->port.irq = port->irq;
+ uart->port.irqflags = port->irqflags;
uart->port.uartclk = port->uartclk;
uart->port.fifosize = port->fifosize;
uart->port.regshift = port->regshift;
diff --git a/drivers/serial/8250.h b/drivers/serial/8250.h
index 520260326f3d..9b34b04f64f9 100644
--- a/drivers/serial/8250.h
+++ b/drivers/serial/8250.h
@@ -20,6 +20,7 @@ struct old_serial_port {
unsigned int baud_base;
unsigned int port;
unsigned int irq;
+ unsigned long irqflags;
unsigned int flags;
unsigned char hub6;
unsigned char io_type;
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index a07015d646dd..6160e03f410c 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -759,6 +759,8 @@ static int pci_netmos_init(struct pci_dev *dev)
/* subdevice 0x00PS means <P> parallel, <S> serial */
unsigned int num_serial = dev->subsystem_device & 0xf;
+ if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
+ return 0;
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return 0;
@@ -3557,6 +3559,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_VENDOR_ID_IBM, 0x0299,
0, 0, pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 66f52674ca0c..12bd64684f12 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -707,12 +707,13 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
- unsigned short ssr_status, scr_status;
+ unsigned short ssr_status, scr_status, err_enabled;
struct uart_port *port = ptr;
irqreturn_t ret = IRQ_NONE;
ssr_status = sci_in(port, SCxSR);
scr_status = sci_in(port, SCSCR);
+ err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE);
/* Tx Interrupt */
if ((ssr_status & 0x0020) && (scr_status & SCI_CTRL_FLAGS_TIE))
@@ -721,10 +722,10 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
if ((ssr_status & 0x0002) && (scr_status & SCI_CTRL_FLAGS_RIE))
ret = sci_rx_interrupt(irq, ptr);
/* Error Interrupt */
- if ((ssr_status & 0x0080) && (scr_status & SCI_CTRL_FLAGS_REIE))
+ if ((ssr_status & 0x0080) && err_enabled)
ret = sci_er_interrupt(irq, ptr);
/* Break Interrupt */
- if ((ssr_status & 0x0010) && (scr_status & SCI_CTRL_FLAGS_REIE))
+ if ((ssr_status & 0x0010) && err_enabled)
ret = sci_br_interrupt(irq, ptr);
return ret;
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c
index 0573f3b5175e..dac550e57c29 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/serial/vr41xx_siu.c
@@ -1,7 +1,7 @@
/*
* Driver for NEC VR4100 series Serial Interface Unit.
*
- * Copyright (C) 2004-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* Based on drivers/serial/8250.c, by Russell King.
*
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index aa90ddb37066..8980a5640bd9 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -514,6 +514,8 @@ static int __init uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+
master->bus_num = 2; /* "official" */
master->num_chipselect = 4;
master->setup = uwire_setup;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 2a5abc08e857..f1db395dd889 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -258,6 +258,11 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
+ int do_setup = -1;
+ int (*setup_transfer)(struct spi_device *,
+ struct spi_transfer *);
+
+ setup_transfer = bitbang->setup_transfer;
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
@@ -269,8 +274,6 @@ static void bitbang_work(struct work_struct *work)
unsigned tmp;
unsigned cs_change;
int status;
- int (*setup_transfer)(struct spi_device *,
- struct spi_transfer *);
m = container_of(bitbang->queue.next, struct spi_message,
queue);
@@ -287,19 +290,19 @@ static void bitbang_work(struct work_struct *work)
tmp = 0;
cs_change = 1;
status = 0;
- setup_transfer = NULL;
list_for_each_entry (t, &m->transfers, transfer_list) {
- /* override or restore speed and wordsize */
- if (t->speed_hz || t->bits_per_word) {
- setup_transfer = bitbang->setup_transfer;
+ /* override speed or wordsize? */
+ if (t->speed_hz || t->bits_per_word)
+ do_setup = 1;
+
+ /* init (-1) or override (1) transfer params */
+ if (do_setup != 0) {
if (!setup_transfer) {
status = -ENOPROTOOPT;
break;
}
- }
- if (setup_transfer) {
status = setup_transfer(spi, t);
if (status < 0)
break;
@@ -363,9 +366,10 @@ static void bitbang_work(struct work_struct *work)
m->status = status;
m->complete(m->context);
- /* restore speed and wordsize */
- if (setup_transfer)
+ /* restore speed and wordsize if it was overridden */
+ if (do_setup == 1)
setup_transfer(spi, NULL);
+ do_setup = 0;
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5d869c4d3eb2..606e7a40a8da 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -58,15 +58,20 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
/* Bit masks for spi_device.mode management. Note that incorrect
- * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other
- * devices on a shared bus: CS_HIGH, because this device will be
- * active when it shouldn't be; 3WIRE, because when active it won't
- * behave as it should.
+ * settings for some settings can cause *lots* of trouble for other
+ * devices on a shared bus:
*
- * REVISIT should changing those two modes be privileged?
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
- | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP)
+ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
+ | SPI_NO_CS | SPI_READY)
struct spidev_data {
dev_t devt;
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 3fd3e3b412b6..3c6feed46f6e 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -49,29 +49,54 @@ static const u32 ipsflag_irq_shift[] = {
static inline u32 ssb_irqflag(struct ssb_device *dev)
{
- return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG;
+ u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG);
+ if (tpsflag)
+ return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG;
+ else
+ /* not irq supported */
+ return 0x3f;
+}
+
+static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag)
+{
+ struct ssb_bus *bus = rdev->bus;
+ int i;
+ for (i = 0; i < bus->nr_devices; i++) {
+ struct ssb_device *dev;
+ dev = &(bus->devices[i]);
+ if (ssb_irqflag(dev) == irqflag)
+ return dev;
+ }
+ return NULL;
}
/* Get the MIPS IRQ assignment for a specified device.
* If unassigned, 0 is returned.
+ * If disabled, 5 is returned.
+ * If not supported, 6 is returned.
*/
unsigned int ssb_mips_irq(struct ssb_device *dev)
{
struct ssb_bus *bus = dev->bus;
+ struct ssb_device *mdev = bus->mipscore.dev;
u32 irqflag;
u32 ipsflag;
u32 tmp;
unsigned int irq;
irqflag = ssb_irqflag(dev);
+ if (irqflag == 0x3f)
+ return 6;
ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG);
for (irq = 1; irq <= 4; irq++) {
tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]);
if (tmp == irqflag)
break;
}
- if (irq == 5)
- irq = 0;
+ if (irq == 5) {
+ if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))
+ irq = 0;
+ }
return irq;
}
@@ -97,25 +122,56 @@ static void set_irq(struct ssb_device *dev, unsigned int irq)
struct ssb_device *mdev = bus->mipscore.dev;
u32 irqflag = ssb_irqflag(dev);
+ BUG_ON(oldirq == 6);
+
dev->irq = irq + 2;
- ssb_dprintk(KERN_INFO PFX
- "set_irq: core 0x%04x, irq %d => %d\n",
- dev->id.coreid, oldirq, irq);
/* clear the old irq */
if (oldirq == 0)
ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)));
- else
+ else if (oldirq != 5)
clear_irq(bus, oldirq);
/* assign the new one */
if (irq == 0) {
ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC)));
} else {
+ u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG);
+ if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) {
+ u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq];
+ struct ssb_device *olddev = find_device(dev, oldipsflag);
+ if (olddev)
+ set_irq(olddev, 0);
+ }
irqflag <<= ipsflag_irq_shift[irq];
- irqflag |= (ssb_read32(mdev, SSB_IPSFLAG) & ~ipsflag_irq_mask[irq]);
+ irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
ssb_write32(mdev, SSB_IPSFLAG, irqflag);
}
+ ssb_dprintk(KERN_INFO PFX
+ "set_irq: core 0x%04x, irq %d => %d\n",
+ dev->id.coreid, oldirq+2, irq+2);
+}
+
+static void print_irq(struct ssb_device *dev, unsigned int irq)
+{
+ int i;
+ static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
+ ssb_dprintk(KERN_INFO PFX
+ "core 0x%04x, irq :", dev->id.coreid);
+ for (i = 0; i <= 6; i++) {
+ ssb_dprintk(" %s%s", irq_name[i], i==irq?"*":" ");
+ }
+ ssb_dprintk("\n");
+}
+
+static void dump_irq(struct ssb_bus *bus)
+{
+ int i;
+ for (i = 0; i < bus->nr_devices; i++) {
+ struct ssb_device *dev;
+ dev = &(bus->devices[i]);
+ print_irq(dev, ssb_mips_irq(dev));
+ }
}
static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
@@ -197,16 +253,23 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
/* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */
for (irq = 2, i = 0; i < bus->nr_devices; i++) {
+ int mips_irq;
dev = &(bus->devices[i]);
- dev->irq = ssb_mips_irq(dev) + 2;
+ mips_irq = ssb_mips_irq(dev);
+ if (mips_irq > 4)
+ dev->irq = 0;
+ else
+ dev->irq = mips_irq + 2;
+ if (dev->irq > 5)
+ continue;
switch (dev->id.coreid) {
case SSB_DEV_USB11_HOST:
/* shouldn't need a separate irq line for non-4710, most of them have a proper
* external usb controller on the pci */
if ((bus->chip_id == 0x4710) && (irq <= 4)) {
set_irq(dev, irq++);
- break;
}
+ break;
/* fallthrough */
case SSB_DEV_PCI:
case SSB_DEV_ETHERNET:
@@ -220,6 +283,8 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
}
}
}
+ ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n");
+ dump_irq(bus);
ssb_mips_serial_init(mcore);
ssb_mips_flash_detect(mcore);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7b15474d5e34..5b15d9d8896b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -560,7 +560,7 @@ static void acm_waker(struct work_struct *waker)
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
struct acm *acm;
- int rv = -EINVAL;
+ int rv = -ENODEV;
int i;
dbg("Entering acm_tty_open.");
@@ -687,7 +687,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
/* Perform the closing process and see if we need to do the hardware
shutdown */
- if (tty_port_close_start(&acm->port, tty, filp) == 0)
+ if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0)
return;
acm_port_down(acm, 0);
tty_port_close_end(&acm->port, tty);
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 43dcf9e1af6b..0dddd2f8ff35 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:m66592_udc");
-#define DRIVER_VERSION "18 Oct 2007"
+#define DRIVER_VERSION "26 Jun 2009"
/* module parameters */
#if defined(CONFIG_SUPERH_BUILT_IN_M66592)
@@ -276,24 +276,27 @@ static int pipe_buffer_setting(struct m66592 *m66592,
buf_bsize = 0;
break;
case M66592_BULK:
- bufnum = m66592->bi_bufnum +
- (info->pipe - M66592_BASE_PIPENUM_BULK) * 16;
- m66592->bi_bufnum += 16;
+ /* isochronous pipes may be used as bulk pipes */
+ if (info->pipe > M66592_BASE_PIPENUM_BULK)
+ bufnum = info->pipe - M66592_BASE_PIPENUM_BULK;
+ else
+ bufnum = info->pipe - M66592_BASE_PIPENUM_ISOC;
+
+ bufnum = M66592_BASE_BUFNUM + (bufnum * 16);
buf_bsize = 7;
pipecfg |= M66592_DBLB;
if (!info->dir_in)
pipecfg |= M66592_SHTNAK;
break;
case M66592_ISO:
- bufnum = m66592->bi_bufnum +
+ bufnum = M66592_BASE_BUFNUM +
(info->pipe - M66592_BASE_PIPENUM_ISOC) * 16;
- m66592->bi_bufnum += 16;
buf_bsize = 7;
break;
}
- if (m66592->bi_bufnum > M66592_MAX_BUFNUM) {
- pr_err("m66592 pipe memory is insufficient(%d)\n",
- m66592->bi_bufnum);
+
+ if (buf_bsize && ((bufnum + 16) >= M66592_MAX_BUFNUM)) {
+ pr_err("m66592 pipe memory is insufficient\n");
return -ENOMEM;
}
@@ -313,17 +316,6 @@ static void pipe_buffer_release(struct m66592 *m66592,
if (info->pipe == 0)
return;
- switch (info->type) {
- case M66592_BULK:
- if (is_bulk_pipe(info->pipe))
- m66592->bi_bufnum -= 16;
- break;
- case M66592_ISO:
- if (is_isoc_pipe(info->pipe))
- m66592->bi_bufnum -= 16;
- break;
- }
-
if (is_bulk_pipe(info->pipe)) {
m66592->bulk--;
} else if (is_interrupt_pipe(info->pipe))
@@ -1603,8 +1595,6 @@ static int __init m66592_probe(struct platform_device *pdev)
m66592->timer.data = (unsigned long)m66592;
m66592->reg = reg;
- m66592->bi_bufnum = M66592_BASE_BUFNUM;
-
ret = request_irq(irq, m66592_irq, IRQF_DISABLED | IRQF_SHARED,
udc_name, m66592);
if (ret < 0) {
diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h
index 286ce07e7960..9a9c2bf9fbd5 100644
--- a/drivers/usb/gadget/m66592-udc.h
+++ b/drivers/usb/gadget/m66592-udc.h
@@ -506,7 +506,6 @@ struct m66592 {
int interrupt;
int isochronous;
int num_dma;
- int bi_bufnum; /* bulk and isochronous's bufnum */
};
#define gadget_to_m66592(_gadget) container_of(_gadget, struct m66592, gadget)
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 0de847cffb19..1a920c70b5a1 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -338,10 +338,10 @@ config USB_R8A66597_HCD
config SUPERH_ON_CHIP_R8A66597
boolean "Enable SuperH on-chip R8A66597 USB"
- depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723)
+ depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7724)
help
This driver enables support for the on-chip R8A66597 in the
- SH7366 and SH7723 processors.
+ SH7366, SH7723 and SH7724 processors.
config USB_WHCI_HCD
tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index c2d80f80448b..16fecb8ecc39 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -418,7 +418,7 @@ static struct ed *ed_get (
is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);
/* FIXME usbcore changes dev->devnum before SET_ADDRESS
- * suceeds ... otherwise we wouldn't need "pipe".
+ * succeeds ... otherwise we wouldn't need "pipe".
*/
info = usb_pipedevice (pipe);
ed->type = usb_pipetype(pipe);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 554a414f65d1..1191928902f4 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2168,8 +2168,9 @@ static int __devexit musb_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
-static int musb_suspend(struct platform_device *pdev, pm_message_t message)
+static int musb_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
unsigned long flags;
struct musb *musb = dev_to_musb(&pdev->dev);
@@ -2196,8 +2197,9 @@ static int musb_suspend(struct platform_device *pdev, pm_message_t message)
return 0;
}
-static int musb_resume_early(struct platform_device *pdev)
+static int musb_resume_noirq(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct musb *musb = dev_to_musb(&pdev->dev);
if (!musb->clock)
@@ -2215,9 +2217,14 @@ static int musb_resume_early(struct platform_device *pdev)
return 0;
}
+static struct dev_pm_ops musb_dev_pm_ops = {
+ .suspend = musb_suspend,
+ .resume_noirq = musb_resume_noirq,
+};
+
+#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
#else
-#define musb_suspend NULL
-#define musb_resume_early NULL
+#define MUSB_DEV_PM_OPS NULL
#endif
static struct platform_driver musb_driver = {
@@ -2225,11 +2232,10 @@ static struct platform_driver musb_driver = {
.name = (char *)musb_driver_name,
.bus = &platform_bus_type,
.owner = THIS_MODULE,
+ .pm = MUSB_DEV_PM_OPS,
},
.remove = __devexit_p(musb_remove),
.shutdown = musb_shutdown,
- .suspend = musb_suspend,
- .resume_early = musb_resume_early,
};
/*-------------------------------------------------------------------------*/
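
The musb hunks above convert the legacy platform_driver .suspend/.resume_early callbacks into a struct dev_pm_ops hung off driver.pm, with the resume work moved to the _noirq phase. A hedged, generic sketch of that pattern (names are hypothetical, not musb code):

        #include <linux/platform_device.h>
        #include <linux/pm.h>

        static int demo_suspend(struct device *dev)      { return 0; }
        static int demo_resume_noirq(struct device *dev) { return 0; }

        static const struct dev_pm_ops demo_pm_ops = {
                .suspend      = demo_suspend,
                .resume_noirq = demo_resume_noirq,
        };

        static struct platform_driver demo_driver = {
                .driver = {
                        .name = "demo",
                        .pm   = &demo_pm_ops,
                },
        };
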
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 329e09814c01..9e6027b3a2e9 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -334,6 +334,9 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
{
struct usb_serial_port *port = tty->driver_data;
+ if (!port)
+ return;
+
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d6d65ef85f54..55022f33250a 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -616,6 +616,8 @@ config FB_STI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select STI_CONSOLE
+ select VT
default y
---help---
STI refers to the HP "Standard Text Interface" which is a set of
@@ -933,7 +935,7 @@ config FB_S1D13XXX
config FB_ATMEL
tristate "AT91/AT32 LCD Controller support"
- depends on FB && (ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 || AVR32)
+ depends on FB && (ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45 || ARCH_AT91CAP9 || AVR32)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 018850c116c6..497ff8af03ed 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2414,7 +2414,10 @@ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
if (err)
return err;
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- return fbhw->encode_fix(fix, &par);
+ mutex_lock(&info->mm_lock);
+ err = fbhw->encode_fix(fix, &par);
+ mutex_unlock(&info->mm_lock);
+ return err;
}
static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2743,7 +2746,9 @@ static int atafb_set_par(struct fb_info *info)
/* Decode wanted screen parameters */
fbhw->decode_var(&info->var, par);
+ mutex_lock(&info->mm_lock);
fbhw->encode_fix(&info->fix, par);
+ mutex_unlock(&info->mm_lock);
/* Set new videomode */
ata_set_par(par);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 5afd64482f55..cb88394ba995 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -270,7 +270,9 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
smem_len = (var->xres_virtual * var->yres_virtual
* ((var->bits_per_pixel + 7) / 8));
+ mutex_lock(&info->mm_lock);
info->fix.smem_len = max(smem_len, sinfo->smem_len);
+ mutex_unlock(&info->mm_lock);
info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
(dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index 7691e73823d3..1f39a62f899b 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -187,6 +187,8 @@ struct atyfb_par {
int mtrr_reg;
#endif
u32 mem_cntl;
+ struct crtc saved_crtc;
+ union aty_pll saved_pll;
};
/*
@@ -217,6 +219,7 @@ struct atyfb_par {
#define M64F_XL_DLL 0x00080000
#define M64F_MFB_FORCE_4 0x00100000
#define M64F_HW_TRIPLE 0x00200000
+#define M64F_XL_MEM 0x00400000
/*
* Register access
*/
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 1207c208a30b..63d3739d43a8 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -66,6 +66,8 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/backlight.h>
+#include <linux/reboot.h>
+#include <linux/dmi.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -249,8 +251,6 @@ static int aty_init(struct fb_info *info);
static int store_video_par(char *videopar, unsigned char m64_num);
#endif
-static struct crtc saved_crtc;
-static union aty_pll saved_pll;
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -261,6 +261,8 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
static int read_aty_sense(const struct atyfb_par *par);
#endif
+static DEFINE_MUTEX(reboot_lock);
+static struct fb_info *reboot_info;
/*
* Interface used by the world
@@ -361,8 +363,8 @@ static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
-#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4)
-#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS)
+#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM)
+#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS)
static struct {
u16 pci_id;
@@ -539,6 +541,7 @@ static char ram_edo[] __devinitdata = "EDO";
static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
+static char ram_wram[] __devinitdata = "WRAM";
static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */
@@ -553,6 +556,10 @@ static char *aty_gx_ram[8] __devinitdata = {
#ifdef CONFIG_FB_ATY_CT
static char *aty_ct_ram[8] __devinitdata = {
ram_off, ram_dram, ram_edo, ram_edo,
+ ram_sdram, ram_sgram, ram_wram, ram_resv
+};
+static char *aty_xl_ram[8] __devinitdata = {
+ ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_sdram32, ram_resv
};
#endif /* CONFIG_FB_ATY_CT */
@@ -760,6 +767,17 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
+static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
+{
+ u32 line_length = vxres * bpp / 8;
+
+ if (par->ram_type == SGRAM ||
+ (!M64_HAS(XL_MEM) && par->ram_type == WRAM))
+ line_length = (line_length + 63) & ~63;
+
+ return line_length;
+}
+
static int aty_var_to_crtc(const struct fb_info *info,
const struct fb_var_screeninfo *var, struct crtc *crtc)
{
@@ -769,13 +787,14 @@ static int aty_var_to_crtc(const struct fb_info *info,
u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width, dp_pix_width, dp_chain_mask;
+ u32 line_length;
/* input */
- xres = var->xres;
+ xres = (var->xres + 7) & ~7;
yres = var->yres;
- vxres = var->xres_virtual;
+ vxres = (var->xres_virtual + 7) & ~7;
vyres = var->yres_virtual;
- xoffset = var->xoffset;
+ xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
bpp = var->bits_per_pixel;
if (bpp == 16)
@@ -827,7 +846,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
} else
FAIL("invalid bpp");
- if (vxres * vyres * bpp / 8 > info->fix.smem_len)
+ line_length = calc_line_length(par, vxres, bpp);
+
+ if (vyres * line_length > info->fix.smem_len)
FAIL("not enough video RAM");
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
@@ -969,7 +990,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
crtc->xoffset = xoffset;
crtc->yoffset = yoffset;
crtc->bpp = bpp;
- crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19);
+ crtc->off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
crtc->vline_crnt_vline = 0;
crtc->h_tot_disp = h_total | (h_disp<<16);
@@ -1394,7 +1417,9 @@ static int atyfb_set_par(struct fb_info *info)
}
aty_st_8(DAC_MASK, 0xff, par);
- info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8;
+ info->fix.line_length = calc_line_length(par, var->xres_virtual,
+ var->bits_per_pixel);
+
info->fix.visual = var->bits_per_pixel <= 8 ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
@@ -1505,10 +1530,12 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
{
u32 xoffset = info->var.xoffset;
u32 yoffset = info->var.yoffset;
- u32 vxres = par->crtc.vxres;
+ u32 line_length = info->fix.line_length;
u32 bpp = info->var.bits_per_pixel;
- par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19);
+ par->crtc.off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
}
@@ -2201,7 +2228,7 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
const int *refresh_tbl;
int i, size;
- if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) {
+ if (M64_HAS(XL_MEM)) {
refresh_tbl = ragexl_tbl;
size = ARRAY_SIZE(ragexl_tbl);
} else {
@@ -2335,7 +2362,10 @@ static int __devinit aty_init(struct fb_info *info)
par->pll_ops = &aty_pll_ct;
par->bus_type = PCI;
par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
- ramname = aty_ct_ram[par->ram_type];
+ if (M64_HAS(XL_MEM))
+ ramname = aty_xl_ram[par->ram_type];
+ else
+ ramname = aty_ct_ram[par->ram_type];
/* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
par->pll_limits.mclk = 63;
@@ -2390,9 +2420,9 @@ static int __devinit aty_init(struct fb_info *info)
#endif /* CONFIG_FB_ATY_CT */
/* save previous video mode */
- aty_get_crtc(par, &saved_crtc);
+ aty_get_crtc(par, &par->saved_crtc);
if(par->pll_ops->get_pll)
- par->pll_ops->get_pll(info, &saved_pll);
+ par->pll_ops->get_pll(info, &par->saved_pll);
par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
gtb_memsize = M64_HAS(GTB_DSP);
@@ -2667,8 +2697,8 @@ static int __devinit aty_init(struct fb_info *info)
aty_init_exit:
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
@@ -3502,6 +3532,11 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
par->mmap_map[1].prot_flag = _PAGE_E;
#endif /* __sparc__ */
+ mutex_lock(&reboot_lock);
+ if (!reboot_info)
+ reboot_info = info;
+ mutex_unlock(&reboot_lock);
+
return 0;
err_release_io:
@@ -3614,8 +3649,8 @@ static void __devexit atyfb_remove(struct fb_info *info)
struct atyfb_par *par = (struct atyfb_par *) info->par;
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
unregister_framebuffer(info);
@@ -3661,6 +3696,11 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
+ mutex_lock(&reboot_lock);
+ if (reboot_info == info)
+ reboot_info = NULL;
+ mutex_unlock(&reboot_lock);
+
atyfb_remove(info);
}
@@ -3808,6 +3848,56 @@ static int __init atyfb_setup(char *options)
}
#endif /* MODULE */
+static int atyfb_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct atyfb_par *par;
+
+ if (code != SYS_RESTART)
+ return NOTIFY_DONE;
+
+ mutex_lock(&reboot_lock);
+
+ if (!reboot_info)
+ goto out;
+
+ if (!lock_fb_info(reboot_info))
+ goto out;
+
+ par = reboot_info->par;
+
+ /*
+ * HP OmniBook 500's BIOS doesn't like the state of the
+ * hardware after atyfb has been used. Restore the hardware
+ * to the original state to allow successful reboots.
+ */
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(reboot_info, &par->saved_pll);
+
+ unlock_fb_info(reboot_info);
+ out:
+ mutex_unlock(&reboot_lock);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block atyfb_reboot_notifier = {
+ .notifier_call = atyfb_reboot_notify,
+};
+
+static const struct dmi_system_id atyfb_reboot_ids[] = {
+ {
+ .ident = "HP OmniBook 500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"),
+ },
+ },
+
+ { }
+};
+
static int __init atyfb_init(void)
{
int err1 = 1, err2 = 1;
@@ -3826,11 +3916,20 @@ static int __init atyfb_init(void)
err2 = atyfb_atari_probe();
#endif
- return (err1 && err2) ? -ENODEV : 0;
+ if (err1 && err2)
+ return -ENODEV;
+
+ if (dmi_check_system(atyfb_reboot_ids))
+ register_reboot_notifier(&atyfb_reboot_notifier);
+
+ return 0;
}
static void __exit atyfb_exit(void)
{
+ if (dmi_check_system(atyfb_reboot_ids))
+ unregister_reboot_notifier(&atyfb_reboot_notifier);
+
#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
#endif
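
Several atyfb hunks above stop assuming that the byte pitch equals xres_virtual * bpp / 8 and instead go through calc_line_length(), which rounds the pitch up to a 64-byte multiple for SGRAM (and, on non-XL parts, WRAM). A standalone sketch of that rounding (illustrative only; needs_align stands in for the ram-type test):

        #include <stdio.h>
        #include <stdint.h>

        static uint32_t calc_line_length(uint32_t vxres, uint32_t bpp, int needs_align)
        {
                uint32_t line_length = vxres * bpp / 8;

                if (needs_align)
                        line_length = (line_length + 63) & ~63u;
                return line_length;
        }

        int main(void)
        {
                printf("1024 x  8bpp -> %u bytes\n", (unsigned) calc_line_length(1024, 8, 1));
                printf("1000 x 16bpp -> %u bytes\n", (unsigned) calc_line_length(1000, 16, 1));
                return 0;
        }
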
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index 0cc9724e61a2..51fcc0a2c94a 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -63,14 +63,17 @@ static void reset_GTC_3D_engine(const struct atyfb_par *par)
void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
{
u32 pitch_value;
+ u32 vxres;
/* determine modal information from global mode structure */
- pitch_value = info->var.xres_virtual;
+ pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8);
+ vxres = info->var.xres_virtual;
if (info->var.bits_per_pixel == 24) {
/* In 24 bpp, the engine is in 8 bpp - this requires that all */
/* horizontal coordinates and widths must be adjusted */
pitch_value *= 3;
+ vxres *= 3;
}
/* On GTC (RagePro), we need to reset the 3D engine before */
@@ -133,7 +136,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
aty_st_le32(SC_LEFT, 0, par);
aty_st_le32(SC_TOP, 0, par);
aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par);
- aty_st_le32(SC_RIGHT, pitch_value - 1, par);
+ aty_st_le32(SC_RIGHT, vxres - 1, par);
/* set background color to minimum value (usually BLACK) */
aty_st_le32(DP_BKGD_CLR, 0, par);
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 1dae7f8f3c6b..51422fc4f606 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -356,7 +356,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
- lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, sizeof(GFP_KERNEL));
+ lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
if (lcd->buf == NULL) {
kfree(lcd);
return -ENOMEM;
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index 7bad24ed04ef..108b89e09a80 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -1,7 +1,7 @@
/*
* Cobalt server LCD frame buffer driver.
*
- * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index f8a09bf8d0cd..53ea05645ff8 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1310,8 +1310,6 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
-__acquires(&info->lock)
-__releases(&info->lock)
{
int fbidx = iminor(file->f_path.dentry->d_inode);
struct fb_info *info = registered_fb[fbidx];
@@ -1325,16 +1323,14 @@ __releases(&info->lock)
off = vma->vm_pgoff << PAGE_SHIFT;
if (!fb)
return -ENODEV;
+ mutex_lock(&info->mm_lock);
if (fb->fb_mmap) {
int res;
- mutex_lock(&info->lock);
res = fb->fb_mmap(info, vma);
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return res;
}
- mutex_lock(&info->lock);
-
/* frame buffer memory */
start = info->fix.smem_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
@@ -1342,13 +1338,13 @@ __releases(&info->lock)
/* memory mapped io */
off -= len;
if (info->var.accel_flags) {
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return -EINVAL;
}
start = info->fix.mmio_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
}
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len)
return -EINVAL;
@@ -1518,6 +1514,7 @@ register_framebuffer(struct fb_info *fb_info)
break;
fb_info->node = i;
mutex_init(&fb_info->lock);
+ mutex_init(&fb_info->mm_lock);
fb_info->dev = device_create(fb_class, fb_info->device,
MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index f153c581cbd7..0bf2190928d0 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -750,24 +750,26 @@ static void update_lcdc(struct fb_info *info)
static int map_video_memory(struct fb_info *info)
{
phys_addr_t phys;
+ u32 smem_len = info->fix.line_length * info->var.yres_virtual;
pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
+ pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len);
- info->fix.smem_len = info->fix.line_length * info->var.yres_virtual;
- pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len);
- info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys);
+ info->screen_base = fsl_diu_alloc(smem_len, &phys);
if (info->screen_base == NULL) {
printk(KERN_ERR "Unable to allocate fb memory\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (unsigned long) phys;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
- info->fix.smem_start,
- info->fix.smem_len);
+ info->fix.smem_start, info->fix.smem_len);
pr_debug("screen base %p\n", info->screen_base);
return 0;
@@ -776,9 +778,11 @@ static int map_video_memory(struct fb_info *info)
static void unmap_video_memory(struct fb_info *info)
{
fsl_diu_free(info->screen_base, info->fix.smem_len);
+ mutex_lock(&info->mm_lock);
info->screen_base = NULL;
info->fix.smem_start = 0;
info->fix.smem_len = 0;
+ mutex_unlock(&info->mm_lock);
}
/*
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 2e940199fc89..71960672d721 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1090,8 +1090,10 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strcpy(fix->id, "I810");
+ mutex_lock(&info->mm_lock);
fix->smem_start = par->fb.physical;
fix->smem_len = par->fb.size;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->xpanstep = 8;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 8e7a275df50c..59c3a2e14913 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -724,8 +724,10 @@ static void matroxfb_update_fix(WPMINFO2)
struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
DBG(__func__)
+ mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock);
fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
+ mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock);
}
static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2081,6 +2083,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
spin_lock_init(&ACCESS_FBINFO(lock.accel));
init_rwsem(&ACCESS_FBINFO(crtc2.lock));
init_rwsem(&ACCESS_FBINFO(altout.lock));
+ mutex_init(&ACCESS_FBINFO(fbcon).mm_lock);
ACCESS_FBINFO(irq_flags) = 0;
init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait));
init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait));
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 7ac4c5f6145d..909e10a11898 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -289,13 +289,16 @@ static int matroxfb_dh_release(struct fb_info* info, int user) {
#undef m2info
}
-static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) {
+static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info)
+{
struct fb_fix_screeninfo *fix = &m2info->fbcon.fix;
strcpy(fix->id, "MATROX DH");
+ mutex_lock(&m2info->fbcon.mm_lock);
fix->smem_start = m2info->video.base;
fix->smem_len = m2info->video.len_usable;
+ mutex_unlock(&m2info->fbcon.mm_lock);
fix->ypanstep = 1;
fix->ywrapstep = 0;
fix->xpanstep = 8; /* TBD */
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index b7af5256e887..567fb944bd2a 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -669,7 +669,7 @@ static uint32_t bpp_to_pixfmt(int bpp)
}
static int mx3fb_blank(int blank, struct fb_info *fbi);
-static int mx3fb_map_video_memory(struct fb_info *fbi);
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len);
static int mx3fb_unmap_video_memory(struct fb_info *fbi);
/**
@@ -742,8 +742,7 @@ static int mx3fb_set_par(struct fb_info *fbi)
if (fbi->fix.smem_start)
mx3fb_unmap_video_memory(fbi);
- fbi->fix.smem_len = mem_len;
- if (mx3fb_map_video_memory(fbi) < 0) {
+ if (mx3fb_map_video_memory(fbi, mem_len) < 0) {
mutex_unlock(&mx3_fbi->mutex);
return -ENOMEM;
}
@@ -1198,6 +1197,7 @@ static int mx3fb_resume(struct platform_device *pdev)
/**
* mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
* @fbi: framebuffer information pointer
+ * @mem_len: length of mapped memory
* @return: Error code indicating success or failure
*
* This buffer is remapped into a non-cached, non-buffered, memory region to
@@ -1205,23 +1205,26 @@ static int mx3fb_resume(struct platform_device *pdev)
* area is remapped, all virtual memory access to the video memory should occur
* at the new region.
*/
-static int mx3fb_map_video_memory(struct fb_info *fbi)
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len)
{
int retval = 0;
dma_addr_t addr;
fbi->screen_base = dma_alloc_writecombine(fbi->device,
- fbi->fix.smem_len,
+ mem_len,
&addr, GFP_DMA);
if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
- fbi->fix.smem_len);
+ mem_len);
retval = -EBUSY;
goto err0;
}
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = addr;
+ fbi->fix.smem_len = mem_len;
+ mutex_unlock(&fbi->mm_lock);
dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
(uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
@@ -1251,8 +1254,10 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
fbi->screen_base, fbi->fix.smem_start);
fbi->screen_base = 0;
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
return 0;
}
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 842639053d05..8862233d57b6 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -393,8 +393,10 @@ static void set_fb_fix(struct fb_info *fbi)
rg = &plane->fbdev->mem_desc.region[plane->idx];
fbi->screen_base = rg->vaddr;
+ mutex_lock(&fbi->mm_lock);
fix->smem_start = rg->paddr;
fix->smem_len = rg->size;
+ mutex_unlock(&fbi->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
bpp = var->bits_per_pixel;
@@ -886,8 +888,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
* plane memory is dealloc'ed, the other
* screen parameters in var / fix are invalid.
*/
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
}
}
}
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 03b3670130a0..bacfabd9ce16 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -141,7 +141,9 @@ static int platinumfb_set_par (struct fb_info *info)
offset = 0x10;
info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
+ mutex_unlock(&info->mm_lock);
info->fix.visual = (pinfo->cmode == CMODE_8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 0889d50c3288..6506117c134b 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -815,8 +815,10 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb)
ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
ofb->video_mem_size = size;
+ mutex_lock(&ofb->fb.mm_lock);
ofb->fb.fix.smem_start = ofb->video_mem_phys;
ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual;
+ mutex_unlock(&ofb->fb.mm_lock);
ofb->fb.screen_base = ofb->video_mem;
return 0;
}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 653bdfee3057..9f6d6e61f0cc 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -120,18 +120,6 @@ static int sh7760_setcolreg (u_int regno,
return 0;
}
-static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info,
- unsigned long stride)
-{
- memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strcpy(fix->id, "sh7760-lcdc");
-
- fix->smem_start = (unsigned long)info->screen_base;
- fix->smem_len = info->screen_size;
-
- fix->line_length = stride;
-}
-
static int sh7760fb_get_color_info(struct device *dev,
u16 lddfr, int *bpp, int *gray)
{
@@ -334,7 +322,8 @@ static int sh7760fb_set_par(struct fb_info *info)
iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */
- encode_fix(&info->fix, info, stride);
+ info->fix.line_length = stride;
+
sh7760fb_check_var(&info->var, info);
sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
@@ -435,6 +424,8 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
info->screen_base = fbmem;
info->screen_size = vram;
+ info->fix.smem_start = (unsigned long)info->screen_base;
+ info->fix.smem_len = info->screen_size;
return 0;
}
@@ -520,6 +511,8 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev)
info->var.transp.length = 0;
info->var.transp.msb_right = 0;
+ strcpy(info->fix.id, "sh7760-lcdc");
+
/* set the DON2 bit now, before cmap allocation, as it will randomize
* palette memory.
*/
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index f10d2fbeda06..da983b720f08 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
#include <video/sh_mobile_lcdc.h>
#include <asm/atomic.h>
@@ -33,6 +34,7 @@ struct sh_mobile_lcdc_chan {
struct fb_info info;
dma_addr_t dma_handle;
struct fb_deferred_io defio;
+ struct scatterlist *sglist;
unsigned long frame_end;
wait_queue_head_t frame_end_wait;
};
@@ -206,16 +208,38 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) {}
static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {}
#endif
+static int sh_mobile_lcdc_sginit(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct sh_mobile_lcdc_chan *ch = info->par;
+ unsigned int nr_pages_max = info->fix.smem_len >> PAGE_SHIFT;
+ struct page *page;
+ int nr_pages = 0;
+
+ sg_init_table(ch->sglist, nr_pages_max);
+
+ list_for_each_entry(page, pagelist, lru)
+ sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0);
+
+ return nr_pages;
+}
+
static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct sh_mobile_lcdc_chan *ch = info->par;
+ unsigned int nr_pages;
/* enable clocks before accessing hardware */
sh_mobile_lcdc_clk_on(ch->lcdc);
+ nr_pages = sh_mobile_lcdc_sginit(info, pagelist);
+ dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
+
/* trigger panel update */
lcdc_write_chan(ch, LDSM2R, 1);
+
+ dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
}
static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
@@ -846,21 +870,31 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
}
for (i = 0; i < j; i++) {
- error = register_framebuffer(&priv->ch[i].info);
+ struct sh_mobile_lcdc_chan *ch = priv->ch + i;
+
+ info = &ch->info;
+
+ if (info->fbdefio) {
+ priv->ch->sglist = vmalloc(sizeof(struct scatterlist) *
+ info->fix.smem_len >> PAGE_SHIFT);
+ if (!priv->ch->sglist) {
+ dev_err(&pdev->dev, "cannot allocate sglist\n");
+ goto err1;
+ }
+ }
+
+ error = register_framebuffer(info);
if (error < 0)
goto err1;
- }
- for (i = 0; i < j; i++) {
- info = &priv->ch[i].info;
dev_info(info->dev,
"registered %s/%s as %dx%d %dbpp.\n",
pdev->name,
- (priv->ch[i].cfg.chan == LCDC_CHAN_MAINLCD) ?
+ (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
"mainlcd" : "sublcd",
- (int) priv->ch[i].cfg.lcd_cfg.xres,
- (int) priv->ch[i].cfg.lcd_cfg.yres,
- priv->ch[i].cfg.bpp);
+ (int) ch->cfg.lcd_cfg.xres,
+ (int) ch->cfg.lcd_cfg.yres,
+ ch->cfg.bpp);
/* deferred io mode: disable clock to save power */
if (info->fbdefio)
@@ -892,6 +926,9 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
if (!info->device)
continue;
+ if (priv->ch[i].sglist)
+ vfree(priv->ch[i].sglist);
+
dma_free_coherent(&pdev->dev, info->fix.smem_len,
info->screen_base, priv->ch[i].dma_handle);
fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7072d19080d5..fd33455389b8 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1847,8 +1847,10 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
strcpy(fix->id, ivideo->myid);
+ mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
fix->smem_len = ivideo->sisfb_mem;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index eb5d73a06702..16d4f4c7d52b 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -145,7 +145,7 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
#define SM501_MEMF_ACCEL (8)
static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
- unsigned int why, size_t size)
+ unsigned int why, size_t size, u32 smem_len)
{
struct sm501fb_par *par;
struct fb_info *fbi;
@@ -172,7 +172,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
if (ptr > 0)
ptr &= ~(PAGE_SIZE - 1);
- if (fbi && ptr < fbi->fix.smem_len)
+ if (fbi && ptr < smem_len)
return -ENOMEM;
break;
@@ -197,7 +197,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
case SM501_MEMF_ACCEL:
fbi = inf->fb[HEAD_CRT];
- ptr = fbi ? fbi->fix.smem_len : 0;
+ ptr = fbi ? smem_len : 0;
fbi = inf->fb[HEAD_PANEL];
if (fbi) {
@@ -413,6 +413,7 @@ static int sm501fb_set_par_common(struct fb_info *info,
unsigned int mem_type;
unsigned int clock_type;
unsigned int head_addr;
+ unsigned int smem_len;
dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n",
__func__, var->xres, var->yres, var->bits_per_pixel,
@@ -453,18 +454,20 @@ static int sm501fb_set_par_common(struct fb_info *info,
/* allocate fb memory within 501 */
info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8;
- info->fix.smem_len = info->fix.line_length * var->yres_virtual;
+ smem_len = info->fix.line_length * var->yres_virtual;
dev_dbg(fbi->dev, "%s: line length = %u\n", __func__,
info->fix.line_length);
- if (sm501_alloc_mem(fbi, &par->screen, mem_type,
- info->fix.smem_len)) {
+ if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) {
dev_err(fbi->dev, "no memory available\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_base = fbi->fbmem + par->screen.sm_addr;
info->screen_size = info->fix.smem_len;
@@ -637,7 +640,8 @@ static int sm501fb_set_par_crt(struct fb_info *info)
if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) {
/* the head is displaying panel data... */
- sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0);
+ sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0,
+ info->fix.smem_len);
goto out_update;
}
@@ -1289,7 +1293,8 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
par->cursor_regs = info->regs + reg_base;
- ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024);
+ ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024,
+ fbi->fix.smem_len);
if (ret < 0)
return ret;
@@ -1619,6 +1624,8 @@ static int __devinit sm501fb_start_one(struct sm501fb_info *info,
if (!fbi)
return 0;
+ mutex_init(&info->fb[head]->mm_lock);
+
ret = sm501fb_init_fb(info->fb[head], head, drvname);
if (ret) {
dev_err(info->dev, "cannot initialise fb %s\n", drvname);
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index d0674f1e3f10..8a141c2c637b 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -523,6 +523,7 @@ static int w100fb_set_par(struct fb_info *info)
info->fix.ywrapstep = 0;
info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
+ mutex_lock(&info->mm_lock);
if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
par->extmem_active = 1;
info->fix.smem_len = par->mach->mem->size+1;
@@ -530,6 +531,7 @@ static int w100fb_set_par(struct fb_info *info)
par->extmem_active = 0;
info->fix.smem_len = MEM_INT_SIZE+1;
}
+ mutex_unlock(&info->mm_lock);
w100fb_activate_var(par);
}
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 5c7011cda6a6..751c003864ad 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -161,7 +161,7 @@ static long bcm47xx_wdt_ioctl(struct file *file,
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
- int new_value, retval = -EINVAL;;
+ int new_value, retval = -EINVAL;
switch (cmd) {
case WDIOC_GETSUPPORT:
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index ee1caae4d33b..016245419fad 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -38,7 +38,7 @@
static unsigned long oscr_freq;
static unsigned long sa1100wdt_users;
-static int pre_margin;
+static unsigned int pre_margin;
static int boot_status;
/*
@@ -84,6 +84,7 @@ static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT
| WDIOF_KEEPALIVEPING,
.identity = "SA1100/PXA255 Watchdog",
+ .firmware_version = 1,
};
static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
@@ -118,7 +119,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
if (ret)
break;
- if (time <= 0 || time > 255) {
+ if (time <= 0 || (oscr_freq * (long long)time >= 0xffffffff)) {
ret = -EINVAL;
break;
}
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 916890abffdd..f201accc4e3d 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -89,6 +89,11 @@ static void w83627hf_select_wd_register(void)
c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */
outb_p(0x2b, WDT_EFER);
outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */
+ } else if (c == 0x88) { /* W83627EHF */
+ outb_p(0x2d, WDT_EFER); /* select GPIO5 */
+ c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */
+ outb_p(0x2d, WDT_EFER);
+ outb_p(c, WDT_EFDR); /* set GPIO5 to WDT0 */
}
outb_p(0x07, WDT_EFER); /* point to logical device number reg */
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index 883b5f79673a..a6c12dec91a1 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -149,8 +149,10 @@ static void wdt_ctrl(int timeout)
{
spin_lock(&io_lock);
- if (w83697ug_select_wd_register() < 0)
+ if (w83697ug_select_wd_register() < 0) {
+ spin_unlock(&io_lock);
return;
+ }
outb_p(0xF4, WDT_EFER); /* Select CRF4 */
outb_p(timeout, WDT_EFDR); /* Write Timeout counter to CRF4 */
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index a4fe7a38d9b0..3bde56bce63a 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -218,16 +218,14 @@ static void wdrtas_timer_keepalive(void)
*/
static int wdrtas_get_temperature(void)
{
- long result;
+ int result;
int temperature = 0;
- result = rtas_call(wdrtas_token_get_sensor_state, 2, 2,
- (void *)__pa(&temperature),
- WDRTAS_THERMAL_SENSOR, 0);
+ result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature);
if (result < 0)
printk(KERN_WARNING "wdrtas: reading the thermal sensor "
- "faild: %li\n", result);
+ "failed: %i\n", result);
else
temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 891d2e90753a..2f57276e87a2 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -47,10 +47,10 @@
static DEFINE_SPINLOCK(irq_mapping_update_lock);
/* IRQ <-> VIRQ mapping. */
-static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
+static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
-static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
/* Interrupt types. */
enum xen_irq_type {
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
/*
* Search the CPUs pending events bitmasks. For each one found, map
* the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
- static DEFINE_PER_CPU(unsigned, nesting_count);
unsigned count;
exit_idle();
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
vcpu_info->evtchn_upcall_pending = 0;
- if (__get_cpu_var(nesting_count)++)
+ if (__get_cpu_var(xed_nesting_count)++)
goto out;
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
BUG_ON(!irqs_disabled());
- count = __get_cpu_var(nesting_count);
- __get_cpu_var(nesting_count) = 0;
+ count = __get_cpu_var(xed_nesting_count);
+ __get_cpu_var(xed_nesting_count) = 0;
} while(count != 1);
out:
@@ -927,9 +928,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
void __init xen_init_IRQ(void)
{
int i;
- size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
- cpu_evtchn_mask_p = alloc_bootmem(size);
+ cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
+ GFP_KERNEL);
BUG_ON(cpu_evtchn_mask_p == NULL);
init_evtchn_cpu_bindings();
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 210acafe4a9b..3ff8bdd18fb3 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -432,7 +432,6 @@ vfs_rejected_lock:
list_del_init(&fl->fl_u.afs.link);
if (list_empty(&vnode->granted_locks))
afs_defer_unlock(vnode, key);
- spin_unlock(&vnode->lock);
goto abort_attempt;
}
diff --git a/fs/aio.c b/fs/aio.c
index 76da12537956..d065b2c3273e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
assert_spin_locked(&ctx->ctx_lock);
+ if (req->ki_eventfd != NULL)
+ eventfd_ctx_put(req->ki_eventfd);
if (req->ki_dtor)
req->ki_dtor(req);
if (req->ki_iovec != &req->ki_inline_vec)
@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data)
/* Complete the fput(s) */
if (req->ki_filp != NULL)
__fput(req->ki_filp);
- if (req->ki_eventfd != NULL)
- __fput(req->ki_eventfd);
/* Link the iocb into the context's free list */
spin_lock_irq(&ctx->ctx_lock);
@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data)
*/
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
- int schedule_putreq = 0;
-
dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
req, atomic_long_read(&req->ki_filp->f_count));
@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
* we would not be holding the last reference to the file*, so
* this function will be executed w/out any aio kthread wakeup.
*/
- if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
- schedule_putreq++;
- else
- req->ki_filp = NULL;
- if (req->ki_eventfd != NULL) {
- if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
- schedule_putreq++;
- else
- req->ki_eventfd = NULL;
- }
- if (unlikely(schedule_putreq)) {
+ if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock);
queue_work(aio_wq, &fput_work);
- } else
+ } else {
+ req->ki_filp = NULL;
really_put_req(ctx, req);
+ }
return 1;
}
@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
* an eventfd() fd, and will be signaled for each completed
* event using the eventfd_signal() function.
*/
- req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
+ req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
if (IS_ERR(req->ki_eventfd)) {
ret = PTR_ERR(req->ki_eventfd);
req->ki_eventfd = NULL;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 9fa212b014a5..b7c1603cd4bd 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1522,11 +1522,11 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
info->thread = NULL;
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
- fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
-
if (psinfo == NULL)
return 0;
+ fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+
/*
* Figure out how many notes we're going to need for each thread.
*/
@@ -1929,7 +1929,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
elf = kmalloc(sizeof(*elf), GFP_KERNEL);
if (!elf)
goto out;
-
+ /*
+ * The number of segs is recorded in the ELF header as a 16bit value.
+ * Please check the DEFAULT_MAX_MAP_COUNT definition when you modify this.
+ */
segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
segs += ELF_CORE_EXTRA_PHDRS;
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 31c46a241bac..49a34e7f7306 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -1,7 +1,7 @@
/*
* bio-integrity.c - bio data integrity extensions
*
- * Copyright (C) 2007, 2008 Oracle Corporation
+ * Copyright (C) 2007, 2008, 2009 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*
* This program is free software; you can redistribute it and/or
@@ -25,63 +25,121 @@
#include <linux/bio.h>
#include <linux/workqueue.h>
-static struct kmem_cache *bio_integrity_slab __read_mostly;
-static mempool_t *bio_integrity_pool;
-static struct bio_set *integrity_bio_set;
+struct integrity_slab {
+ struct kmem_cache *slab;
+ unsigned short nr_vecs;
+ char name[8];
+};
+
+#define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) }
+struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = {
+ IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES),
+};
+#undef IS
+
static struct workqueue_struct *kintegrityd_wq;
+static inline unsigned int vecs_to_idx(unsigned int nr)
+{
+ switch (nr) {
+ case 1:
+ return 0;
+ case 2 ... 4:
+ return 1;
+ case 5 ... 16:
+ return 2;
+ case 17 ... 64:
+ return 3;
+ case 65 ... 128:
+ return 4;
+ case 129 ... BIO_MAX_PAGES:
+ return 5;
+ default:
+ BUG();
+ }
+}
+
+static inline int use_bip_pool(unsigned int idx)
+{
+ if (idx == BIOVEC_NR_POOLS)
+ return 1;
+
+ return 0;
+}
+
/**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
* @gfp_mask: Memory allocation mask
* @nr_vecs: Number of integrity metadata scatter-gather elements
+ * @bs: bio_set to allocate from
*
* Description: This function prepares a bio for attaching integrity
* metadata. nr_vecs specifies the maximum number of pages containing
* integrity metadata that can be attached.
*/
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
- gfp_t gfp_mask,
- unsigned int nr_vecs)
+struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs,
+ struct bio_set *bs)
{
struct bio_integrity_payload *bip;
- struct bio_vec *iv;
- unsigned long idx;
+ unsigned int idx = vecs_to_idx(nr_vecs);
BUG_ON(bio == NULL);
+ bip = NULL;
- bip = mempool_alloc(bio_integrity_pool, gfp_mask);
- if (unlikely(bip == NULL)) {
- printk(KERN_ERR "%s: could not alloc bip\n", __func__);
- return NULL;
- }
+ /* Lower order allocations come straight from slab */
+ if (!use_bip_pool(idx))
+ bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask);
- memset(bip, 0, sizeof(*bip));
+ /* Use mempool if lower order alloc failed or max vecs were requested */
+ if (bip == NULL) {
+ bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
- iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, integrity_bio_set);
- if (unlikely(iv == NULL)) {
- printk(KERN_ERR "%s: could not alloc bip_vec\n", __func__);
- mempool_free(bip, bio_integrity_pool);
- return NULL;
+ if (unlikely(bip == NULL)) {
+ printk(KERN_ERR "%s: could not alloc bip\n", __func__);
+ return NULL;
+ }
}
- bip->bip_pool = idx;
- bip->bip_vec = iv;
+ memset(bip, 0, sizeof(*bip));
+
+ bip->bip_slab = idx;
bip->bip_bio = bio;
bio->bi_integrity = bip;
return bip;
}
+EXPORT_SYMBOL(bio_integrity_alloc_bioset);
+
+/**
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * @bio: bio to attach integrity metadata to
+ * @gfp_mask: Memory allocation mask
+ * @nr_vecs: Number of integrity metadata scatter-gather elements
+ *
+ * Description: This function prepares a bio for attaching integrity
+ * metadata. nr_vecs specifies the maximum number of pages containing
+ * integrity metadata that can be attached.
+ */
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs)
+{
+ return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
+}
EXPORT_SYMBOL(bio_integrity_alloc);
/**
* bio_integrity_free - Free bio integrity payload
* @bio: bio containing bip to be freed
+ * @bs: bio_set this bio was allocated from
*
* Description: Used to free the integrity portion of a bio. Usually
* called from bio_free().
*/
-void bio_integrity_free(struct bio *bio)
+void bio_integrity_free(struct bio *bio, struct bio_set *bs)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
@@ -92,8 +150,10 @@ void bio_integrity_free(struct bio *bio)
&& bip->bip_buf != NULL)
kfree(bip->bip_buf);
- bvec_free_bs(integrity_bio_set, bip->bip_vec, bip->bip_pool);
- mempool_free(bip, bio_integrity_pool);
+ if (use_bip_pool(bip->bip_slab))
+ mempool_free(bip, bs->bio_integrity_pool);
+ else
+ kmem_cache_free(bip_slab[bip->bip_slab].slab, bip);
bio->bi_integrity = NULL;
}
@@ -114,7 +174,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
struct bio_integrity_payload *bip = bio->bi_integrity;
struct bio_vec *iv;
- if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_pool)) {
+ if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
printk(KERN_ERR "%s: bip_vec full\n", __func__);
return 0;
}
@@ -647,8 +707,8 @@ void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
bp->iv1 = bip->bip_vec[0];
bp->iv2 = bip->bip_vec[0];
- bp->bip1.bip_vec = &bp->iv1;
- bp->bip2.bip_vec = &bp->iv2;
+ bp->bip1.bip_vec[0] = bp->iv1;
+ bp->bip2.bip_vec[0] = bp->iv2;
bp->iv1.bv_len = sectors * bi->tuple_size;
bp->iv2.bv_offset += sectors * bi->tuple_size;
@@ -667,17 +727,19 @@ EXPORT_SYMBOL(bio_integrity_split);
* @bio: New bio
* @bio_src: Original bio
* @gfp_mask: Memory allocation mask
+ * @bs: bio_set to allocate bip from
*
* Description: Called to allocate a bip when cloning a bio
*/
-int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ gfp_t gfp_mask, struct bio_set *bs)
{
struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
struct bio_integrity_payload *bip;
BUG_ON(bip_src == NULL);
- bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
+ bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
if (bip == NULL)
return -EIO;
@@ -693,25 +755,43 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
}
EXPORT_SYMBOL(bio_integrity_clone);
-static int __init bio_integrity_init(void)
+int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
- kintegrityd_wq = create_workqueue("kintegrityd");
+ unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
+
+ bs->bio_integrity_pool =
+ mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
+ if (!bs->bio_integrity_pool)
+ return -1;
+
+ return 0;
+}
+EXPORT_SYMBOL(bioset_integrity_create);
+
+void bioset_integrity_free(struct bio_set *bs)
+{
+ if (bs->bio_integrity_pool)
+ mempool_destroy(bs->bio_integrity_pool);
+}
+EXPORT_SYMBOL(bioset_integrity_free);
+
+void __init bio_integrity_init(void)
+{
+ unsigned int i;
+
+ kintegrityd_wq = create_workqueue("kintegrityd");
if (!kintegrityd_wq)
panic("Failed to create kintegrityd\n");
- bio_integrity_slab = KMEM_CACHE(bio_integrity_payload,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) {
+ unsigned int size;
- bio_integrity_pool = mempool_create_slab_pool(BIO_POOL_SIZE,
- bio_integrity_slab);
- if (!bio_integrity_pool)
- panic("bio_integrity: can't allocate bip pool\n");
+ size = sizeof(struct bio_integrity_payload)
+ + bip_slab[i].nr_vecs * sizeof(struct bio_vec);
- integrity_bio_set = bioset_create(BIO_POOL_SIZE, 0);
- if (!integrity_bio_set)
- panic("bio_integrity: can't allocate bio_set\n");
-
- return 0;
+ bip_slab[i].slab =
+ kmem_cache_create(bip_slab[i].name, size, 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ }
}
-subsys_initcall(bio_integrity_init);
diff --git a/fs/bio.c b/fs/bio.c
index 24c914043532..1486b19fc431 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -238,7 +238,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
if (bio_integrity(bio))
- bio_integrity_free(bio);
+ bio_integrity_free(bio, bs);
/*
* If we have front padding, adjust the bio pointer before freeing
@@ -341,7 +341,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
static void bio_kmalloc_destructor(struct bio *bio)
{
if (bio_integrity(bio))
- bio_integrity_free(bio);
+ bio_integrity_free(bio, fs_bio_set);
kfree(bio);
}
@@ -472,7 +472,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
if (bio_integrity(bio)) {
int ret;
- ret = bio_integrity_clone(b, bio, gfp_mask);
+ ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
if (ret < 0) {
bio_put(b);
@@ -1539,6 +1539,7 @@ void bioset_free(struct bio_set *bs)
if (bs->bio_pool)
mempool_destroy(bs->bio_pool);
+ bioset_integrity_free(bs);
biovec_free_pools(bs);
bio_put_slab(bs);
@@ -1579,6 +1580,9 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
if (!bs->bio_pool)
goto bad;
+ if (bioset_integrity_create(bs, pool_size))
+ goto bad;
+
if (!biovec_create_pools(bs, pool_size))
return bs;
@@ -1616,6 +1620,7 @@ static int __init init_bio(void)
if (!bio_slabs)
panic("bio: can't allocate bios\n");
+ bio_integrity_init();
biovec_init_slabs();
fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7f88628a1a72..6e4f6c50a120 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -299,8 +299,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
"btrfs-%s-%d", workers->name,
workers->num_workers + i);
if (IS_ERR(worker->task)) {
- kfree(worker);
ret = PTR_ERR(worker->task);
+ kfree(worker);
goto fail;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2779c2f5360a..98a873838717 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2074,8 +2074,7 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
- *root);
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d28d29c95f7c..027c8d31f13b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1352,6 +1352,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
int err;
+ bdi->name = "btrfs";
bdi->capabilities = BDI_CAP_MAP_COPY;
err = bdi_init(bdi);
if (err)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index edc7d208c5ce..a5aca3997d42 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -990,15 +990,13 @@ static inline int extent_ref_type(u64 parent, u64 owner)
return type;
}
-static int find_next_key(struct btrfs_path *path, struct btrfs_key *key)
+static int find_next_key(struct btrfs_path *path, int level,
+ struct btrfs_key *key)
{
- int level;
- BUG_ON(!path->keep_locks);
- for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+ for (; level < BTRFS_MAX_LEVEL; level++) {
if (!path->nodes[level])
break;
- btrfs_assert_tree_locked(path->nodes[level]);
if (path->slots[level] + 1 >=
btrfs_header_nritems(path->nodes[level]))
continue;
@@ -1158,7 +1156,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
* For simplicity, we just do not add new inline back
* ref if there is any kind of item for this block
*/
- if (find_next_key(path, &key) == 0 && key.objectid == bytenr &&
+ if (find_next_key(path, 0, &key) == 0 &&
+ key.objectid == bytenr &&
key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
err = -EAGAIN;
goto out;
@@ -2697,7 +2696,7 @@ again:
printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
", %llu bytes_used, %llu bytes_reserved, "
- "%llu bytes_pinned, %llu bytes_readonly, %llu may use"
+ "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
"%llu total\n", (unsigned long long)bytes,
(unsigned long long)data_sinfo->bytes_delalloc,
(unsigned long long)data_sinfo->bytes_used,
@@ -4128,6 +4127,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
return buf;
}
+#if 0
int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *leaf)
{
@@ -4171,8 +4171,6 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
return 0;
}
-#if 0
-
static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_leaf_ref *ref)
@@ -4553,262 +4551,471 @@ out:
}
#endif
+struct walk_control {
+ u64 refs[BTRFS_MAX_LEVEL];
+ u64 flags[BTRFS_MAX_LEVEL];
+ struct btrfs_key update_progress;
+ int stage;
+ int level;
+ int shared_level;
+ int update_ref;
+ int keep_locks;
+};
+
+#define DROP_REFERENCE 1
+#define UPDATE_BACKREF 2
+
/*
- * helper function for drop_subtree, this function is similar to
- * walk_down_tree. The main difference is that it checks reference
- * counts while tree blocks are locked.
+ * helper to process tree block while walking down the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function checks the
+ * reference count of the block. if the block is shared and
+ * we need to update back refs for the subtree rooted at the
+ * block, this function changes wc->stage to UPDATE_BACKREF.
+ *
+ * when wc->stage == UPDATE_BACKREF, this function updates
+ * back refs for pointers in the block.
+ *
+ * NOTE: return value 1 means we should stop walking down.
*/
-static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- struct btrfs_path *path, int *level)
+ struct btrfs_path *path,
+ struct walk_control *wc)
{
- struct extent_buffer *next;
- struct extent_buffer *cur;
- struct extent_buffer *parent;
- u64 bytenr;
- u64 ptr_gen;
- u64 refs;
- u64 flags;
- u32 blocksize;
+ int level = wc->level;
+ struct extent_buffer *eb = path->nodes[level];
+ struct btrfs_key key;
+ u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
int ret;
- cur = path->nodes[*level];
- ret = btrfs_lookup_extent_info(trans, root, cur->start, cur->len,
- &refs, &flags);
- BUG_ON(ret);
- if (refs > 1)
- goto out;
+ if (wc->stage == UPDATE_BACKREF &&
+ btrfs_header_owner(eb) != root->root_key.objectid)
+ return 1;
- BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+ /*
+ * when reference count of tree block is 1, it won't increase
+ * again. once full backref flag is set, we never clear it.
+ */
+ if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+ (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
+ BUG_ON(!path->locks[level]);
+ ret = btrfs_lookup_extent_info(trans, root,
+ eb->start, eb->len,
+ &wc->refs[level],
+ &wc->flags[level]);
+ BUG_ON(ret);
+ BUG_ON(wc->refs[level] == 0);
+ }
- while (*level >= 0) {
- cur = path->nodes[*level];
- if (*level == 0) {
- ret = btrfs_drop_leaf_ref(trans, root, cur);
- BUG_ON(ret);
- clean_tree_block(trans, root, cur);
- break;
- }
- if (path->slots[*level] >= btrfs_header_nritems(cur)) {
- clean_tree_block(trans, root, cur);
- break;
+ if (wc->stage == DROP_REFERENCE &&
+ wc->update_ref && wc->refs[level] > 1) {
+ BUG_ON(eb == root->node);
+ BUG_ON(path->slots[level] > 0);
+ if (level == 0)
+ btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
+ else
+ btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
+ if (btrfs_header_owner(eb) == root->root_key.objectid &&
+ btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
+ wc->stage = UPDATE_BACKREF;
+ wc->shared_level = level;
}
+ }
- bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
- blocksize = btrfs_level_size(root, *level - 1);
- ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
+ if (wc->stage == DROP_REFERENCE) {
+ if (wc->refs[level] > 1)
+ return 1;
- next = read_tree_block(root, bytenr, blocksize, ptr_gen);
- btrfs_tree_lock(next);
- btrfs_set_lock_blocking(next);
+ if (path->locks[level] && !wc->keep_locks) {
+ btrfs_tree_unlock(eb);
+ path->locks[level] = 0;
+ }
+ return 0;
+ }
- ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
- &refs, &flags);
+ /* wc->stage == UPDATE_BACKREF */
+ if (!(wc->flags[level] & flag)) {
+ BUG_ON(!path->locks[level]);
+ ret = btrfs_inc_ref(trans, root, eb, 1);
BUG_ON(ret);
- if (refs > 1) {
- parent = path->nodes[*level];
- ret = btrfs_free_extent(trans, root, bytenr,
- blocksize, parent->start,
- btrfs_header_owner(parent),
- *level - 1, 0);
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+ BUG_ON(ret);
+ ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
+ eb->len, flag, 0);
+ BUG_ON(ret);
+ wc->flags[level] |= flag;
+ }
+
+ /*
+ * the block is shared by multiple trees, so it's not good to
+ * keep the tree lock
+ */
+ if (path->locks[level] && level > 0) {
+ btrfs_tree_unlock(eb);
+ path->locks[level] = 0;
+ }
+ return 0;
+}
+
+/*
+ * helper to process tree block while walking up the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function drops the
+ * reference count on the block.
+ *
+ * when wc->stage == UPDATE_BACKREF, this function changes
+ * wc->stage back to DROP_REFERENCE if we changed wc->stage
+ * to UPDATE_BACKREF previously while processing the block.
+ *
+ * NOTE: return value 1 means we should stop walking up.
+ */
+static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct walk_control *wc)
+{
+ int ret = 0;
+ int level = wc->level;
+ struct extent_buffer *eb = path->nodes[level];
+ u64 parent = 0;
+
+ if (wc->stage == UPDATE_BACKREF) {
+ BUG_ON(wc->shared_level < level);
+ if (level < wc->shared_level)
+ goto out;
+
+ BUG_ON(wc->refs[level] <= 1);
+ ret = find_next_key(path, level + 1, &wc->update_progress);
+ if (ret > 0)
+ wc->update_ref = 0;
+
+ wc->stage = DROP_REFERENCE;
+ wc->shared_level = -1;
+ path->slots[level] = 0;
+
+ /*
+ * check reference count again if the block isn't locked.
+ * we should start walking down the tree again if reference
+ * count is one.
+ */
+ if (!path->locks[level]) {
+ BUG_ON(level == 0);
+ btrfs_tree_lock(eb);
+ btrfs_set_lock_blocking(eb);
+ path->locks[level] = 1;
+
+ ret = btrfs_lookup_extent_info(trans, root,
+ eb->start, eb->len,
+ &wc->refs[level],
+ &wc->flags[level]);
BUG_ON(ret);
- path->slots[*level]++;
- btrfs_tree_unlock(next);
- free_extent_buffer(next);
- continue;
+ BUG_ON(wc->refs[level] == 0);
+ if (wc->refs[level] == 1) {
+ btrfs_tree_unlock(eb);
+ path->locks[level] = 0;
+ return 1;
+ }
+ } else {
+ BUG_ON(level != 0);
}
+ }
- BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+ /* wc->stage == DROP_REFERENCE */
+ BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
- *level = btrfs_header_level(next);
- path->nodes[*level] = next;
- path->slots[*level] = 0;
- path->locks[*level] = 1;
- cond_resched();
+ if (wc->refs[level] == 1) {
+ if (level == 0) {
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ ret = btrfs_dec_ref(trans, root, eb, 1);
+ else
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+ BUG_ON(ret);
+ }
+ /* make block locked assertion in clean_tree_block happy */
+ if (!path->locks[level] &&
+ btrfs_header_generation(eb) == trans->transid) {
+ btrfs_tree_lock(eb);
+ btrfs_set_lock_blocking(eb);
+ path->locks[level] = 1;
+ }
+ clean_tree_block(trans, root, eb);
+ }
+
+ if (eb == root->node) {
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ parent = eb->start;
+ else
+ BUG_ON(root->root_key.objectid !=
+ btrfs_header_owner(eb));
+ } else {
+ if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ parent = path->nodes[level + 1]->start;
+ else
+ BUG_ON(root->root_key.objectid !=
+ btrfs_header_owner(path->nodes[level + 1]));
}
-out:
- if (path->nodes[*level] == root->node)
- parent = path->nodes[*level];
- else
- parent = path->nodes[*level + 1];
- bytenr = path->nodes[*level]->start;
- blocksize = path->nodes[*level]->len;
- ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent->start,
- btrfs_header_owner(parent), *level, 0);
+ ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
+ root->root_key.objectid, level, 0);
BUG_ON(ret);
+out:
+ wc->refs[level] = 0;
+ wc->flags[level] = 0;
+ return ret;
+}
+
+static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct walk_control *wc)
+{
+ struct extent_buffer *next;
+ struct extent_buffer *cur;
+ u64 bytenr;
+ u64 ptr_gen;
+ u32 blocksize;
+ int level = wc->level;
+ int ret;
+
+ while (level >= 0) {
+ cur = path->nodes[level];
+ BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
- if (path->locks[*level]) {
- btrfs_tree_unlock(path->nodes[*level]);
- path->locks[*level] = 0;
+ ret = walk_down_proc(trans, root, path, wc);
+ if (ret > 0)
+ break;
+
+ if (level == 0)
+ break;
+
+ bytenr = btrfs_node_blockptr(cur, path->slots[level]);
+ blocksize = btrfs_level_size(root, level - 1);
+ ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
+
+ next = read_tree_block(root, bytenr, blocksize, ptr_gen);
+ btrfs_tree_lock(next);
+ btrfs_set_lock_blocking(next);
+
+ level--;
+ BUG_ON(level != btrfs_header_level(next));
+ path->nodes[level] = next;
+ path->slots[level] = 0;
+ path->locks[level] = 1;
+ wc->level = level;
}
- free_extent_buffer(path->nodes[*level]);
- path->nodes[*level] = NULL;
- *level += 1;
- cond_resched();
return 0;
}
-/*
- * helper for dropping snapshots. This walks back up the tree in the path
- * to find the first node higher up where we haven't yet gone through
- * all the slots
- */
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- int *level, int max_level)
+ struct walk_control *wc, int max_level)
{
- struct btrfs_root_item *root_item = &root->root_item;
- int i;
- int slot;
+ int level = wc->level;
int ret;
- for (i = *level; i < max_level && path->nodes[i]; i++) {
- slot = path->slots[i];
- if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
- /*
- * there is more work to do in this level.
- * Update the drop_progress marker to reflect
- * the work we've done so far, and then bump
- * the slot number
- */
- path->slots[i]++;
- WARN_ON(*level == 0);
- if (max_level == BTRFS_MAX_LEVEL) {
- btrfs_node_key(path->nodes[i],
- &root_item->drop_progress,
- path->slots[i]);
- root_item->drop_level = i;
- }
- *level = i;
+ path->slots[level] = btrfs_header_nritems(path->nodes[level]);
+ while (level < max_level && path->nodes[level]) {
+ wc->level = level;
+ if (path->slots[level] + 1 <
+ btrfs_header_nritems(path->nodes[level])) {
+ path->slots[level]++;
return 0;
} else {
- struct extent_buffer *parent;
-
- /*
- * this whole node is done, free our reference
- * on it and go up one level
- */
- if (path->nodes[*level] == root->node)
- parent = path->nodes[*level];
- else
- parent = path->nodes[*level + 1];
+ ret = walk_up_proc(trans, root, path, wc);
+ if (ret > 0)
+ return 0;
- clean_tree_block(trans, root, path->nodes[i]);
- ret = btrfs_free_extent(trans, root,
- path->nodes[i]->start,
- path->nodes[i]->len,
- parent->start,
- btrfs_header_owner(parent),
- *level, 0);
- BUG_ON(ret);
- if (path->locks[*level]) {
- btrfs_tree_unlock(path->nodes[i]);
- path->locks[i] = 0;
+ if (path->locks[level]) {
+ btrfs_tree_unlock(path->nodes[level]);
+ path->locks[level] = 0;
}
- free_extent_buffer(path->nodes[i]);
- path->nodes[i] = NULL;
- *level = i + 1;
+ free_extent_buffer(path->nodes[level]);
+ path->nodes[level] = NULL;
+ level++;
}
}
return 1;
}
/*
- * drop the reference count on the tree rooted at 'snap'. This traverses
- * the tree freeing any blocks that have a ref count of zero after being
- * decremented.
+ * drop a subvolume tree.
+ *
+ * this function traverses the tree freeing any blocks that are only
+ * referenced by the tree.
+ *
+ * when a shared tree block is found, this function decreases its
+ * reference count by one. if update_ref is true, this function
+ * also makes sure backrefs for the shared block and all lower level
+ * blocks are properly updated.
*/
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
- *root)
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
{
- int ret = 0;
- int wret;
- int level;
struct btrfs_path *path;
- int update_count;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *tree_root = root->fs_info->tree_root;
struct btrfs_root_item *root_item = &root->root_item;
+ struct walk_control *wc;
+ struct btrfs_key key;
+ int err = 0;
+ int ret;
+ int level;
path = btrfs_alloc_path();
BUG_ON(!path);
- level = btrfs_header_level(root->node);
+ wc = kzalloc(sizeof(*wc), GFP_NOFS);
+ BUG_ON(!wc);
+
+ trans = btrfs_start_transaction(tree_root, 1);
+
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
+ level = btrfs_header_level(root->node);
path->nodes[level] = btrfs_lock_root_node(root);
btrfs_set_lock_blocking(path->nodes[level]);
path->slots[level] = 0;
path->locks[level] = 1;
+ memset(&wc->update_progress, 0,
+ sizeof(wc->update_progress));
} else {
- struct btrfs_key key;
- struct btrfs_disk_key found_key;
- struct extent_buffer *node;
-
btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
+ memcpy(&wc->update_progress, &key,
+ sizeof(wc->update_progress));
+
level = root_item->drop_level;
+ BUG_ON(level == 0);
path->lowest_level = level;
- wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (wret < 0) {
- ret = wret;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ path->lowest_level = 0;
+ if (ret < 0) {
+ err = ret;
goto out;
}
- node = path->nodes[level];
- btrfs_node_key(node, &found_key, path->slots[level]);
- WARN_ON(memcmp(&found_key, &root_item->drop_progress,
- sizeof(found_key)));
+ btrfs_node_key_to_cpu(path->nodes[level], &key,
+ path->slots[level]);
+ WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
+
/*
* unlock our path, this is safe because only this
* function is allowed to delete this snapshot
*/
btrfs_unlock_up_safe(path, 0);
+
+ level = btrfs_header_level(root->node);
+ while (1) {
+ btrfs_tree_lock(path->nodes[level]);
+ btrfs_set_lock_blocking(path->nodes[level]);
+
+ ret = btrfs_lookup_extent_info(trans, root,
+ path->nodes[level]->start,
+ path->nodes[level]->len,
+ &wc->refs[level],
+ &wc->flags[level]);
+ BUG_ON(ret);
+ BUG_ON(wc->refs[level] == 0);
+
+ if (level == root_item->drop_level)
+ break;
+
+ btrfs_tree_unlock(path->nodes[level]);
+ WARN_ON(wc->refs[level] != 1);
+ level--;
+ }
}
+
+ wc->level = level;
+ wc->shared_level = -1;
+ wc->stage = DROP_REFERENCE;
+ wc->update_ref = update_ref;
+ wc->keep_locks = 0;
+
while (1) {
- unsigned long update;
- wret = walk_down_tree(trans, root, path, &level);
- if (wret > 0)
+ ret = walk_down_tree(trans, root, path, wc);
+ if (ret < 0) {
+ err = ret;
break;
- if (wret < 0)
- ret = wret;
+ }
- wret = walk_up_tree(trans, root, path, &level,
- BTRFS_MAX_LEVEL);
- if (wret > 0)
+ ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
+ if (ret < 0) {
+ err = ret;
break;
- if (wret < 0)
- ret = wret;
- if (trans->transaction->in_commit ||
- trans->transaction->delayed_refs.flushing) {
- ret = -EAGAIN;
+ }
+
+ if (ret > 0) {
+ BUG_ON(wc->stage != DROP_REFERENCE);
break;
}
- for (update_count = 0; update_count < 16; update_count++) {
+
+ if (wc->stage == DROP_REFERENCE) {
+ level = wc->level;
+ btrfs_node_key(path->nodes[level],
+ &root_item->drop_progress,
+ path->slots[level]);
+ root_item->drop_level = level;
+ }
+
+ BUG_ON(wc->level == 0);
+ if (trans->transaction->in_commit ||
+ trans->transaction->delayed_refs.flushing) {
+ ret = btrfs_update_root(trans, tree_root,
+ &root->root_key,
+ root_item);
+ BUG_ON(ret);
+
+ btrfs_end_transaction(trans, tree_root);
+ trans = btrfs_start_transaction(tree_root, 1);
+ } else {
+ unsigned long update;
update = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (update)
- btrfs_run_delayed_refs(trans, root, update);
- else
- break;
+ btrfs_run_delayed_refs(trans, tree_root,
+ update);
}
}
+ btrfs_release_path(root, path);
+ BUG_ON(err);
+
+ ret = btrfs_del_root(trans, tree_root, &root->root_key);
+ BUG_ON(ret);
+
+ free_extent_buffer(root->node);
+ free_extent_buffer(root->commit_root);
+ kfree(root);
out:
+ btrfs_end_transaction(trans, tree_root);
+ kfree(wc);
btrfs_free_path(path);
- return ret;
+ return err;
}
+/*
+ * drop subtree rooted at tree block 'node'.
+ *
+ * NOTE: this function will unlock and release tree block 'node'
+ */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
struct extent_buffer *parent)
{
struct btrfs_path *path;
+ struct walk_control *wc;
int level;
int parent_level;
int ret = 0;
int wret;
+ BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+
path = btrfs_alloc_path();
BUG_ON(!path);
+ wc = kzalloc(sizeof(*wc), GFP_NOFS);
+ BUG_ON(!wc);
+
btrfs_assert_tree_locked(parent);
parent_level = btrfs_header_level(parent);
extent_buffer_get(parent);
@@ -4817,24 +5024,33 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
btrfs_assert_tree_locked(node);
level = btrfs_header_level(node);
- extent_buffer_get(node);
path->nodes[level] = node;
path->slots[level] = 0;
+ path->locks[level] = 1;
+
+ wc->refs[parent_level] = 1;
+ wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
+ wc->level = level;
+ wc->shared_level = -1;
+ wc->stage = DROP_REFERENCE;
+ wc->update_ref = 0;
+ wc->keep_locks = 1;
while (1) {
- wret = walk_down_tree(trans, root, path, &level);
- if (wret < 0)
+ wret = walk_down_tree(trans, root, path, wc);
+ if (wret < 0) {
ret = wret;
- if (wret != 0)
break;
+ }
- wret = walk_up_tree(trans, root, path, &level, parent_level);
+ wret = walk_up_tree(trans, root, path, wc, parent_level);
if (wret < 0)
ret = wret;
if (wret != 0)
break;
}
+ kfree(wc);
btrfs_free_path(path);
return ret;
}
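
The rewritten btrfs_drop_snapshot() above drops a snapshot incrementally: whenever the running transaction needs to commit (in_commit or delayed_refs.flushing), it records its position in root_item->drop_progress/drop_level via btrfs_update_root(), ends the transaction, starts a fresh one and resumes from the saved key. The toy program below is only a sketch of that checkpoint-and-resume pattern, assuming a simple item counter in place of the tree walk; none of its names are btrfs APIs.

#include <stdio.h>

/*
 * Toy, self-contained illustration of checkpoint-and-resume batching:
 * do a bounded amount of work per "transaction", keep the cursor
 * (here a plain variable, in btrfs the drop_progress key saved in the
 * root item), then continue from it after the "commit".
 */
#define TOTAL_ITEMS 10
#define BATCH        3	/* pretend the transaction must commit every 3 items */

int main(void)
{
	int progress = 0;	/* analogue of root_item->drop_progress */

	while (progress < TOTAL_ITEMS) {
		int done_this_txn = 0;

		/* work inside one "transaction" until it has to commit */
		while (progress < TOTAL_ITEMS && done_this_txn < BATCH) {
			printf("dropping item %d\n", progress);
			progress++;		/* checkpoint survives the commit */
			done_this_txn++;
		}
		printf("-- commit, resume at %d --\n", progress);
	}
	return 0;
}
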
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 126477eaecf5..7c3cd248d8d6 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -151,7 +151,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
}
if (end_pos > isize) {
i_size_write(inode, end_pos);
- btrfs_update_inode(trans, root, inode);
+ /* we've only changed i_size in ram, and we haven't updated
+ * the disk i_size. There is no need to log the inode
+ * at this time.
+ */
}
err = btrfs_end_transaction(trans, root);
out_unlock:
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dbe1aabf96cd..7ffa3d34ea19 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3580,12 +3580,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
owner = 1;
BTRFS_I(inode)->block_group =
btrfs_find_block_group(root, 0, alloc_hint, owner);
- if ((mode & S_IFREG)) {
- if (btrfs_test_opt(root, NODATASUM))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(root, NODATACOW))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
- }
key[0].objectid = objectid;
btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -3640,6 +3634,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_inherit_iflags(inode, dir);
+ if ((mode & S_IFREG)) {
+ if (btrfs_test_opt(root, NODATASUM))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
+ if (btrfs_test_opt(root, NODATACOW))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
+ }
+
insert_inode_hash(inode);
inode_tree_add(inode);
return inode;
@@ -5082,6 +5083,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
struct extent_map *em;
struct btrfs_trans_handle *trans;
+ struct btrfs_root *root;
int ret;
alloc_start = offset & ~mask;
@@ -5100,6 +5102,13 @@ static long btrfs_fallocate(struct inode *inode, int mode,
goto out;
}
+ root = BTRFS_I(inode)->root;
+
+ ret = btrfs_check_data_free_space(root, inode,
+ alloc_end - alloc_start);
+ if (ret)
+ goto out;
+
locked_end = alloc_end - 1;
while (1) {
struct btrfs_ordered_extent *ordered;
@@ -5107,7 +5116,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
if (!trans) {
ret = -EIO;
- goto out;
+ goto out_free;
}
/* the extent lock is ordered inside the running
@@ -5168,6 +5177,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
GFP_NOFS);
btrfs_end_transaction(trans, BTRFS_I(inode)->root);
+out_free:
+ btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
out:
mutex_unlock(&inode->i_mutex);
return ret;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index eff18f5b5362..9f4db848db10 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1028,7 +1028,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
struct btrfs_file_extent_item);
comp = btrfs_file_extent_compression(leaf, extent);
type = btrfs_file_extent_type(leaf, extent);
- if (type == BTRFS_FILE_EXTENT_REG) {
+ if (type == BTRFS_FILE_EXTENT_REG ||
+ type == BTRFS_FILE_EXTENT_PREALLOC) {
disko = btrfs_file_extent_disk_bytenr(leaf,
extent);
diskl = btrfs_file_extent_disk_num_bytes(leaf,
@@ -1051,7 +1052,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
new_key.objectid = inode->i_ino;
new_key.offset = key.offset + destoff - off;
- if (type == BTRFS_FILE_EXTENT_REG) {
+ if (type == BTRFS_FILE_EXTENT_REG ||
+ type == BTRFS_FILE_EXTENT_PREALLOC) {
ret = btrfs_insert_empty_item(trans, root, path,
&new_key, size);
if (ret)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b23dc209ae10..008397934778 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1788,7 +1788,7 @@ static void merge_func(struct btrfs_work *work)
btrfs_end_transaction(trans, root);
}
- btrfs_drop_dead_root(reloc_root);
+ btrfs_drop_snapshot(reloc_root, 0);
if (atomic_dec_and_test(async->num_pending))
complete(async->done);
@@ -2075,9 +2075,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
BUG_ON(ret);
-
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
}
if (!lowest) {
btrfs_tree_unlock(upper->eb);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4e83457ea253..2dbf1c1f56ee 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -593,6 +593,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
return 0;
}
+#if 0
/*
* when dropping snapshots, we generate a ton of delayed refs, and it makes
* sense not to join the transaction while it is trying to flush the current
@@ -681,6 +682,7 @@ int btrfs_drop_dead_root(struct btrfs_root *root)
btrfs_btree_balance_dirty(tree_root, nr);
return ret;
}
+#endif
/*
* new snapshots need to be created at a very specific time in the
@@ -1081,7 +1083,7 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
while (!list_empty(&list)) {
root = list_entry(list.next, struct btrfs_root, root_list);
list_del_init(&root->root_list);
- btrfs_drop_dead_root(root);
+ btrfs_drop_snapshot(root, 0);
}
return 0;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index a3ef091a45bd..2a01b2bc27ba 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -281,7 +281,7 @@ static void free_more_memory(void)
struct zone *zone;
int nid;
- wakeup_pdflush(1024);
+ wakeup_flusher_threads(1024);
yield();
for_each_online_node(nid) {
diff --git a/fs/char_dev.c b/fs/char_dev.c
index b7c9d5187a75..a8514ad80b59 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -32,6 +32,7 @@
* - no readahead or I/O queue unplugging required
*/
struct backing_dev_info directly_mappable_cdev_bdi = {
+ .name = "char",
.capabilities = (
#ifdef CONFIG_MMU
/* permit private copies of the data to be taken */
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index b48689839428..3a9b7a58a51d 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -5,7 +5,7 @@ client generated ones by default (mount option "serverino" turned
on by default if server supports it). Add forceuid and forcegid
mount options (so that when negotiating unix extensions specifying
which uid mounted does not immediately force the server's reported
-uids to be overridden).
+uids to be overridden). Add support for scope mount parm.
Version 1.58
------------
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 1b09f1670061..20692fbfdb24 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -49,6 +49,7 @@
#define ASN1_OJI 6 /* Object Identifier */
#define ASN1_OJD 7 /* Object Description */
#define ASN1_EXT 8 /* External */
+#define ASN1_ENUM 10 /* Enumerated */
#define ASN1_SEQ 16 /* Sequence */
#define ASN1_SET 17 /* Set */
#define ASN1_NUMSTR 18 /* Numerical String */
@@ -78,10 +79,12 @@
#define SPNEGO_OID_LEN 7
#define NTLMSSP_OID_LEN 10
#define KRB5_OID_LEN 7
+#define KRB5U2U_OID_LEN 8
#define MSKRB5_OID_LEN 7
static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 };
+static unsigned long KRB5U2U_OID[8] = { 1, 2, 840, 113554, 1, 2, 2, 3 };
static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 };
/*
@@ -122,6 +125,28 @@ asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
return 1;
}
+#if 0 /* will be needed later by spnego decoding/encoding of ntlmssp */
+static unsigned char
+asn1_enum_decode(struct asn1_ctx *ctx, __le32 *val)
+{
+ unsigned char ch;
+
+ if (ctx->pointer >= ctx->end) {
+ ctx->error = ASN1_ERR_DEC_EMPTY;
+ return 0;
+ }
+
+ ch = *(ctx->pointer)++; /* ch has 0xa, ptr points to length octet */
+ if ((ch) == ASN1_ENUM) /* if ch value is ENUM, 0xa */
+ *val = *(++(ctx->pointer)); /* value has enum value */
+ else
+ return 0;
+
+ ctx->pointer++;
+ return 1;
+}
+#endif
+
static unsigned char
asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
{
@@ -476,10 +501,9 @@ decode_negTokenInit(unsigned char *security_blob, int length,
unsigned int cls, con, tag, oidlen, rc;
bool use_ntlmssp = false;
bool use_kerberos = false;
+ bool use_kerberosu2u = false;
bool use_mskerberos = false;
- *secType = NTLM; /* BB eventually make Kerberos or NLTMSSP the default*/
-
/* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
asn1_open(&ctx, security_blob, length);
@@ -515,6 +539,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* SPNEGO */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding negTokenInit"));
return 0;
@@ -526,6 +551,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* negTokenInit */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding negTokenInit"));
return 0;
@@ -537,6 +563,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding 2nd part of negTokenInit"));
return 0;
@@ -548,6 +575,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* sequence of */
if (asn1_header_decode
(&ctx, &sequence_end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding 2nd part of negTokenInit"));
@@ -560,6 +588,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
return 0;
}
+ /* list of security mechanisms */
while (!asn1_eoc_decode(&ctx, sequence_end)) {
rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
if (!rc) {
@@ -576,11 +605,15 @@ decode_negTokenInit(unsigned char *security_blob, int length,
if (compare_oid(oid, oidlen, MSKRB5_OID,
MSKRB5_OID_LEN) &&
- !use_kerberos)
+ !use_mskerberos)
use_mskerberos = true;
+ else if (compare_oid(oid, oidlen, KRB5U2U_OID,
+ KRB5U2U_OID_LEN) &&
+ !use_kerberosu2u)
+ use_kerberosu2u = true;
else if (compare_oid(oid, oidlen, KRB5_OID,
KRB5_OID_LEN) &&
- !use_mskerberos)
+ !use_kerberos)
use_kerberos = true;
else if (compare_oid(oid, oidlen, NTLMSSP_OID,
NTLMSSP_OID_LEN))
@@ -593,7 +626,12 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
}
+ /* mechlistMIC */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+ /* Check if we have reached the end of the blob, but with
+ no mechListMic (e.g. NTLMSSP instead of KRB5) */
+ if (ctx.error == ASN1_ERR_DEC_EMPTY)
+ goto decode_negtoken_exit;
cFYI(1, ("Error decoding last part negTokenInit exit3"));
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
@@ -602,6 +640,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
cls, con, tag, end, *end));
return 0;
}
+
+ /* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding last part negTokenInit exit5"));
return 0;
@@ -611,6 +651,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
cls, con, tag, end, *end));
}
+ /* sequence of */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding last part negTokenInit exit 7"));
return 0;
@@ -619,6 +660,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
cls, con, tag, end, *end));
return 0;
}
+
+ /* general string */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding last part negTokenInit exit9"));
return 0;
@@ -630,13 +673,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
cFYI(1, ("Need to call asn1_octets_decode() function for %s",
ctx.pointer)); /* is this UTF-8 or ASCII? */
-
+decode_negtoken_exit:
if (use_kerberos)
*secType = Kerberos;
else if (use_mskerberos)
*secType = MSKerberos;
else if (use_ntlmssp)
- *secType = NTLMSSP;
+ *secType = RawNTLMSSP;
return 1;
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 0d92114195ab..9f669f982c4d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -333,6 +333,27 @@ cifs_destroy_inode(struct inode *inode)
kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
+static void
+cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
+{
+ seq_printf(s, ",addr=");
+
+ switch (server->addr.sockAddr.sin_family) {
+ case AF_INET:
+ seq_printf(s, "%pI4", &server->addr.sockAddr.sin_addr.s_addr);
+ break;
+ case AF_INET6:
+ seq_printf(s, "%pI6",
+ &server->addr.sockAddr6.sin6_addr.s6_addr);
+ if (server->addr.sockAddr6.sin6_scope_id)
+ seq_printf(s, "%%%u",
+ server->addr.sockAddr6.sin6_scope_id);
+ break;
+ default:
+ seq_printf(s, "(unknown)");
+ }
+}
+
/*
* cifs_show_options() is for displaying mount options in /proc/mounts.
* Not all settable options are displayed but most of the important
@@ -343,83 +364,64 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
- struct TCP_Server_Info *server;
cifs_sb = CIFS_SB(m->mnt_sb);
+ tcon = cifs_sb->tcon;
- if (cifs_sb) {
- tcon = cifs_sb->tcon;
- if (tcon) {
- seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
- if (tcon->ses) {
- if (tcon->ses->userName)
- seq_printf(s, ",username=%s",
- tcon->ses->userName);
- if (tcon->ses->domainName)
- seq_printf(s, ",domain=%s",
- tcon->ses->domainName);
- server = tcon->ses->server;
- if (server) {
- seq_printf(s, ",addr=");
- switch (server->addr.sockAddr6.
- sin6_family) {
- case AF_INET6:
- seq_printf(s, "%pI6",
- &server->addr.sockAddr6.sin6_addr);
- break;
- case AF_INET:
- seq_printf(s, "%pI4",
- &server->addr.sockAddr.sin_addr.s_addr);
- break;
- }
- }
- }
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
- !(tcon->unix_ext))
- seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
- !(tcon->unix_ext))
- seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
- if (!tcon->unix_ext) {
- seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
+ seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
+ if (tcon->ses->userName)
+ seq_printf(s, ",username=%s", tcon->ses->userName);
+ if (tcon->ses->domainName)
+ seq_printf(s, ",domain=%s", tcon->ses->domainName);
+
+ seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
+ seq_printf(s, ",forceuid");
+
+ seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
+ seq_printf(s, ",forcegid");
+
+ cifs_show_address(s, tcon->ses->server);
+
+ if (!tcon->unix_ext)
+ seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
cifs_sb->mnt_file_mode,
cifs_sb->mnt_dir_mode);
- }
- if (tcon->seal)
- seq_printf(s, ",seal");
- if (tcon->nocase)
- seq_printf(s, ",nocase");
- if (tcon->retry)
- seq_printf(s, ",hard");
- }
- if (cifs_sb->prepath)
- seq_printf(s, ",prepath=%s", cifs_sb->prepath);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
- seq_printf(s, ",posixpaths");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
- seq_printf(s, ",setuids");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
- seq_printf(s, ",serverino");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
- seq_printf(s, ",directio");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
- seq_printf(s, ",nouser_xattr");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
- seq_printf(s, ",mapchars");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
- seq_printf(s, ",sfu");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- seq_printf(s, ",nobrl");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
- seq_printf(s, ",cifsacl");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
- seq_printf(s, ",dynperm");
- if (m->mnt_sb->s_flags & MS_POSIXACL)
- seq_printf(s, ",acl");
-
- seq_printf(s, ",rsize=%d", cifs_sb->rsize);
- seq_printf(s, ",wsize=%d", cifs_sb->wsize);
- }
+ if (tcon->seal)
+ seq_printf(s, ",seal");
+ if (tcon->nocase)
+ seq_printf(s, ",nocase");
+ if (tcon->retry)
+ seq_printf(s, ",hard");
+ if (cifs_sb->prepath)
+ seq_printf(s, ",prepath=%s", cifs_sb->prepath);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
+ seq_printf(s, ",posixpaths");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
+ seq_printf(s, ",setuids");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ seq_printf(s, ",serverino");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+ seq_printf(s, ",directio");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ seq_printf(s, ",nouser_xattr");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+ seq_printf(s, ",mapchars");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ seq_printf(s, ",sfu");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ seq_printf(s, ",nobrl");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
+ seq_printf(s, ",cifsacl");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+ seq_printf(s, ",dynperm");
+ if (m->mnt_sb->s_flags & MS_POSIXACL)
+ seq_printf(s, ",acl");
+
+ seq_printf(s, ",rsize=%d", cifs_sb->rsize);
+ seq_printf(s, ",wsize=%d", cifs_sb->wsize);
+
return 0;
}
@@ -535,9 +537,14 @@ static void cifs_umount_begin(struct super_block *sb)
if (tcon == NULL)
return;
- lock_kernel();
read_lock(&cifs_tcp_ses_lock);
- if (tcon->tc_count == 1)
+ if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
+ /* we have other mounts to same share or we have
+ already tried to force umount this and woken up
+ all waiting network requests, nothing to do */
+ read_unlock(&cifs_tcp_ses_lock);
+ return;
+ } else if (tcon->tc_count == 1)
tcon->tidStatus = CifsExiting;
read_unlock(&cifs_tcp_ses_lock);
@@ -552,9 +559,7 @@ static void cifs_umount_begin(struct super_block *sb)
wake_up_all(&tcon->ses->server->response_q);
msleep(1);
}
-/* BB FIXME - finish add checks for tidStatus BB */
- unlock_kernel();
return;
}
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 9570a0e8023f..586df24c9abb 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -24,6 +24,19 @@
#define ROOT_I 2
+/*
+ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
+ * so that it will fit.
+ */
+static inline ino_t
+cifs_uniqueid_to_ino_t(u64 fileid)
+{
+ ino_t ino = (ino_t) fileid;
+ if (sizeof(ino_t) < sizeof(u64))
+ ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8;
+ return ino;
+}
+
extern struct file_system_type cifs_fs_type;
extern const struct address_space_operations cifs_addr_ops;
extern const struct address_space_operations cifs_addr_ops_smallbuf;
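
The cifs_uniqueid_to_ino_t() helper added above folds a 64-bit server UniqueId into a 32-bit ino_t by XOR-ing the high word into the low word. Below is a minimal standalone sketch of that fold which hard-codes the 32-bit ino_t case; fold_uniqueid() is a made-up name used only for this illustration.

#include <stdint.h>
#include <stdio.h>

static uint32_t fold_uniqueid(uint64_t fileid)
{
	uint32_t ino = (uint32_t)fileid;	/* keep the low 32 bits */

	ino ^= (uint32_t)(fileid >> 32);	/* fold in the high 32 bits */
	return ino;
}

int main(void)
{
	/* ids that differ only in the high word still map to distinct values */
	printf("0x%x\n", fold_uniqueid(0x0000000100000002ULL));	/* 0x3 */
	printf("0x%x\n", fold_uniqueid(0x0000000200000002ULL));	/* 0x0 */
	return 0;
}
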
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a61ab772c6f6..e6435cba8113 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -83,7 +83,7 @@ enum securityEnum {
NTLM, /* Legacy NTLM012 auth with NTLM hash */
NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
- NTLMSSP, /* NTLMSSP via SPNEGO, NTLMv2 hash */
+/* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */
Kerberos, /* Kerberos via SPNEGO */
MSKerberos, /* MS Kerberos via SPNEGO */
};
@@ -371,6 +371,7 @@ struct cifsInodeInfo {
bool oplockPending:1;
bool delete_pending:1; /* DELETE_ON_CLOSE is set */
u64 server_eof; /* current file size on server */
+ u64 uniqueid; /* server inode number */
struct inode vfs_inode;
};
@@ -472,6 +473,30 @@ struct dfs_info3_param {
char *node_name;
};
+/*
+ * common struct for holding inode info when searching for or updating an
+ * inode with new info
+ */
+
+#define CIFS_FATTR_DFS_REFERRAL 0x1
+
+struct cifs_fattr {
+ u32 cf_flags;
+ u32 cf_cifsattrs;
+ u64 cf_uniqueid;
+ u64 cf_eof;
+ u64 cf_bytes;
+ uid_t cf_uid;
+ gid_t cf_gid;
+ umode_t cf_mode;
+ dev_t cf_rdev;
+ unsigned int cf_nlink;
+ unsigned int cf_dtype;
+ struct timespec cf_atime;
+ struct timespec cf_mtime;
+ struct timespec cf_ctime;
+};
+
static inline void free_dfs_info_param(struct dfs_info3_param *param)
{
if (param) {
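
The new struct cifs_fattr separates parsing of on-the-wire attributes from updating an inode: wire data is converted into a cifs_fattr first, and that fattr is then either applied to an existing inode or used to look one up. A minimal sketch of the call pattern these changes establish, assuming the CIFS kernel headers introduced by this patch; lookup_or_update() is a hypothetical helper and error handling is omitted.

/* Sketch only: combines the helpers this patch adds, not actual kernel code. */
static struct inode *lookup_or_update(struct super_block *sb,
				      struct inode *existing,
				      FILE_UNIX_BASIC_INFO *info)
{
	struct cifs_fattr fattr;

	/* step 1: parse the wire format into the neutral fattr */
	cifs_unix_basic_to_fattr(&fattr, info, CIFS_SB(sb));

	/* step 2: apply it to an existing inode, or find/create one */
	if (existing) {
		cifs_fattr_to_inode(existing, &fattr);
		return existing;
	}
	return cifs_iget(sb, &fattr);
}
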
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index a785f69dbc9f..2d07f890a842 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -2328,19 +2328,7 @@ struct file_attrib_tag {
typedef struct {
__le32 NextEntryOffset;
__u32 ResumeKey; /* as with FileIndex - no need to convert */
- __le64 EndOfFile;
- __le64 NumOfBytes;
- __le64 LastStatusChange; /*SNIA specs DCE time for the 3 time fields */
- __le64 LastAccessTime;
- __le64 LastModificationTime;
- __le64 Uid;
- __le64 Gid;
- __le32 Type;
- __le64 DevMajor;
- __le64 DevMinor;
- __le64 UniqueId;
- __le64 Permissions;
- __le64 Nlinks;
+ FILE_UNIX_BASIC_INFO basic;
char FileName[1];
} __attribute__((packed)) FILE_UNIX_INFO; /* level 0x202 */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f9452329bcce..b2bd83fd2aa4 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -74,7 +74,7 @@ extern unsigned int smbCalcSize(struct smb_hdr *ptr);
extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
enum securityEnum *secType);
-extern int cifs_inet_pton(const int, const char *source, void *dst);
+extern int cifs_convert_address(char *src, void *dst);
extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
@@ -98,9 +98,14 @@ extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
extern int cifs_posix_open(char *full_path, struct inode **pinode,
struct super_block *sb, int mode, int oflags,
int *poplock, __u16 *pnetfid, int xid);
-extern void posix_fill_in_inode(struct inode *tmp_inode,
- FILE_UNIX_BASIC_INFO *pData, int isNewInode);
+extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
+ FILE_UNIX_BASIC_INFO *info,
+ struct cifs_sb_info *cifs_sb);
+extern void cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr);
extern struct inode *cifs_new_inode(struct super_block *sb, __u64 *inum);
+extern struct inode *cifs_iget(struct super_block *sb,
+ struct cifs_fattr *fattr);
+
extern int cifs_get_inode_info(struct inode **pinode,
const unsigned char *search_path,
FILE_ALL_INFO *pfile_info,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b84c61d5bca4..61007c627497 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -594,7 +594,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
else if (secFlags & CIFSSEC_MAY_KRB5)
server->secType = Kerberos;
else if (secFlags & CIFSSEC_MAY_NTLMSSP)
- server->secType = NTLMSSP;
+ server->secType = RawNTLMSSP;
else if (secFlags & CIFSSEC_MAY_LANMAN)
server->secType = LANMAN;
/* #ifdef CONFIG_CIFS_EXPERIMENTAL
@@ -729,7 +729,7 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
* the tcon is no longer on the list, so no need to take lock before
* checking this.
*/
- if (tcon->need_reconnect)
+ if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 97f4311b9a8e..e16d7592116a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -70,7 +70,6 @@ struct smb_vol {
mode_t file_mode;
mode_t dir_mode;
unsigned secFlg;
- bool rw:1;
bool retry:1;
bool intr:1;
bool setuids:1;
@@ -832,7 +831,6 @@ cifs_parse_mount_options(char *options, const char *devname,
vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;
/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
- vol->rw = true;
/* default is always to request posix paths. */
vol->posix_paths = 1;
/* default to using server inode numbers where available */
@@ -1199,7 +1197,9 @@ cifs_parse_mount_options(char *options, const char *devname,
} else if (strnicmp(data, "guest", 5) == 0) {
/* ignore */
} else if (strnicmp(data, "rw", 2) == 0) {
- vol->rw = true;
+ /* ignore */
+ } else if (strnicmp(data, "ro", 2) == 0) {
+ /* ignore */
} else if (strnicmp(data, "noblocksend", 11) == 0) {
vol->noblocksnd = 1;
} else if (strnicmp(data, "noautotune", 10) == 0) {
@@ -1218,8 +1218,6 @@ cifs_parse_mount_options(char *options, const char *devname,
parse these options again and set anything and it
is ok to just ignore them */
continue;
- } else if (strnicmp(data, "ro", 2) == 0) {
- vol->rw = false;
} else if (strnicmp(data, "hard", 4) == 0) {
vol->retry = 1;
} else if (strnicmp(data, "soft", 4) == 0) {
@@ -1386,8 +1384,10 @@ cifs_find_tcp_session(struct sockaddr_storage *addr)
server->addr.sockAddr.sin_addr.s_addr))
continue;
else if (addr->ss_family == AF_INET6 &&
- !ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr,
- &addr6->sin6_addr))
+ (!ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr,
+ &addr6->sin6_addr) ||
+ server->addr.sockAddr6.sin6_scope_id !=
+ addr6->sin6_scope_id))
continue;
++server->srv_count;
@@ -1433,28 +1433,15 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
memset(&addr, 0, sizeof(struct sockaddr_storage));
- if (volume_info->UNCip && volume_info->UNC) {
- rc = cifs_inet_pton(AF_INET, volume_info->UNCip,
- &sin_server->sin_addr.s_addr);
-
- if (rc <= 0) {
- /* not ipv4 address, try ipv6 */
- rc = cifs_inet_pton(AF_INET6, volume_info->UNCip,
- &sin_server6->sin6_addr.in6_u);
- if (rc > 0)
- addr.ss_family = AF_INET6;
- } else {
- addr.ss_family = AF_INET;
- }
+ cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip));
- if (rc <= 0) {
+ if (volume_info->UNCip && volume_info->UNC) {
+ rc = cifs_convert_address(volume_info->UNCip, &addr);
+ if (!rc) {
/* we failed translating address */
rc = -EINVAL;
goto out_err;
}
-
- cFYI(1, ("UNC: %s ip: %s", volume_info->UNC,
- volume_info->UNCip));
} else if (volume_info->UNCip) {
/* BB using ip addr as tcp_ses name to connect to the
DFS root below */
@@ -1513,14 +1500,14 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
cFYI(1, ("attempting ipv6 connect"));
/* BB should we allow ipv6 on port 139? */
/* other OS never observed in Wild doing 139 with v6 */
+ sin_server6->sin6_port = htons(volume_info->port);
memcpy(&tcp_ses->addr.sockAddr6, sin_server6,
sizeof(struct sockaddr_in6));
- sin_server6->sin6_port = htons(volume_info->port);
rc = ipv6_connect(tcp_ses);
} else {
+ sin_server->sin_port = htons(volume_info->port);
memcpy(&tcp_ses->addr.sockAddr, sin_server,
sizeof(struct sockaddr_in));
- sin_server->sin_port = htons(volume_info->port);
rc = ipv4_connect(tcp_ses);
}
if (rc < 0) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3758965d73d5..a40054faed7f 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -188,6 +188,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
FILE_UNIX_BASIC_INFO *presp_data;
__u32 posix_flags = 0;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifs_fattr fattr;
cFYI(1, ("posix open %s", full_path));
@@ -236,22 +237,21 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
if (presp_data->Type == cpu_to_le32(-1))
goto posix_open_ret; /* open ok, caller does qpathinfo */
- /* get new inode and set it up */
if (!pinode)
goto posix_open_ret; /* caller does not need info */
+ cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
+
+ /* get new inode and set it up */
if (*pinode == NULL) {
- __u64 unique_id = le64_to_cpu(presp_data->UniqueId);
- *pinode = cifs_new_inode(sb, &unique_id);
+ *pinode = cifs_iget(sb, &fattr);
+ if (!*pinode) {
+ rc = -ENOMEM;
+ goto posix_open_ret;
+ }
+ } else {
+ cifs_fattr_to_inode(*pinode, &fattr);
}
- /* else an inode was passed in. Update its info, don't create one */
-
- /* We do not need to close the file if new_inode fails since
- the caller will retry qpathinfo as long as inode is null */
- if (*pinode == NULL)
- goto posix_open_ret;
-
- posix_fill_in_inode(*pinode, presp_data, 1);
cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only);
@@ -307,8 +307,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if (oplockEnabled)
@@ -540,8 +541,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
kfree(full_path);
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
rc = CIFSSMBOpen(xid, pTcon, full_path,
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index df4a306f697e..87948147d7ec 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -35,26 +35,11 @@
* 0 - name is not IP
*/
static int
-is_ip(const char *name)
+is_ip(char *name)
{
- int rc;
- struct sockaddr_in sin_server;
- struct sockaddr_in6 sin_server6;
-
- rc = cifs_inet_pton(AF_INET, name,
- &sin_server.sin_addr.s_addr);
-
- if (rc <= 0) {
- /* not ipv4 address, try ipv6 */
- rc = cifs_inet_pton(AF_INET6, name,
- &sin_server6.sin6_addr.in6_u);
- if (rc > 0)
- return 1;
- } else {
- return 1;
- }
- /* we failed translating address */
- return 0;
+ struct sockaddr_storage ss;
+
+ return cifs_convert_address(name, &ss);
}
static int
@@ -72,7 +57,7 @@ dns_resolver_instantiate(struct key *key, const void *data,
ip[datalen] = '\0';
/* make sure this looks like an address */
- if (!is_ip((const char *) ip)) {
+ if (!is_ip(ip)) {
kfree(ip);
return -EINVAL;
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 06866841b97f..97ce4bf89d15 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -300,14 +300,16 @@ int cifs_open(struct inode *inode, struct file *file)
pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
pCifsFile = cifs_fill_filedata(file);
if (pCifsFile) {
+ rc = 0;
FreeXid(xid);
- return 0;
+ return rc;
}
full_path = build_path_from_dentry(file->f_path.dentry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
@@ -491,11 +493,12 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
return -EBADF;
xid = GetXid();
- mutex_unlock(&pCifsFile->fh_mutex);
+ mutex_lock(&pCifsFile->fh_mutex);
if (!pCifsFile->invalidHandle) {
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
+ rc = 0;
FreeXid(xid);
- return 0;
+ return rc;
}
if (file->f_path.dentry == NULL) {
@@ -524,7 +527,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
if (full_path == NULL) {
rc = -ENOMEM;
reopen_error_exit:
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
FreeXid(xid);
return rc;
}
@@ -566,14 +569,14 @@ reopen_error_exit:
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
cFYI(1, ("cifs_open returned 0x%x", rc));
cFYI(1, ("oplock: %d", oplock));
} else {
reopen_success:
pCifsFile->netfid = netfid;
pCifsFile->invalidHandle = false;
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
pCifsInode = CIFS_I(inode);
if (pCifsInode) {
if (can_flush) {
@@ -845,8 +848,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
tcon = cifs_sb->tcon;
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
@@ -1805,8 +1809,9 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
pTcon = cifs_sb->tcon;
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
open_file = (struct cifsFileInfo *)file->private_data;
@@ -1885,8 +1890,9 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
pTcon = cifs_sb->tcon;
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
open_file = (struct cifsFileInfo *)file->private_data;
@@ -2019,8 +2025,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
xid = GetXid();
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
open_file = (struct cifsFileInfo *)file->private_data;
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -2185,8 +2192,9 @@ static int cifs_readpage(struct file *file, struct page *page)
xid = GetXid();
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
cFYI(1, ("readpage %p at offset %d 0x%x\n",
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index fad882b075ba..b22379610d71 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -77,127 +77,146 @@ static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
}
}
-static void cifs_unix_info_to_inode(struct inode *inode,
- FILE_UNIX_BASIC_INFO *info, int force_uid_gid)
+/* populate an inode with info from a cifs_fattr struct */
+void
+cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
- __u64 num_of_bytes = le64_to_cpu(info->NumOfBytes);
- __u64 end_of_file = le64_to_cpu(info->EndOfFile);
+ struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+ unsigned long now = jiffies;
+
+ inode->i_atime = fattr->cf_atime;
+ inode->i_mtime = fattr->cf_mtime;
+ inode->i_ctime = fattr->cf_ctime;
+ inode->i_mode = fattr->cf_mode;
+ inode->i_rdev = fattr->cf_rdev;
+ inode->i_nlink = fattr->cf_nlink;
+ inode->i_uid = fattr->cf_uid;
+ inode->i_gid = fattr->cf_gid;
+
+ cifs_i->cifsAttrs = fattr->cf_cifsattrs;
+ cifs_i->uniqueid = fattr->cf_uniqueid;
- inode->i_atime = cifs_NTtimeToUnix(info->LastAccessTime);
- inode->i_mtime =
- cifs_NTtimeToUnix(info->LastModificationTime);
- inode->i_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
- inode->i_mode = le64_to_cpu(info->Permissions);
+ cFYI(1, ("inode 0x%p old_time=%ld new_time=%ld", inode,
+ cifs_i->time, now));
+ cifs_i->time = now;
+
+ /*
+ * Can't safely change the file size here if the client is writing to
+ * it due to potential races.
+ */
+ spin_lock(&inode->i_lock);
+ if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
+ i_size_write(inode, fattr->cf_eof);
+
+ /*
+ * i_blocks is not related to (i_size / i_blksize),
+ * but instead 512 byte (2**9) size is required for
+ * calculating num blocks.
+ */
+ inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
+ }
+ spin_unlock(&inode->i_lock);
+
+ cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL);
+}
+
+/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */
+void
+cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
+ struct cifs_sb_info *cifs_sb)
+{
+ memset(fattr, 0, sizeof(*fattr));
+ fattr->cf_uniqueid = le64_to_cpu(info->UniqueId);
+ fattr->cf_bytes = le64_to_cpu(info->NumOfBytes);
+ fattr->cf_eof = le64_to_cpu(info->EndOfFile);
+
+ fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
+ fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
+ fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
+ fattr->cf_mode = le64_to_cpu(info->Permissions);
/*
* Since we set the inode type below we need to mask off
* to avoid strange results if bits set above.
*/
- inode->i_mode &= ~S_IFMT;
+ fattr->cf_mode &= ~S_IFMT;
switch (le32_to_cpu(info->Type)) {
case UNIX_FILE:
- inode->i_mode |= S_IFREG;
+ fattr->cf_mode |= S_IFREG;
+ fattr->cf_dtype = DT_REG;
break;
case UNIX_SYMLINK:
- inode->i_mode |= S_IFLNK;
+ fattr->cf_mode |= S_IFLNK;
+ fattr->cf_dtype = DT_LNK;
break;
case UNIX_DIR:
- inode->i_mode |= S_IFDIR;
+ fattr->cf_mode |= S_IFDIR;
+ fattr->cf_dtype = DT_DIR;
break;
case UNIX_CHARDEV:
- inode->i_mode |= S_IFCHR;
- inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
- le64_to_cpu(info->DevMinor) & MINORMASK);
+ fattr->cf_mode |= S_IFCHR;
+ fattr->cf_dtype = DT_CHR;
+ fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+ le64_to_cpu(info->DevMinor) & MINORMASK);
break;
case UNIX_BLOCKDEV:
- inode->i_mode |= S_IFBLK;
- inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
- le64_to_cpu(info->DevMinor) & MINORMASK);
+ fattr->cf_mode |= S_IFBLK;
+ fattr->cf_dtype = DT_BLK;
+ fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+ le64_to_cpu(info->DevMinor) & MINORMASK);
break;
case UNIX_FIFO:
- inode->i_mode |= S_IFIFO;
+ fattr->cf_mode |= S_IFIFO;
+ fattr->cf_dtype = DT_FIFO;
break;
case UNIX_SOCKET:
- inode->i_mode |= S_IFSOCK;
+ fattr->cf_mode |= S_IFSOCK;
+ fattr->cf_dtype = DT_SOCK;
break;
default:
/* safest to call it a file if we do not know */
- inode->i_mode |= S_IFREG;
+ fattr->cf_mode |= S_IFREG;
+ fattr->cf_dtype = DT_REG;
cFYI(1, ("unknown type %d", le32_to_cpu(info->Type)));
break;
}
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) &&
- !force_uid_gid)
- inode->i_uid = cifs_sb->mnt_uid;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
+ fattr->cf_uid = cifs_sb->mnt_uid;
else
- inode->i_uid = le64_to_cpu(info->Uid);
+ fattr->cf_uid = le64_to_cpu(info->Uid);
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) &&
- !force_uid_gid)
- inode->i_gid = cifs_sb->mnt_gid;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
+ fattr->cf_gid = cifs_sb->mnt_gid;
else
- inode->i_gid = le64_to_cpu(info->Gid);
-
- inode->i_nlink = le64_to_cpu(info->Nlinks);
-
- cifsInfo->server_eof = end_of_file;
- spin_lock(&inode->i_lock);
- if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /*
- * We can not safely change the file size here if the client
- * is writing to it due to potential races.
- */
- i_size_write(inode, end_of_file);
+ fattr->cf_gid = le64_to_cpu(info->Gid);
- /*
- * i_blocks is not related to (i_size / i_blksize),
- * but instead 512 byte (2**9) size is required for
- * calculating num blocks.
- */
- inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
- }
- spin_unlock(&inode->i_lock);
+ fattr->cf_nlink = le64_to_cpu(info->Nlinks);
}
-
/*
- * Needed to setup inode data for the directory which is the
- * junction to the new submount (ie to setup the fake directory
- * which represents a DFS referral)
+ * Fill a cifs_fattr struct with fake inode info.
+ *
+ * Needed to set up cifs_fattr data for the directory which is the
+ * junction to the new submount (i.e. to set up the fake directory
+ * which represents a DFS referral).
*/
-static void fill_fake_finddataunix(FILE_UNIX_BASIC_INFO *pfnd_dat,
- struct super_block *sb)
+void
+cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
{
- struct inode *pinode = NULL;
-
- memset(pfnd_dat, 0, sizeof(FILE_UNIX_BASIC_INFO));
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
-/* __le64 pfnd_dat->EndOfFile = cpu_to_le64(0);
- __le64 pfnd_dat->NumOfBytes = cpu_to_le64(0);
- __u64 UniqueId = 0; */
- pfnd_dat->LastStatusChange =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->LastAccessTime =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->LastModificationTime =
- cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- pfnd_dat->Type = cpu_to_le32(UNIX_DIR);
- pfnd_dat->Permissions = cpu_to_le64(S_IXUGO | S_IRWXU);
- pfnd_dat->Nlinks = cpu_to_le64(2);
- if (sb->s_root)
- pinode = sb->s_root->d_inode;
- if (pinode == NULL)
- return;
-
- /* fill in default values for the remaining based on root
- inode since we can not query the server for this inode info */
- pfnd_dat->DevMajor = cpu_to_le64(MAJOR(pinode->i_rdev));
- pfnd_dat->DevMinor = cpu_to_le64(MINOR(pinode->i_rdev));
- pfnd_dat->Uid = cpu_to_le64(pinode->i_uid);
- pfnd_dat->Gid = cpu_to_le64(pinode->i_gid);
+ cFYI(1, ("creating fake fattr for DFS referral"));
+
+ memset(fattr, 0, sizeof(*fattr));
+ fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
+ fattr->cf_uid = cifs_sb->mnt_uid;
+ fattr->cf_gid = cifs_sb->mnt_gid;
+ fattr->cf_atime = CURRENT_TIME;
+ fattr->cf_ctime = CURRENT_TIME;
+ fattr->cf_mtime = CURRENT_TIME;
+ fattr->cf_nlink = 2;
+ fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;
}
/**
@@ -244,66 +263,42 @@ cifs_new_inode(struct super_block *sb, __u64 *inum)
}
int cifs_get_inode_info_unix(struct inode **pinode,
- const unsigned char *full_path, struct super_block *sb, int xid)
+ const unsigned char *full_path,
+ struct super_block *sb, int xid)
{
- int rc = 0;
+ int rc;
FILE_UNIX_BASIC_INFO find_data;
- struct cifsTconInfo *pTcon;
- struct inode *inode;
+ struct cifs_fattr fattr;
+ struct cifsTconInfo *tcon;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- bool is_dfs_referral = false;
- struct cifsInodeInfo *cifsInfo;
- __u64 num_of_bytes;
- __u64 end_of_file;
- pTcon = cifs_sb->tcon;
+ tcon = cifs_sb->tcon;
cFYI(1, ("Getting info on %s", full_path));
/* could have done a find first instead but this returns more info */
- rc = CIFSSMBUnixQPathInfo(xid, pTcon, full_path, &find_data,
+ rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc == -EREMOTE && !is_dfs_referral) {
- is_dfs_referral = true;
- cFYI(DBG2, ("DFS ref"));
- /* for DFS, server does not give us real inode data */
- fill_fake_finddataunix(&find_data, sb);
- rc = 0;
- } else if (rc)
- goto cgiiu_exit;
- num_of_bytes = le64_to_cpu(find_data.NumOfBytes);
- end_of_file = le64_to_cpu(find_data.EndOfFile);
+ if (!rc) {
+ cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
+ } else if (rc == -EREMOTE) {
+ cifs_create_dfs_fattr(&fattr, sb);
+ rc = 0;
+ } else {
+ return rc;
+ }
- /* get new inode */
if (*pinode == NULL) {
- __u64 unique_id = le64_to_cpu(find_data.UniqueId);
- *pinode = cifs_new_inode(sb, &unique_id);
- if (*pinode == NULL) {
+ /* get new inode */
+ *pinode = cifs_iget(sb, &fattr);
+ if (!*pinode)
rc = -ENOMEM;
- goto cgiiu_exit;
- }
+ } else {
+ /* we already have inode, update it */
+ cifs_fattr_to_inode(*pinode, &fattr);
}
- inode = *pinode;
- cifsInfo = CIFS_I(inode);
-
- cFYI(1, ("Old time %ld", cifsInfo->time));
- cifsInfo->time = jiffies;
- cFYI(1, ("New time %ld", cifsInfo->time));
- /* this is ok to set on every inode revalidate */
- atomic_set(&cifsInfo->inUse, 1);
-
- cifs_unix_info_to_inode(inode, &find_data, 0);
-
- if (num_of_bytes < end_of_file)
- cFYI(1, ("allocation size less than end of file"));
- cFYI(1, ("Size %ld and blocks %llu",
- (unsigned long) inode->i_size,
- (unsigned long long)inode->i_blocks));
-
- cifs_set_ops(inode, is_dfs_referral);
-cgiiu_exit:
return rc;
}
@@ -695,33 +690,85 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb)
return full_path;
}
+static int
+cifs_find_inode(struct inode *inode, void *opaque)
+{
+ struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
+
+ if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
+ return 0;
+
+ return 1;
+}
+
+static int
+cifs_init_inode(struct inode *inode, void *opaque)
+{
+ struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
+
+ CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
+ return 0;
+}
+
+/* Given fattrs, get a corresponding inode */
+struct inode *
+cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
+{
+ unsigned long hash;
+ struct inode *inode;
+
+ cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid));
+
+ /* hash down to 32-bits on 32-bit arch */
+ hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
+
+ inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
+
+ /* we have fattrs in hand, update the inode */
+ if (inode) {
+ cifs_fattr_to_inode(inode, fattr);
+ if (sb->s_flags & MS_NOATIME)
+ inode->i_flags |= S_NOATIME | S_NOCMTIME;
+ if (inode->i_state & I_NEW) {
+ inode->i_ino = hash;
+ unlock_new_inode(inode);
+ }
+ }
+
+ return inode;
+}
+
/* gets root inode */
struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
{
int xid;
struct cifs_sb_info *cifs_sb;
- struct inode *inode;
+ struct inode *inode = NULL;
long rc;
char *full_path;
- inode = iget_locked(sb, ino);
- if (!inode)
- return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
-
- cifs_sb = CIFS_SB(inode->i_sb);
+ cifs_sb = CIFS_SB(sb);
full_path = cifs_build_path_to_root(cifs_sb);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
xid = GetXid();
- if (cifs_sb->tcon->unix_ext)
- rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
- xid);
- else
+ if (cifs_sb->tcon->unix_ext) {
+ rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
rc = cifs_get_inode_info(&inode, full_path, NULL, inode->i_sb,
xid, NULL);
+ unlock_new_inode(inode);
+ }
+
if (rc && cifs_sb->tcon->ipc) {
cFYI(1, ("ipc connection - fake read inode"));
inode->i_mode |= S_IFDIR;
@@ -737,7 +784,6 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
return ERR_PTR(rc);
}
- unlock_new_inode(inode);
kfree(full_path);
/* can not call macro FreeXid here since in a void func
@@ -988,8 +1034,9 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
* sb->s_vfs_rename_mutex here */
full_path = build_path_from_dentry(dentry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -1062,44 +1109,6 @@ out_reval:
return rc;
}
-void posix_fill_in_inode(struct inode *tmp_inode,
- FILE_UNIX_BASIC_INFO *pData, int isNewInode)
-{
- struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
- loff_t local_size;
- struct timespec local_mtime;
-
- cifsInfo->time = jiffies;
- atomic_inc(&cifsInfo->inUse);
-
- /* save mtime and size */
- local_mtime = tmp_inode->i_mtime;
- local_size = tmp_inode->i_size;
-
- cifs_unix_info_to_inode(tmp_inode, pData, 1);
- cifs_set_ops(tmp_inode, false);
-
- if (!S_ISREG(tmp_inode->i_mode))
- return;
-
- /*
- * No sense invalidating pages for new inode
- * since we we have not started caching
- * readahead file data yet.
- */
- if (isNewInode)
- return;
-
- if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
- (local_size == tmp_inode->i_size)) {
- cFYI(1, ("inode exists but unchanged"));
- } else {
- /* file may have changed on server */
- cFYI(1, ("invalidate inode, readdir detected change"));
- invalidate_remote_inode(tmp_inode);
- }
-}
-
int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
{
int rc = 0, tmprc;
@@ -1108,6 +1117,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
struct cifsTconInfo *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
+ struct cifs_fattr fattr;
cFYI(1, ("In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode));
@@ -1118,8 +1128,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if ((pTcon->ses->capabilities & CAP_UNIX) &&
@@ -1146,7 +1157,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
cFYI(1, ("posix mkdir returned 0x%x", rc));
d_drop(direntry);
} else {
- __u64 unique_id;
if (pInfo->Type == cpu_to_le32(-1)) {
/* no return info, go query for it */
kfree(pInfo);
@@ -1160,20 +1170,15 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
else
direntry->d_op = &cifs_dentry_ops;
- unique_id = le64_to_cpu(pInfo->UniqueId);
- newinode = cifs_new_inode(inode->i_sb, &unique_id);
- if (newinode == NULL) {
+ cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
+ newinode = cifs_iget(inode->i_sb, &fattr);
+ if (!newinode) {
kfree(pInfo);
goto mkdir_get_info;
}
- newinode->i_nlink = 2;
d_instantiate(direntry, newinode);
- /* we already checked in POSIXCreate whether
- frame was long enough */
- posix_fill_in_inode(direntry->d_inode,
- pInfo, 1 /* NewInode */);
#ifdef CONFIG_CIFS_DEBUG2
cFYI(1, ("instantiated dentry %p %s to inode %p",
direntry, direntry->d_name.name, newinode));
@@ -1303,8 +1308,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls,
@@ -1508,8 +1514,9 @@ int cifs_revalidate(struct dentry *direntry)
since that would deadlock */
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
"jiffies %ld", full_path, direntry->d_inode,
@@ -1618,6 +1625,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
if (!err) {
generic_fillattr(dentry->d_inode, stat);
stat->blksize = CIFS_MAX_MSGSIZE;
+ stat->ino = CIFS_I(dentry->d_inode)->uniqueid;
}
return err;
}
@@ -1911,8 +1919,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/*
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index cd83c53fcbb5..fc1e0487eaee 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -172,8 +172,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
cFYI(1, ("Full path: %s", full_path));
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 32d6baa0a54f..bd6d6895730d 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -133,10 +133,12 @@ static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
{0, 0}
};
-/* Convert string containing dotted ip address to binary form */
-/* returns 0 if invalid address */
-
-int
+/*
+ * Convert a string containing text IPv4 or IPv6 address to binary form.
+ *
+ * Returns 0 on failure.
+ */
+static int
cifs_inet_pton(const int address_family, const char *cp, void *dst)
{
int ret = 0;
@@ -153,6 +155,52 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst)
return ret;
}
+/*
+ * Try to convert a string to an IPv4 address and then attempt to convert
+ * it to an IPv6 address if that fails. Set the family field if either
+ * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to
+ * treat the part following it as a numeric sin6_scope_id.
+ *
+ * Returns 0 on failure.
+ */
+int
+cifs_convert_address(char *src, void *dst)
+{
+ int rc;
+ char *pct, *endp;
+ struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
+ struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
+
+ /* IPv4 address */
+ if (cifs_inet_pton(AF_INET, src, &s4->sin_addr.s_addr)) {
+ s4->sin_family = AF_INET;
+ return 1;
+ }
+
+ /* temporarily terminate string */
+ pct = strchr(src, '%');
+ if (pct)
+ *pct = '\0';
+
+ rc = cifs_inet_pton(AF_INET6, src, &s6->sin6_addr.s6_addr);
+
+ /* repair temp termination (if any) and make pct point to scopeid */
+ if (pct)
+ *pct++ = '%';
+
+ if (!rc)
+ return rc;
+
+ s6->sin6_family = AF_INET6;
+ if (pct) {
+ s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0);
+ if (!*pct || *endp)
+ return 0;
+ }
+
+ return rc;
+}
+
/*****************************************************************************
convert a NT status code to a dos class/code
*****************************************************************************/
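
cifs_convert_address() above tries IPv4 first, then IPv6, and treats anything after a '%' as a numeric sin6_scope_id. The userspace sketch below mirrors that parse order with the standard inet_pton()/strtoul() calls; parse_addr() is a hypothetical name and the snippet is illustrative only, not the kernel helper.

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int parse_addr(char *src, struct sockaddr_storage *dst)
{
	struct sockaddr_in *s4 = (struct sockaddr_in *)dst;
	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)dst;
	char *pct = strchr(src, '%');

	/* IPv4 first */
	if (inet_pton(AF_INET, src, &s4->sin_addr) == 1) {
		s4->sin_family = AF_INET;
		return 1;
	}

	/* hide "%scope" from inet_pton, then try IPv6 */
	if (pct)
		*pct = '\0';
	if (inet_pton(AF_INET6, src, &s6->sin6_addr) != 1) {
		if (pct)
			*pct = '%';	/* restore the string */
		return 0;
	}
	if (pct) {
		*pct = '%';
		s6->sin6_scope_id = (uint32_t)strtoul(pct + 1, NULL, 0);
	}
	s6->sin6_family = AF_INET6;
	return 1;
}

int main(void)
{
	struct sockaddr_storage ss;
	char buf[] = "fe80::1%2";	/* link-local address with scope id 2 */

	return parse_addr(buf, &ss) == 1 ? 0 : 1;
}
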
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 86d0055dc529..231aa6953f83 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -63,6 +63,55 @@ static inline void dump_cifs_file_struct(struct file *file, char *label)
}
#endif /* DEBUG2 */
+/*
+ * Find the dentry that matches "name". If there isn't one, create one. If it's
+ * a negative dentry or the uniqueid changed, then drop it and recreate it.
+ */
+static struct dentry *
+cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
+ struct cifs_fattr *fattr)
+{
+ struct dentry *dentry, *alias;
+ struct inode *inode;
+ struct super_block *sb = parent->d_inode->i_sb;
+
+ cFYI(1, ("For %s", name->name));
+
+ dentry = d_lookup(parent, name);
+ if (dentry) {
+ /* FIXME: check for inode number changes? */
+ if (dentry->d_inode != NULL)
+ return dentry;
+ d_drop(dentry);
+ dput(dentry);
+ }
+
+ dentry = d_alloc(parent, name);
+ if (dentry == NULL)
+ return NULL;
+
+ inode = cifs_iget(sb, fattr);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
+
+ if (CIFS_SB(sb)->tcon->nocase)
+ dentry->d_op = &cifs_ci_dentry_ops;
+ else
+ dentry->d_op = &cifs_dentry_ops;
+
+ alias = d_materialise_unique(dentry, inode);
+ if (alias != NULL) {
+ dput(dentry);
+ if (IS_ERR(alias))
+ return NULL;
+ dentry = alias;
+ }
+
+ return dentry;
+}
+
/* Returns 1 if new inode created, 2 if both dentry and inode were */
/* Might check in the future if inode number changed so we can rehash inode */
static int
@@ -76,7 +125,6 @@ construct_dentry(struct qstr *qstring, struct file *file,
cFYI(1, ("For %s", qstring->name));
- qstring->hash = full_name_hash(qstring->name, qstring->len);
tmp_dentry = d_lookup(file->f_path.dentry, qstring);
if (tmp_dentry) {
/* BB: overwrite old name? i.e. tmp_dentry->d_name and
@@ -299,140 +347,6 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
}
}
-static void unix_fill_in_inode(struct inode *tmp_inode,
- FILE_UNIX_INFO *pfindData, unsigned int *pobject_type, int isNewInode)
-{
- loff_t local_size;
- struct timespec local_mtime;
-
- struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
-
- __u32 type = le32_to_cpu(pfindData->Type);
- __u64 num_of_bytes = le64_to_cpu(pfindData->NumOfBytes);
- __u64 end_of_file = le64_to_cpu(pfindData->EndOfFile);
- cifsInfo->time = jiffies;
- atomic_inc(&cifsInfo->inUse);
-
- /* save mtime and size */
- local_mtime = tmp_inode->i_mtime;
- local_size = tmp_inode->i_size;
-
- tmp_inode->i_atime =
- cifs_NTtimeToUnix(pfindData->LastAccessTime);
- tmp_inode->i_mtime =
- cifs_NTtimeToUnix(pfindData->LastModificationTime);
- tmp_inode->i_ctime =
- cifs_NTtimeToUnix(pfindData->LastStatusChange);
-
- tmp_inode->i_mode = le64_to_cpu(pfindData->Permissions);
- /* since we set the inode type below we need to mask off type
- to avoid strange results if bits above were corrupt */
- tmp_inode->i_mode &= ~S_IFMT;
- if (type == UNIX_FILE) {
- *pobject_type = DT_REG;
- tmp_inode->i_mode |= S_IFREG;
- } else if (type == UNIX_SYMLINK) {
- *pobject_type = DT_LNK;
- tmp_inode->i_mode |= S_IFLNK;
- } else if (type == UNIX_DIR) {
- *pobject_type = DT_DIR;
- tmp_inode->i_mode |= S_IFDIR;
- } else if (type == UNIX_CHARDEV) {
- *pobject_type = DT_CHR;
- tmp_inode->i_mode |= S_IFCHR;
- tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
- le64_to_cpu(pfindData->DevMinor) & MINORMASK);
- } else if (type == UNIX_BLOCKDEV) {
- *pobject_type = DT_BLK;
- tmp_inode->i_mode |= S_IFBLK;
- tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor),
- le64_to_cpu(pfindData->DevMinor) & MINORMASK);
- } else if (type == UNIX_FIFO) {
- *pobject_type = DT_FIFO;
- tmp_inode->i_mode |= S_IFIFO;
- } else if (type == UNIX_SOCKET) {
- *pobject_type = DT_SOCK;
- tmp_inode->i_mode |= S_IFSOCK;
- } else {
- /* safest to just call it a file */
- *pobject_type = DT_REG;
- tmp_inode->i_mode |= S_IFREG;
- cFYI(1, ("unknown inode type %d", type));
- }
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
- tmp_inode->i_uid = cifs_sb->mnt_uid;
- else
- tmp_inode->i_uid = le64_to_cpu(pfindData->Uid);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
- tmp_inode->i_gid = cifs_sb->mnt_gid;
- else
- tmp_inode->i_gid = le64_to_cpu(pfindData->Gid);
- tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks);
-
- cifsInfo->server_eof = end_of_file;
- spin_lock(&tmp_inode->i_lock);
- if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /* can not safely change the file size here if the
- client is writing to it due to potential races */
- i_size_write(tmp_inode, end_of_file);
-
- /* 512 bytes (2**9) is the fake blocksize that must be used */
- /* for this calculation, not the real blocksize */
- tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
- }
- spin_unlock(&tmp_inode->i_lock);
-
- if (S_ISREG(tmp_inode->i_mode)) {
- cFYI(1, ("File inode"));
- tmp_inode->i_op = &cifs_file_inode_ops;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
- else
- tmp_inode->i_fop = &cifs_file_direct_ops;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- tmp_inode->i_fop = &cifs_file_nobrl_ops;
- else
- tmp_inode->i_fop = &cifs_file_ops;
-
- if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
- (cifs_sb->tcon->ses->server->maxBuf <
- PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
- tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
- else
- tmp_inode->i_data.a_ops = &cifs_addr_ops;
-
- if (isNewInode)
- return; /* No sense invalidating pages for new inode
- since we have not started caching readahead
- file data for it yet */
-
- if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
- (local_size == tmp_inode->i_size)) {
- cFYI(1, ("inode exists but unchanged"));
- } else {
- /* file may have changed on server */
- cFYI(1, ("invalidate inode, readdir detected change"));
- invalidate_remote_inode(tmp_inode);
- }
- } else if (S_ISDIR(tmp_inode->i_mode)) {
- cFYI(1, ("Directory inode"));
- tmp_inode->i_op = &cifs_dir_inode_ops;
- tmp_inode->i_fop = &cifs_dir_ops;
- } else if (S_ISLNK(tmp_inode->i_mode)) {
- cFYI(1, ("Symbolic Link inode"));
- tmp_inode->i_op = &cifs_symlink_inode_ops;
-/* tmp_inode->i_fop = *//* do not need to set to anything */
- } else {
- cFYI(1, ("Special inode"));
- init_special_inode(tmp_inode, tmp_inode->i_mode,
- tmp_inode->i_rdev);
- }
-}
-
/* BB eventually need to add the following helper function to
resolve NT_STATUS_STOPPED_ON_SYMLINK return code when
we try to do FindFirst on (NTFS) directory symlinks */
@@ -872,7 +786,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
len = strnlen(filename, PATH_MAX);
}
- *pinum = le64_to_cpu(pFindData->UniqueId);
+ *pinum = le64_to_cpu(pFindData->basic.UniqueId);
} else if (level == SMB_FIND_FILE_DIRECTORY_INFO) {
FILE_DIRECTORY_INFO *pFindData =
(FILE_DIRECTORY_INFO *)current_entry;
@@ -934,9 +848,11 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
struct cifsFileInfo *pCifsF;
unsigned int obj_type;
__u64 inum;
+ ino_t ino;
struct cifs_sb_info *cifs_sb;
struct inode *tmp_inode;
struct dentry *tmp_dentry;
+ struct cifs_fattr fattr;
/* get filename and len into qstring */
/* get dentry */
@@ -967,39 +883,49 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
return rc;
/* only these two infolevels return valid inode numbers */
- if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX ||
- pCifsF->srch_inf.info_level == SMB_FIND_FILE_ID_FULL_DIR_INFO)
- rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry,
- &inum);
- else
- rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry,
- NULL);
+ if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
+ cifs_unix_basic_to_fattr(&fattr,
+ &((FILE_UNIX_INFO *) pfindEntry)->basic,
+ cifs_sb);
+ tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring,
+ &fattr);
+ obj_type = fattr.cf_dtype;
+ ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
+ } else {
+ if (pCifsF->srch_inf.info_level ==
+ SMB_FIND_FILE_ID_FULL_DIR_INFO)
+ rc = construct_dentry(&qstring, file, &tmp_inode,
+ &tmp_dentry, &inum);
+ else
+ rc = construct_dentry(&qstring, file, &tmp_inode,
+ &tmp_dentry, NULL);
- if ((tmp_inode == NULL) || (tmp_dentry == NULL))
- return -ENOMEM;
+ if ((tmp_inode == NULL) || (tmp_dentry == NULL)) {
+ rc = -ENOMEM;
+ goto out;
+ }
- /* we pass in rc below, indicating whether it is a new inode,
- so we can figure out whether to invalidate the inode cached
- data if the file has changed */
- if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
- unix_fill_in_inode(tmp_inode,
- (FILE_UNIX_INFO *)pfindEntry,
- &obj_type, rc);
- else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
- fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */,
- pfindEntry, &obj_type, rc);
- else
- fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc);
+ /* we pass in rc below, indicating whether it is a new inode,
+ * so we can figure out whether to invalidate the inode cached
+ * data if the file has changed
+ */
+ if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
+ fill_in_inode(tmp_inode, 0, pfindEntry, &obj_type, rc);
+ else
+ fill_in_inode(tmp_inode, 1, pfindEntry, &obj_type, rc);
- if (rc) /* new inode - needs to be tied to dentry */ {
- d_instantiate(tmp_dentry, tmp_inode);
- if (rc == 2)
- d_rehash(tmp_dentry);
- }
+ /* new inode - needs to be tied to dentry */
+ if (rc) {
+ d_instantiate(tmp_dentry, tmp_inode);
+ if (rc == 2)
+ d_rehash(tmp_dentry);
+ }
+ ino = cifs_uniqueid_to_ino_t(tmp_inode->i_ino);
+ }
rc = filldir(direntry, qstring.name, qstring.len, file->f_pos,
- tmp_inode->i_ino, obj_type);
+ ino, obj_type);
if (rc) {
cFYI(1, ("filldir rc = %d", rc));
/* we can not return filldir errors to the caller
@@ -1008,6 +934,7 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
rc = -EOVERFLOW;
}
+out:
dput(tmp_dentry);
return rc;
}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 897a052270f9..7085a6275c4c 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -802,7 +802,7 @@ ssetup_ntlmssp_authenticate:
#endif /* CONFIG_CIFS_UPCALL */
} else {
#ifdef CONFIG_CIFS_EXPERIMENTAL
- if ((experimEnabled > 1) && (type == RawNTLMSSP)) {
+ if (type == RawNTLMSSP) {
if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
cERROR(1, ("NTLMSSP requires Unicode support"));
rc = -ENOSYS;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index e9527eedc639..a75afa3dd9e1 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -64,8 +64,9 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if (ea_name == NULL) {
cFYI(1, ("Null xattr names not supported"));
@@ -118,8 +119,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
@@ -225,8 +227,9 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
@@ -351,8 +354,9 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 4921e7426d95..a2f746066c5d 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -51,6 +51,7 @@ static const struct address_space_operations configfs_aops = {
};
static struct backing_dev_info configfs_backing_dev_info = {
+ .name = "configfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 3f0e1974abdc..31d12de83a2a 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -14,35 +14,44 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
-#include <linux/eventfd.h>
#include <linux/syscalls.h>
#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/eventfd.h>
struct eventfd_ctx {
+ struct kref kref;
wait_queue_head_t wqh;
/*
* Every time that a write(2) is performed on an eventfd, the
* value of the __u64 being written is added to "count" and a
* wakeup is performed on "wqh". A read(2) will return the "count"
* value to userspace, and will reset "count" to zero. The kernel
- * size eventfd_signal() also, adds to the "count" counter and
+ * side eventfd_signal() also adds to the "count" counter and
* issue a wakeup.
*/
__u64 count;
unsigned int flags;
};
-/*
- * Adds "n" to the eventfd counter "count". Returns "n" in case of
- * success, or a value lower then "n" in case of coutner overflow.
- * This function is supposed to be called by the kernel in paths
- * that do not allow sleeping. In this function we allow the counter
- * to reach the ULLONG_MAX value, and we signal this as overflow
- * condition by returining a POLLERR to poll(2).
+/**
+ * eventfd_signal - Adds @n to the eventfd counter.
+ * @ctx: [in] Pointer to the eventfd context.
+ * @n: [in] Value of the counter to be added to the eventfd internal counter.
+ * The value cannot be negative.
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as an overflow condition by returning a POLLERR
+ * to poll(2).
+ *
+ * Returns @n in case of success, a non-negative number lower than @n in case
+ * of overflow, or the following error codes:
+ *
+ * -EINVAL : The value of @n is negative.
*/
-int eventfd_signal(struct file *file, int n)
+int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
- struct eventfd_ctx *ctx = file->private_data;
unsigned long flags;
if (n < 0)
@@ -59,9 +68,45 @@ int eventfd_signal(struct file *file, int n)
}
EXPORT_SYMBOL_GPL(eventfd_signal);
+static void eventfd_free(struct kref *kref)
+{
+ struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);
+
+ kfree(ctx);
+}
+
+/**
+ * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
+ * @ctx: [in] Pointer to the eventfd context.
+ *
+ * Returns: a pointer to the eventfd context.
+ */
+struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
+{
+ kref_get(&ctx->kref);
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_get);
+
+/**
+ * eventfd_ctx_put - Releases a reference to the internal eventfd context.
+ * @ctx: [in] Pointer to eventfd context.
+ *
+ * The eventfd context reference must have been previously acquired either
+ * with eventfd_ctx_get() or eventfd_ctx_fdget().
+ */
+void eventfd_ctx_put(struct eventfd_ctx *ctx)
+{
+ kref_put(&ctx->kref, eventfd_free);
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_put);
+
static int eventfd_release(struct inode *inode, struct file *file)
{
- kfree(file->private_data);
+ struct eventfd_ctx *ctx = file->private_data;
+
+ wake_up_poll(&ctx->wqh, POLLHUP);
+ eventfd_ctx_put(ctx);
return 0;
}
@@ -185,6 +230,16 @@ static const struct file_operations eventfd_fops = {
.write = eventfd_write,
};
+/**
+ * eventfd_fget - Acquire a reference to an eventfd file descriptor.
+ * @fd: [in] Eventfd file descriptor.
+ *
+ * Returns a pointer to the eventfd file structure in case of success, or the
+ * following error pointer:
+ *
+ * -EBADF : Invalid @fd file descriptor.
+ * -EINVAL : The @fd file descriptor is not an eventfd file.
+ */
struct file *eventfd_fget(int fd)
{
struct file *file;
@@ -201,6 +256,48 @@ struct file *eventfd_fget(int fd)
}
EXPORT_SYMBOL_GPL(eventfd_fget);
+/**
+ * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
+ * @fd: [in] Eventfd file descriptor.
+ *
+ * Returns a pointer to the internal eventfd context, otherwise the error
+ * pointers returned by the following functions:
+ *
+ * eventfd_fget
+ */
+struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+{
+ struct file *file;
+ struct eventfd_ctx *ctx;
+
+ file = eventfd_fget(fd);
+ if (IS_ERR(file))
+ return (struct eventfd_ctx *) file;
+ ctx = eventfd_ctx_get(file->private_data);
+ fput(file);
+
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
+
+/**
+ * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
+ * @file: [in] Eventfd file pointer.
+ *
+ * Returns a pointer to the internal eventfd context, otherwise the error
+ * pointer:
+ *
+ * -EINVAL : The @file is not an eventfd file.
+ */
+struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
+{
+ if (file->f_op != &eventfd_fops)
+ return ERR_PTR(-EINVAL);
+
+ return eventfd_ctx_get(file->private_data);
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
+
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
int fd;
@@ -217,6 +314,7 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
if (!ctx)
return -ENOMEM;
+ kref_init(&ctx->kref);
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
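The eventfd changes above split the context out of the struct file and give it a kref-based lifetime with eventfd_ctx_get()/eventfd_ctx_put() and eventfd_ctx_fdget()/eventfd_ctx_fileget(). A minimal kernel-side sketch of how a consumer would use the new interface; my_notifier and its helpers are hypothetical and not part of this patch:

#include <linux/eventfd.h>
#include <linux/err.h>

/* Hypothetical consumer of the new eventfd context API (sketch only). */
struct my_notifier {
	struct eventfd_ctx *trigger;
};

static int my_notifier_arm(struct my_notifier *n, int eventfd_fd)
{
	struct eventfd_ctx *ctx;

	/* takes a long-term reference on the context; the file is not pinned */
	ctx = eventfd_ctx_fdget(eventfd_fd);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	n->trigger = ctx;
	return 0;
}

static void my_notifier_fire(struct my_notifier *n)
{
	/* safe from non-sleeping context; adds 1 to the eventfd counter */
	eventfd_signal(n->trigger, 1);
}

static void my_notifier_teardown(struct my_notifier *n)
{
	eventfd_ctx_put(n->trigger);
	n->trigger = NULL;
}

Because the reference is held on the context rather than the file, eventfd_release() above now wakes pollers with POLLHUP and drops its reference instead of freeing the context outright.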
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index 24667eedc023..c6718e4817fe 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -2,9 +2,7 @@
* common.h - Common definitions for both Kernel and user-mode utilities
*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 65b0c8c776a1..4cfab1cc75c0 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 0fd4c7859679..5ec72e020b22 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
@@ -156,6 +154,9 @@ ino_t exofs_parent_ino(struct dentry *child);
int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *,
struct inode *);
+/* super.c */
+int exofs_sync_fs(struct super_block *sb, int wait);
+
/*********************
* operation vectors *
*********************/
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index 6ed7fe484752..839b9dc1e70f 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
@@ -47,16 +45,23 @@ static int exofs_file_fsync(struct file *filp, struct dentry *dentry,
{
int ret;
struct address_space *mapping = filp->f_mapping;
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb;
ret = filemap_write_and_wait(mapping);
if (ret)
return ret;
- /*Note: file_fsync below also calles sync_blockdev, which is a no-op
- * for exofs, but other then that it does sync_inode and
- * sync_superblock which is what we need here.
- */
- return file_fsync(filp, dentry, datasync);
+ /* sync the inode attributes */
+ ret = write_inode_now(inode, 1);
+
+ /* This is a good place to write the sb */
+ /* TODO: Schedule an sb-sync on create */
+ sb = inode->i_sb;
+ if (sb->s_dirt)
+ exofs_sync_fs(sb, 1);
+
+ return ret;
}
static int exofs_flush(struct file *file, fl_owner_t id)
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 77d0a295eb1c..6c10f7476699 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
@@ -295,6 +293,9 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
err:
if (!is_sync)
_unlock_pcol_pages(pcol, ret, READ);
+ else /* pages are unlocked by the caller in sync mode; just free the bio */
+ pcol_free(pcol);
+
kfree(pcol_copy);
if (or)
osd_end_request(or);
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 77fdd765e76d..b7dd0c236863 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c
index b3d2ccb87aaa..4372542df284 100644
--- a/fs/exofs/osd.c
+++ b/fs/exofs/osd.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 8216c5b77b53..a343b4ea62f6 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
@@ -200,7 +198,7 @@ static const struct export_operations exofs_export_ops;
/*
* Write the superblock to the OSD
*/
-static int exofs_sync_fs(struct super_block *sb, int wait)
+int exofs_sync_fs(struct super_block *sb, int wait)
{
struct exofs_sb_info *sbi;
struct exofs_fscb *fscb;
diff --git a/fs/exofs/symlink.c b/fs/exofs/symlink.c
index 36e2d7bc7f7b..4dd687c3e747 100644
--- a/fs/exofs/symlink.c
+++ b/fs/exofs/symlink.c
@@ -1,8 +1,6 @@
/*
* Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
- * Copyright (C) 2005, 2006
- * International Business Machines
+ * Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 6524ecaebb7a..e1dedb0f7873 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -66,8 +66,16 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
inode = NULL;
if (ino) {
inode = ext2_iget(dir->i_sb, ino);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
+ if (unlikely(IS_ERR(inode))) {
+ if (PTR_ERR(inode) == -ESTALE) {
+ ext2_error(dir->i_sb, __func__,
+ "deleted inode referenced: %lu",
+ ino);
+ return ERR_PTR(-EIO);
+ } else {
+ return ERR_CAST(inode);
+ }
+ }
}
return d_splice_alias(inode, dentry);
}
diff --git a/fs/fat/Kconfig b/fs/fat/Kconfig
index 182f9ffe2b51..907a5de8674f 100644
--- a/fs/fat/Kconfig
+++ b/fs/fat/Kconfig
@@ -74,6 +74,26 @@ config VFAT_FS
To compile this as a module, choose M here: the module will be called
vfat.
+config VFAT_FS_DUALNAMES
+ bool "VFAT dual names support"
+ depends on VFAT_FS
+ help
+ This option provides support for dual filenames on VFAT filesystems.
+ If this option is disabled then file creation will either put
+ a short (8.3) name or a long name on the file, but never both.
+ The field where a shortname would normally go is filled with
+ invalid characters such that it cannot be considered a valid
+ short filename.
+
+ That means that long filenames created with this option
+ disabled will not be accessible at all to operating systems
+ that do not understand the VFAT extensions.
+
+ Before enabling this option, users should consider the implications
+ of any patents that may exist on dual filenames in VFAT.
+
+ If unsure, say N.
+
config FAT_DEFAULT_CODEPAGE
int "Default codepage for FAT"
depends on MSDOS_FS || VFAT_FS
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 38ff75a0fe22..cd5d3ecdfb5e 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -420,14 +420,13 @@ parse_record:
}
i += chl;
}
- if (!last_u)
- continue;
-
- /* Compare shortname */
- bufuname[last_u] = 0x0000;
- len = fat_uni_to_x8(sbi, bufuname, bufname, sizeof(bufname));
- if (fat_name_match(sbi, name, name_len, bufname, len))
- goto found;
+ if (last_u) {
+ /* Compare shortname */
+ bufuname[last_u] = 0x0000;
+ len = fat_uni_to_x8(sbi, bufuname, bufname, sizeof(bufname));
+ if (fat_name_match(sbi, name, name_len, bufname, len))
+ goto found;
+ }
if (nr_slots) {
void *longname = unicode + FAT_MAX_UNI_CHARS;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 73471b7ecc8c..9555a461db41 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -22,6 +22,7 @@
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/namei.h>
+#include <linux/random.h>
#include "fat.h"
/*
@@ -316,6 +317,11 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
int is_shortname;
struct shortname_info base_info, ext_info;
+ unsigned opts_shortname = opts->shortname;
+
+#ifndef CONFIG_VFAT_FS_DUALNAMES
+ opts_shortname = VFAT_SFN_CREATE_WINNT;
+#endif
is_shortname = 1;
INIT_SHORTNAME_INFO(&base_info);
@@ -428,9 +434,9 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
if (vfat_find_form(dir, name_res) == 0)
return -EEXIST;
- if (opts->shortname & VFAT_SFN_CREATE_WIN95) {
+ if (opts_shortname & VFAT_SFN_CREATE_WIN95) {
return (base_info.upper && ext_info.upper);
- } else if (opts->shortname & VFAT_SFN_CREATE_WINNT) {
+ } else if (opts_shortname & VFAT_SFN_CREATE_WINNT) {
if ((base_info.upper || base_info.lower) &&
(ext_info.upper || ext_info.lower)) {
if (!base_info.upper && base_info.lower)
@@ -586,6 +592,59 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
return 0;
}
+#ifndef CONFIG_VFAT_FS_DUALNAMES
+/*
+ * build an 11 byte 8.3 buffer which is not a short filename. We want 11
+ * bytes which:
+ * - will be seen as a constant string to all APIs on Linux and Windows
+ * - cannot be matched with wildcard patterns
+ * - cannot be used to access the file
+ * - has a low probability of collision within a directory
+ * - has an invalid 3 byte extension
+ * - contains at least one non-space and non-nul byte
+ */
+static void vfat_build_dummy_83_buffer(struct inode *dir, char *msdos_name)
+{
+ u32 rand_num = random32() & 0x3FFFFFFF;
+ int i;
+
+ /* a value of zero would leave us with only nul and spaces,
+ * which would not work with older linux systems
+ */
+ if (rand_num == 0)
+ rand_num = 1;
+
+ /* we start with a space followed by nul as spaces at the
+ * start of an entry are trimmed in FAT, which means that
+ * starting the 11 bytes with 0x20 0x00 gives us a value which
+ * cannot be used to access the file. It also means that the
+ * value as seen from all Windows and Linux APIs is a constant
+ */
+ msdos_name[0] = ' ';
+ msdos_name[1] = 0;
+
+ /* we use / and 2 nul bytes for the extension. These are
+ * invalid in FAT and mean that utilities that show the
+ * directory show no extension, but still work via the long
+ * name for old Linux kernels
+ */
+ msdos_name[8] = '/';
+ msdos_name[9] = 0;
+ msdos_name[10] = 0;
+
+ /*
+ * fill the remaining 6 bytes with random invalid values
+ * This gives us a low collision rate, which means a low
+ * chance of problems with chkdsk.exe and WindowsXP
+ */
+ for (i = 2; i < 8; i++) {
+ msdos_name[i] = rand_num & 0x1F;
+ rand_num >>= 5;
+ }
+}
+#endif
+
+
static int vfat_build_slots(struct inode *dir, const unsigned char *name,
int len, int is_dir, int cluster,
struct timespec *ts,
@@ -628,6 +687,11 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
goto shortname;
}
+#ifndef CONFIG_VFAT_FS_DUALNAMES
+ vfat_build_dummy_83_buffer(dir, msdos_name);
+ lcase = 0;
+#endif
+
/* build the entry of long file name */
cksum = fat_checksum(msdos_name);
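For illustration, the dummy 8.3 buffer built by vfat_build_dummy_83_buffer() above can be reproduced in userspace. This sketch substitutes an arbitrary rand() value for the kernel's random32() and prints the 11 bytes so the invalid-name layout is visible:

#include <stdio.h>
#include <stdlib.h>

/* Userspace re-creation of the dummy 8.3 buffer logic, for illustration. */
static void build_dummy_83_buffer(char *msdos_name, unsigned int rand_num)
{
	int i;

	rand_num &= 0x3FFFFFFF;
	if (rand_num == 0)		/* avoid an all-nul/space name */
		rand_num = 1;

	msdos_name[0] = ' ';		/* leading space: trimmed by FAT */
	msdos_name[1] = 0;
	msdos_name[8] = '/';		/* invalid extension */
	msdos_name[9] = 0;
	msdos_name[10] = 0;

	for (i = 2; i < 8; i++) {	/* six random invalid bytes */
		msdos_name[i] = rand_num & 0x1F;
		rand_num >>= 5;
	}
}

int main(void)
{
	char name[11];
	int i;

	build_dummy_83_buffer(name, (unsigned int)rand());
	for (i = 0; i < 11; i++)
		printf("%02x ", (unsigned char)name[i]);
	printf("\n");
	return 0;
}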
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c54226be5294..e657d57e605f 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -19,49 +19,516 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"
+#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
-/**
- * writeback_acquire - attempt to get exclusive writeback access to a device
- * @bdi: the device's backing_dev_info structure
- *
- * It is a waste of resources to have more than one pdflush thread blocked on
- * a single request queue. Exclusion at the request_queue level is obtained
- * via a flag in the request_queue's backing_dev_info.state.
- *
- * Non-request_queue-backed address_spaces will share default_backing_dev_info,
- * unless they implement their own. Which is somewhat inefficient, as this
- * may prevent concurrent writeback against multiple devices.
+static void generic_sync_wb_inodes(struct bdi_writeback *wb,
+ struct super_block *sb,
+ struct writeback_control *wbc);
+
+/*
+ * We don't actually have pdflush, but this one is exported through /proc...
*/
-static int writeback_acquire(struct backing_dev_info *bdi)
+int nr_pdflush_threads;
+
+/*
+ * Work items for the bdi_writeback threads
+ */
+struct bdi_work {
+ struct list_head list;
+ struct list_head wait_list;
+ struct rcu_head rcu_head;
+
+ unsigned long seen;
+ atomic_t pending;
+
+ unsigned long sb_data;
+ unsigned long nr_pages;
+ enum writeback_sync_modes sync_mode;
+
+ unsigned long state;
+};
+
+static struct super_block *bdi_work_sb(struct bdi_work *work)
+{
+ return (struct super_block *) (work->sb_data & ~1UL);
+}
+
+static inline bool bdi_work_on_stack(struct bdi_work *work)
+{
+ return work->sb_data & 1UL;
+}
+
+static inline void bdi_work_init(struct bdi_work *work, struct super_block *sb,
+ unsigned long nr_pages,
+ enum writeback_sync_modes sync_mode)
{
- return !test_and_set_bit(BDI_pdflush, &bdi->state);
+ INIT_RCU_HEAD(&work->rcu_head);
+ work->sb_data = (unsigned long) sb;
+ work->nr_pages = nr_pages;
+ work->sync_mode = sync_mode;
+ work->state = 1;
+}
+
+static inline void bdi_work_init_on_stack(struct bdi_work *work,
+ struct super_block *sb,
+ unsigned long nr_pages,
+ enum writeback_sync_modes sync_mode)
+{
+ bdi_work_init(work, sb, nr_pages, sync_mode);
+ work->sb_data |= 1UL;
}
/**
* writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
*
- * Determine whether there is writeback in progress against a backing device.
+ * Determine whether there is writeback waiting to be handled against a
+ * backing device.
*/
int writeback_in_progress(struct backing_dev_info *bdi)
{
- return test_bit(BDI_pdflush, &bdi->state);
+ return !list_empty(&bdi->work_list);
}
-/**
- * writeback_release - relinquish exclusive writeback access against a device.
- * @bdi: the device's backing_dev_info structure
+static void bdi_work_clear(struct bdi_work *work)
+{
+ clear_bit(0, &work->state);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&work->state, 0);
+}
+
+static void bdi_work_free(struct rcu_head *head)
+{
+ struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
+
+ if (!bdi_work_on_stack(work))
+ kfree(work);
+ else
+ bdi_work_clear(work);
+}
+
+static void wb_work_complete(struct bdi_work *work)
+{
+ const enum writeback_sync_modes sync_mode = work->sync_mode;
+
+ /*
+ * For allocated work, we can clear the done/seen bit right here.
+ * For on-stack work, we need to postpone both the clear and free
+ * to after the RCU grace period, since the stack could be invalidated
+ * as soon as bdi_work_clear() has done the wakeup.
+ */
+ if (!bdi_work_on_stack(work))
+ bdi_work_clear(work);
+ if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
+ call_rcu(&work->rcu_head, bdi_work_free);
+}
+
+static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
+{
+ /*
+ * The caller has retrieved the work arguments from this work,
+ * drop our reference. If this is the last ref, delete and free it
+ */
+ if (atomic_dec_and_test(&work->pending)) {
+ struct backing_dev_info *bdi = wb->bdi;
+
+ spin_lock(&bdi->wb_lock);
+ list_del_rcu(&work->list);
+ spin_unlock(&bdi->wb_lock);
+
+ wb_work_complete(work);
+ }
+}
+
+static void wb_start_writeback(struct bdi_writeback *wb, struct bdi_work *work)
+{
+ /*
+ * If we failed allocating the bdi work item, wake up the wb thread
+ * always. As a safety precaution, it'll flush out everything
+ */
+ if (!wb_has_dirty_io(wb) && work)
+ wb_clear_pending(wb, work);
+ else if (wb->task)
+ wake_up_process(wb->task);
+}
+
+static void bdi_sched_work(struct backing_dev_info *bdi, struct bdi_work *work)
+{
+ if (!bdi_wblist_needs_lock(bdi))
+ wb_start_writeback(&bdi->wb, work);
+ else {
+ struct bdi_writeback *wb;
+ int idx;
+
+ idx = srcu_read_lock(&bdi->srcu);
+
+ list_for_each_entry_rcu(wb, &bdi->wb_list, list)
+ wb_start_writeback(wb, work);
+
+ srcu_read_unlock(&bdi->srcu, idx);
+ }
+}
+
+static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
+{
+ if (work) {
+ work->seen = bdi->wb_mask;
+ BUG_ON(!work->seen);
+ atomic_set(&work->pending, bdi->wb_cnt);
+ BUG_ON(!bdi->wb_cnt);
+
+ /*
+ * Make sure stores are seen before it appears on the list
+ */
+ smp_mb();
+
+ spin_lock(&bdi->wb_lock);
+ list_add_tail_rcu(&work->list, &bdi->work_list);
+ spin_unlock(&bdi->wb_lock);
+ }
+
+ /*
+ * If the default thread isn't there, make sure we add it. When
+ * it gets created and wakes up, we'll run this work.
+ */
+ if (unlikely(list_empty_careful(&bdi->wb_list)))
+ wake_up_process(default_backing_dev_info.wb.task);
+ else
+ bdi_sched_work(bdi, work);
+}
+
+/*
+ * Used for on-stack allocated work items. The caller needs to wait until
+ * the wb threads have acked the work before it's safe to continue.
*/
-static void writeback_release(struct backing_dev_info *bdi)
+static void bdi_wait_on_work_clear(struct bdi_work *work)
{
- BUG_ON(!writeback_in_progress(bdi));
- clear_bit(BDI_pdflush, &bdi->state);
+ wait_on_bit(&work->state, 0, bdi_sched_wait, TASK_UNINTERRUPTIBLE);
+}
+
+static struct bdi_work *bdi_alloc_work(struct super_block *sb, long nr_pages,
+ enum writeback_sync_modes sync_mode)
+{
+ struct bdi_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work)
+ bdi_work_init(work, sb, nr_pages, sync_mode);
+
+ return work;
+}
+
+void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
+ long nr_pages, enum writeback_sync_modes sync_mode)
+{
+ const bool must_wait = sync_mode == WB_SYNC_ALL;
+ struct bdi_work work_stack, *work = NULL;
+
+ if (!must_wait)
+ work = bdi_alloc_work(sb, nr_pages, sync_mode);
+
+ if (!work) {
+ work = &work_stack;
+ bdi_work_init_on_stack(work, sb, nr_pages, sync_mode);
+ }
+
+ bdi_queue_work(bdi, work);
+
+ /*
+ * If the sync mode is WB_SYNC_ALL, block waiting for the work to
+ * complete. If not, we only need to wait for the work to be started,
+ * if we allocated it on-stack. We use the same mechanism: if the
+ * wait bit is set in the bdi_work struct, threads will not
+ * clear pending until after they are done.
+ *
+ * Note that work == &work_stack if must_wait is true, so we don't
+ * need to do call_rcu() here ever, since the completion path will
+ * have done that for us.
+ */
+ if (must_wait || work == &work_stack) {
+ bdi_wait_on_work_clear(work);
+ if (work != &work_stack)
+ call_rcu(&work->rcu_head, bdi_work_free);
+ }
+}
+
+/*
+ * The maximum number of pages to writeout in a single bdi flush/kupdate
+ * operation. We do this so we don't hold I_SYNC against an inode for
+ * enormous amounts of time, which would block a userspace task which has
+ * been forced to throttle against that inode. Also, the code reevaluates
+ * the dirty each time it has written this many pages.
+ */
+#define MAX_WRITEBACK_PAGES 1024
+
+static inline bool over_bground_thresh(void)
+{
+ unsigned long background_thresh, dirty_thresh;
+
+ get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+
+ return (global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
+}
+
+/*
+ * Explicit flushing or periodic writeback of "old" data.
+ *
+ * Define "old": the first time one of an inode's pages is dirtied, we mark the
+ * dirtying-time in the inode's address_space. So this periodic writeback code
+ * just walks the superblock inode list, writing back any inodes which are
+ * older than a specific point in time.
+ *
+ * Try to run once per dirty_writeback_interval. But if a writeback event
+ * takes longer than a dirty_writeback_interval, then leave a
+ * one-second gap.
+ *
+ * older_than_this takes precedence over nr_to_write. So we'll only write back
+ * all dirty pages if they are all attached to "old" mappings.
+ */
+static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
+ struct super_block *sb,
+ enum writeback_sync_modes sync_mode, int for_kupdate)
+{
+ struct writeback_control wbc = {
+ .bdi = wb->bdi,
+ .sync_mode = sync_mode,
+ .older_than_this = NULL,
+ .for_kupdate = for_kupdate,
+ .range_cyclic = 1,
+ };
+ unsigned long oldest_jif;
+ long wrote = 0;
+
+ if (wbc.for_kupdate) {
+ wbc.older_than_this = &oldest_jif;
+ oldest_jif = jiffies -
+ msecs_to_jiffies(dirty_expire_interval * 10);
+ }
+
+ for (;;) {
+ if (sync_mode == WB_SYNC_NONE && nr_pages <= 0 &&
+ !over_bground_thresh())
+ break;
+
+ wbc.more_io = 0;
+ wbc.encountered_congestion = 0;
+ wbc.nr_to_write = MAX_WRITEBACK_PAGES;
+ wbc.pages_skipped = 0;
+ generic_sync_wb_inodes(wb, sb, &wbc);
+ nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+ wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+ /*
+ * If we ran out of stuff to write, bail unless more_io got set
+ */
+ if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
+ if (wbc.more_io && !wbc.for_kupdate)
+ continue;
+ break;
+ }
+ }
+
+ return wrote;
+}
+
+/*
+ * Return the next bdi_work struct that hasn't been processed by this
+ * wb thread yet
+ */
+static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
+ struct bdi_writeback *wb)
+{
+ struct bdi_work *work, *ret = NULL;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(work, &bdi->work_list, list) {
+ if (!test_and_clear_bit(wb->nr, &work->seen))
+ continue;
+
+ ret = work;
+ break;
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Retrieve work items and do the writeback they describe
+ */
+long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
+{
+ struct backing_dev_info *bdi = wb->bdi;
+ struct bdi_work *work;
+ long nr_pages, wrote = 0;
+
+ while ((work = get_next_work_item(bdi, wb)) != NULL) {
+ struct super_block *sb = bdi_work_sb(work);
+ enum writeback_sync_modes sync_mode;
+
+ nr_pages = work->nr_pages;
+
+ /*
+ * Override sync mode, in case we must wait for completion
+ */
+ if (force_wait)
+ work->sync_mode = sync_mode = WB_SYNC_ALL;
+ else
+ sync_mode = work->sync_mode;
+
+ /*
+ * If this isn't a data integrity operation, just notify
+ * that we have seen this work and we are now starting it.
+ */
+ if (sync_mode == WB_SYNC_NONE)
+ wb_clear_pending(wb, work);
+
+ wrote += wb_writeback(wb, nr_pages, sb, sync_mode, 0);
+
+ /*
+ * This is a data integrity writeback, so only do the
+ * notification when we have completed the work.
+ */
+ if (sync_mode == WB_SYNC_ALL)
+ wb_clear_pending(wb, work);
+ }
+
+ /*
+ * Check for periodic writeback, kupdated() style
+ */
+ if (!wrote) {
+ nr_pages = global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS) +
+ (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+
+ wrote = wb_writeback(wb, nr_pages, NULL, WB_SYNC_NONE, 1);
+ }
+
+ return wrote;
+}
+
+/*
+ * Handle writeback of dirty data for the device backed by this bdi. Also
+ * wakes up periodically and does kupdated style flushing.
+ */
+int bdi_writeback_task(struct bdi_writeback *wb)
+{
+ unsigned long last_active = jiffies;
+ unsigned long wait_jiffies = -1UL;
+ long pages_written;
+
+ while (!kthread_should_stop()) {
+ pages_written = wb_do_writeback(wb, 0);
+
+ if (pages_written)
+ last_active = jiffies;
+ else if (wait_jiffies != -1UL) {
+ unsigned long max_idle;
+
+ /*
+ * Longest period of inactivity that we tolerate. If we
+ * see dirty data again later, the task will get
+ * recreated automatically.
+ */
+ max_idle = max(5UL * 60 * HZ, wait_jiffies);
+ if (time_after(jiffies, max_idle + last_active) &&
+ wb_is_default_task(wb))
+ break;
+ }
+
+ wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(wait_jiffies);
+ try_to_freeze();
+ }
+
+ return 0;
+}
+
+/*
+ * Schedule writeback for all backing devices. Expensive! If this is a data
+ * integrity operation, writeback will be complete when this returns. If
+ * we are simply called for WB_SYNC_NONE, then writeback will merely be
+ * scheduled to run.
+ */
+void bdi_writeback_all(struct super_block *sb, struct writeback_control *wbc)
+{
+ const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
+ struct backing_dev_info *bdi;
+ struct bdi_work *work;
+ LIST_HEAD(list);
+
+ /*
+ * If this isn't a data integrity writeback, just drop it if
+ * someone is already holding the bdi_lock
+ */
+ if (!spin_trylock(&bdi_lock)) {
+ if (!must_wait)
+ return;
+ spin_lock(&bdi_lock);
+ }
+
+restart:
+ list_for_each_entry(bdi, &bdi_list, bdi_list) {
+ struct bdi_work *work;
+
+ if (!bdi_has_dirty_io(bdi))
+ continue;
+
+ /*
+ * If work allocation fails, do the writes inline. We drop
+ * the lock and restart the list writeout. This should be OK,
+ * since this happens rarely and because the writeout should
+ * eventually make more free memory available.
+ */
+ work = bdi_alloc_work(sb, wbc->nr_to_write, wbc->sync_mode);
+ if (!work) {
+ struct writeback_control __wbc = *wbc;
+
+ /*
+ * Not a data integrity writeout, just continue
+ */
+ if (!must_wait)
+ continue;
+
+ spin_unlock(&bdi_lock);
+ __wbc = *wbc;
+ __wbc.bdi = bdi;
+ generic_sync_bdi_inodes(sb, &__wbc);
+ spin_lock(&bdi_lock);
+ goto restart;
+ }
+ if (must_wait)
+ list_add_tail(&work->wait_list, &list);
+
+ bdi_queue_work(bdi, work);
+ }
+
+ spin_unlock(&bdi_lock);
+
+ /*
+ * If this is for WB_SYNC_ALL, wait for pending work to complete
+ * before returning.
+ */
+ while (!list_empty(&list)) {
+ work = list_entry(list.next, struct bdi_work, wait_list);
+ list_del(&work->wait_list);
+ bdi_wait_on_work_clear(work);
+ call_rcu(&work->rcu_head, bdi_work_free);
+ }
}
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
@@ -86,6 +553,21 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
}
}
+/*
+ * If the filesystem didn't provide a way to map an inode to a dedicated
+ * flusher thread, it doesn't support more than 1 thread. So we know it's
+ * the default thread; return that.
+ */
+static inline struct bdi_writeback *inode_get_wb(struct inode *inode)
+{
+ const struct super_operations *sop = inode->i_sb->s_op;
+
+ if (!sop->inode_get_wb)
+ return &inode_to_bdi(inode)->wb;
+
+ return sop->inode_get_wb(inode);
+}
+
/**
* __mark_inode_dirty - internal function
* @inode: inode to mark
@@ -165,12 +647,21 @@ void __mark_inode_dirty(struct inode *inode, int flags)
goto out;
/*
- * If the inode was already on s_dirty/s_io/s_more_io, don't
- * reposition it (that would break s_dirty time-ordering).
+ * If the inode was already on b_dirty/b_io/b_more_io, don't
+ * reposition it (that would break b_dirty time-ordering).
*/
if (!was_dirty) {
+ struct bdi_writeback *wb = inode_get_wb(inode);
+ struct backing_dev_info *bdi = wb->bdi;
+
+ if (bdi_cap_writeback_dirty(bdi) &&
+ !test_bit(BDI_registered, &bdi->state)) {
+ WARN_ON(1);
+ printk(KERN_ERR "bdi-%s not registered\n", bdi->name);
+ }
+
inode->dirtied_when = jiffies;
- list_move(&inode->i_list, &sb->s_dirty);
+ list_move(&inode->i_list, &wb->b_dirty);
}
}
out:
@@ -191,31 +682,32 @@ static int write_inode(struct inode *inode, int sync)
* furthest end of its superblock's dirty-inode list.
*
* Before stamping the inode's ->dirtied_when, we check to see whether it is
- * already the most-recently-dirtied inode on the s_dirty list. If that is
+ * already the most-recently-dirtied inode on the b_dirty list. If that is
* the case then the inode must have been redirtied while it was being written
* out and we don't reset its dirtied_when.
*/
static void redirty_tail(struct inode *inode)
{
- struct super_block *sb = inode->i_sb;
+ struct bdi_writeback *wb = inode_get_wb(inode);
- if (!list_empty(&sb->s_dirty)) {
- struct inode *tail_inode;
+ if (!list_empty(&wb->b_dirty)) {
+ struct inode *tail;
- tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
- if (time_before(inode->dirtied_when,
- tail_inode->dirtied_when))
+ tail = list_entry(wb->b_dirty.next, struct inode, i_list);
+ if (time_before(inode->dirtied_when, tail->dirtied_when))
inode->dirtied_when = jiffies;
}
- list_move(&inode->i_list, &sb->s_dirty);
+ list_move(&inode->i_list, &wb->b_dirty);
}
/*
- * requeue inode for re-scanning after sb->s_io list is exhausted.
+ * requeue inode for re-scanning after bdi->b_io list is exhausted.
*/
static void requeue_io(struct inode *inode)
{
- list_move(&inode->i_list, &inode->i_sb->s_more_io);
+ struct bdi_writeback *wb = inode_get_wb(inode);
+
+ list_move(&inode->i_list, &wb->b_more_io);
}
static void inode_sync_complete(struct inode *inode)
@@ -262,21 +754,12 @@ static void move_expired_inodes(struct list_head *delaying_queue,
/*
* Queue all expired dirty inodes for io, eldest first.
*/
-static void queue_io(struct super_block *sb,
- unsigned long *older_than_this)
+static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
- list_splice_init(&sb->s_more_io, sb->s_io.prev);
- move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
+ list_splice_init(&wb->b_more_io, wb->b_io.prev);
+ move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}
-int sb_has_dirty_inodes(struct super_block *sb)
-{
- return !list_empty(&sb->s_dirty) ||
- !list_empty(&sb->s_io) ||
- !list_empty(&sb->s_more_io);
-}
-EXPORT_SYMBOL(sb_has_dirty_inodes);
-
/*
* Wait for writeback on an inode to complete.
*/
@@ -322,11 +805,11 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (inode->i_state & I_SYNC) {
/*
* If this inode is locked for writeback and we are not doing
- * writeback-for-data-integrity, move it to s_more_io so that
+ * writeback-for-data-integrity, move it to b_more_io so that
* writeback can proceed with the other inodes on s_io.
*
* We'll have another go at writing back this inode when we
- * completed a full scan of s_io.
+ * completed a full scan of b_io.
*/
if (!wait) {
requeue_io(inode);
@@ -371,11 +854,11 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
/*
* We didn't write back all the pages. nfs_writepages()
* sometimes bales out without doing anything. Redirty
- * the inode; Move it from s_io onto s_more_io/s_dirty.
+ * the inode; Move it from b_io onto b_more_io/b_dirty.
*/
/*
* akpm: if the caller was the kupdate function we put
- * this inode at the head of s_dirty so it gets first
+ * this inode at the head of b_dirty so it gets first
* consideration. Otherwise, move it to the tail, for
* the reasons described there. I'm not really sure
* how much sense this makes. Presumably I had a good
@@ -385,7 +868,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (wbc->for_kupdate) {
/*
* For the kupdate function we move the inode
- * to s_more_io so it will get more writeout as
+ * to b_more_io so it will get more writeout as
* soon as the queue becomes uncongested.
*/
inode->i_state |= I_DIRTY_PAGES;
@@ -433,51 +916,34 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
return ret;
}
-/*
- * Write out a superblock's list of dirty inodes. A wait will be performed
- * upon no inodes, all inodes or the final one, depending upon sync_mode.
- *
- * If older_than_this is non-NULL, then only write out inodes which
- * had their first dirtying at a time earlier than *older_than_this.
- *
- * If we're a pdflush thread, then implement pdflush collision avoidance
- * against the entire list.
- *
- * If `bdi' is non-zero then we're being asked to writeback a specific queue.
- * This function assumes that the blockdev superblock's inodes are backed by
- * a variety of queues, so all inodes are searched. For other superblocks,
- * assume that all inodes are backed by the same queue.
- *
- * FIXME: this linear search could get expensive with many fileystems. But
- * how to fix? We need to go from an address_space to all inodes which share
- * a queue with that address_space. (Easy: have a global "dirty superblocks"
- * list).
- *
- * The inodes to be written are parked on sb->s_io. They are moved back onto
- * sb->s_dirty as they are selected for writing. This way, none can be missed
- * on the writer throttling path, and we get decent balancing between many
- * throttled threads: we don't want them all piling up on inode_sync_wait.
- */
-void generic_sync_sb_inodes(struct super_block *sb,
- struct writeback_control *wbc)
+static void generic_sync_wb_inodes(struct bdi_writeback *wb,
+ struct super_block *sb,
+ struct writeback_control *wbc)
{
+ const int is_blkdev_sb = sb_is_blkdev_sb(sb);
const unsigned long start = jiffies; /* livelock avoidance */
- int sync = wbc->sync_mode == WB_SYNC_ALL;
spin_lock(&inode_lock);
- if (!wbc->for_kupdate || list_empty(&sb->s_io))
- queue_io(sb, wbc->older_than_this);
- while (!list_empty(&sb->s_io)) {
- struct inode *inode = list_entry(sb->s_io.prev,
+ if (!wbc->for_kupdate || list_empty(&wb->b_io))
+ queue_io(wb, wbc->older_than_this);
+
+ while (!list_empty(&wb->b_io)) {
+ struct inode *inode = list_entry(wb->b_io.prev,
struct inode, i_list);
- struct address_space *mapping = inode->i_mapping;
- struct backing_dev_info *bdi = mapping->backing_dev_info;
long pages_skipped;
- if (!bdi_cap_writeback_dirty(bdi)) {
+ /*
+ * super block given and doesn't match, skip this inode
+ */
+ if (sb && sb != inode->i_sb) {
redirty_tail(inode);
- if (sb_is_blkdev_sb(sb)) {
+ continue;
+ }
+
+ if (!bdi_cap_writeback_dirty(wb->bdi)) {
+ redirty_tail(inode);
+ if (is_blkdev_sb) {
/*
* Dirty memory-backed blockdev: the ramdisk
* driver does this. Skip just this inode
@@ -497,21 +963,14 @@ void generic_sync_sb_inodes(struct super_block *sb,
continue;
}
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
+ if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
wbc->encountered_congestion = 1;
- if (!sb_is_blkdev_sb(sb))
+ if (!is_blkdev_sb)
break; /* Skip a congested fs */
requeue_io(inode);
continue; /* Skip a congested blockdev */
}
- if (wbc->bdi && bdi != wbc->bdi) {
- if (!sb_is_blkdev_sb(sb))
- break; /* fs has the wrong queue */
- requeue_io(inode);
- continue; /* blockdev has wrong queue */
- }
-
/*
* Was this inode dirtied after sync_sb_inodes was called?
* This keeps sync from extra jobs and livelock.
@@ -519,16 +978,10 @@ void generic_sync_sb_inodes(struct super_block *sb,
if (inode_dirtied_after(inode, start))
break;
- /* Is another pdflush already flushing this queue? */
- if (current_is_pdflush() && !writeback_acquire(bdi))
- break;
-
BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
__iget(inode);
pages_skipped = wbc->pages_skipped;
writeback_single_inode(inode, wbc);
- if (current_is_pdflush())
- writeback_release(bdi);
if (wbc->pages_skipped != pages_skipped) {
/*
* writeback is not making progress due to locked
@@ -544,13 +997,71 @@ void generic_sync_sb_inodes(struct super_block *sb,
wbc->more_io = 1;
break;
}
- if (!list_empty(&sb->s_more_io))
+ if (!list_empty(&wb->b_more_io))
wbc->more_io = 1;
}
- if (sync) {
+ spin_unlock(&inode_lock);
+ /* Leave any unwritten inodes on b_io */
+}
+
+void generic_sync_bdi_inodes(struct super_block *sb,
+ struct writeback_control *wbc)
+{
+ struct backing_dev_info *bdi = wbc->bdi;
+ struct bdi_writeback *wb;
+
+ /*
+ * Common case is just a single wb thread and that is embedded in
+ * the bdi, so it doesn't need locking
+ */
+ if (!bdi_wblist_needs_lock(bdi))
+ generic_sync_wb_inodes(&bdi->wb, sb, wbc);
+ else {
+ int idx;
+
+ idx = srcu_read_lock(&bdi->srcu);
+
+ list_for_each_entry_rcu(wb, &bdi->wb_list, list)
+ generic_sync_wb_inodes(wb, sb, wbc);
+
+ srcu_read_unlock(&bdi->srcu, idx);
+ }
+}
+
+/*
+ * Write out a superblock's list of dirty inodes. A wait will be performed
+ * upon no inodes, all inodes or the final one, depending upon sync_mode.
+ *
+ * If older_than_this is non-NULL, then only write out inodes which
+ * had their first dirtying at a time earlier than *older_than_this.
+ *
+ * If we're a pdflush thread, then implement pdflush collision avoidance
+ * against the entire list.
+ *
+ * If `bdi' is non-zero then we're being asked to writeback a specific queue.
+ * This function assumes that the blockdev superblock's inodes are backed by
+ * a variety of queues, so all inodes are searched. For other superblocks,
+ * assume that all inodes are backed by the same queue.
+ *
+ * The inodes to be written are parked on bdi->b_io. They are moved back onto
+ * bdi->b_dirty as they are selected for writing. This way, none can be missed
+ * on the writer throttling path, and we get decent balancing between many
+ * throttled threads: we don't want them all piling up on inode_sync_wait.
+ */
+void generic_sync_sb_inodes(struct super_block *sb,
+ struct writeback_control *wbc)
+{
+ if (wbc->bdi)
+ bdi_start_writeback(wbc->bdi, sb, wbc->nr_to_write, wbc->sync_mode);
+ else
+ bdi_writeback_all(sb, wbc);
+
+ if (wbc->sync_mode == WB_SYNC_ALL) {
struct inode *inode, *old_inode = NULL;
+ spin_lock(&inode_lock);
+
/*
* Data integrity sync. Must wait for all pages under writeback,
* because there may have been pages dirtied before our sync
@@ -588,10 +1099,8 @@ void generic_sync_sb_inodes(struct super_block *sb,
}
spin_unlock(&inode_lock);
iput(old_inode);
- } else
- spin_unlock(&inode_lock);
+ }
- return; /* Leave any unwritten inodes on s_io */
}
EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
@@ -602,58 +1111,6 @@ static void sync_sb_inodes(struct super_block *sb,
}
/*
- * Start writeback of dirty pagecache data against all unlocked inodes.
- *
- * Note:
- * We don't need to grab a reference to superblock here. If it has non-empty
- * ->s_dirty it's hadn't been killed yet and kill_super() won't proceed
- * past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are all
- * empty. Since __sync_single_inode() regains inode_lock before it finally moves
- * inode from superblock lists we are OK.
- *
- * If `older_than_this' is non-zero then only flush inodes which have a
- * flushtime older than *older_than_this.
- *
- * If `bdi' is non-zero then we will scan the first inode against each
- * superblock until we find the matching ones. One group will be the dirty
- * inodes against a filesystem. Then when we hit the dummy blockdev superblock,
- * sync_sb_inodes will seekout the blockdev which matches `bdi'. Maybe not
- * super-efficient but we're about to do a ton of I/O...
- */
-void
-writeback_inodes(struct writeback_control *wbc)
-{
- struct super_block *sb;
-
- might_sleep();
- spin_lock(&sb_lock);
-restart:
- list_for_each_entry_reverse(sb, &super_blocks, s_list) {
- if (sb_has_dirty_inodes(sb)) {
- /* we're making our own get_super here */
- sb->s_count++;
- spin_unlock(&sb_lock);
- /*
- * If we can't get the readlock, there's no sense in
- * waiting around, most of the time the FS is going to
- * be unmounted by the time it is released.
- */
- if (down_read_trylock(&sb->s_umount)) {
- if (sb->s_root)
- sync_sb_inodes(sb, wbc);
- up_read(&sb->s_umount);
- }
- spin_lock(&sb_lock);
- if (__put_super_and_need_restart(sb))
- goto restart;
- }
- if (wbc->nr_to_write <= 0)
- break;
- }
- spin_unlock(&sb_lock);
-}
-
-/*
* writeback and wait upon the filesystem's dirty inodes. The caller will
* do this in two passes - one to write, and one to wait.
*
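The writeback rework above replaces the pdflush scheme with per-bdi flusher threads driven by queued bdi_work items. A condensed sketch of the two sides of that hand-off, using only functions introduced in the hunks above (the wrapper name is hypothetical):

/* Submission side (sketch): how a caller kicks writeback for one bdi. */
static void kick_bdi_writeback(struct backing_dev_info *bdi,
			       struct super_block *sb, long nr_pages,
			       enum writeback_sync_modes mode)
{
	/*
	 * WB_SYNC_NONE: a bdi_work is kmalloc'ed, queued on bdi->work_list
	 * and the caller returns immediately.
	 * WB_SYNC_ALL: an on-stack bdi_work is used and the caller blocks
	 * in bdi_wait_on_work_clear() until the flusher has finished.
	 */
	bdi_start_writeback(bdi, sb, nr_pages, mode);
}

/*
 * Consumption side: bdi_writeback_task() loops in the flusher thread,
 * pulling items with get_next_work_item() and writing out at most
 * MAX_WRITEBACK_PAGES (1024) pages per pass through wb_writeback();
 * with nothing queued it falls back to kupdate-style flushing of
 * expired dirty inodes.
 */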
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8fed2ed12f38..f58ecbc416c8 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -849,6 +849,81 @@ err:
return err;
}
+static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_inval_inode_out outarg;
+ int err = -EINVAL;
+
+ if (size != sizeof(outarg))
+ goto err;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ goto err;
+ fuse_copy_finish(cs);
+
+ down_read(&fc->killsb);
+ err = -ENOENT;
+ if (!fc->sb)
+ goto err_unlock;
+
+ err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
+ outarg.off, outarg.len);
+
+err_unlock:
+ up_read(&fc->killsb);
+ return err;
+
+err:
+ fuse_copy_finish(cs);
+ return err;
+}
+
+static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_inval_entry_out outarg;
+ int err = -EINVAL;
+ char buf[FUSE_NAME_MAX+1];
+ struct qstr name;
+
+ if (size < sizeof(outarg))
+ goto err;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ goto err;
+
+ err = -ENAMETOOLONG;
+ if (outarg.namelen > FUSE_NAME_MAX)
+ goto err;
+
+ name.name = buf;
+ name.len = outarg.namelen;
+ err = fuse_copy_one(cs, buf, outarg.namelen + 1);
+ if (err)
+ goto err;
+ fuse_copy_finish(cs);
+ buf[outarg.namelen] = 0;
+ name.hash = full_name_hash(name.name, name.len);
+
+ down_read(&fc->killsb);
+ err = -ENOENT;
+ if (!fc->sb)
+ goto err_unlock;
+
+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
+
+err_unlock:
+ up_read(&fc->killsb);
+ return err;
+
+err:
+ fuse_copy_finish(cs);
+ return err;
+}
+
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
unsigned int size, struct fuse_copy_state *cs)
{
@@ -856,6 +931,12 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
case FUSE_NOTIFY_POLL:
return fuse_notify_poll(fc, size, cs);
+ case FUSE_NOTIFY_INVAL_INODE:
+ return fuse_notify_inval_inode(fc, size, cs);
+
+ case FUSE_NOTIFY_INVAL_ENTRY:
+ return fuse_notify_inval_entry(fc, size, cs);
+
default:
fuse_copy_finish(cs);
return -EINVAL;
@@ -910,7 +991,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
int err;
- unsigned nbytes = iov_length(iov, nr_segs);
+ size_t nbytes = iov_length(iov, nr_segs);
struct fuse_req *req;
struct fuse_out_header oh;
struct fuse_copy_state cs;
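The two notify handlers added above consume messages that a filesystem daemon writes to the fuse device. As a reference, a sketch of the message bodies they parse; the struct layouts are assumed to match the include/linux/fuse.h definitions this protocol revision introduces and are not spelled out in this hunk:

/* Assumed wire structs for the handlers above (sketch, protocol 7.12). */
struct fuse_notify_inval_inode_out {
	__u64	ino;		/* node id whose data cache to invalidate */
	__s64	off;		/* byte range, interpreted by */
	__s64	len;		/* fuse_reverse_inval_inode() */
};

struct fuse_notify_inval_entry_out {
	__u64	parent;		/* parent directory node id */
	__u32	namelen;	/* length of the name that follows */
	__u32	padding;
};

/*
 * fuse_notify_inval_entry_out is followed by namelen + 1 bytes of
 * NUL-terminated name; fuse_notify_inval_entry() copies it, hashes it
 * with full_name_hash() and hands it to fuse_reverse_inval_entry().
 */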
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b3089a083d30..e703654e7f40 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -375,7 +375,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
struct fuse_conn *fc = get_fuse_conn(dir);
struct fuse_req *req;
struct fuse_req *forget_req;
- struct fuse_open_in inarg;
+ struct fuse_create_in inarg;
struct fuse_open_out outopen;
struct fuse_entry_out outentry;
struct fuse_file *ff;
@@ -399,15 +399,20 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
if (!ff)
goto out_put_request;
+ if (!fc->dont_mask)
+ mode &= ~current_umask();
+
flags &= ~O_NOCTTY;
memset(&inarg, 0, sizeof(inarg));
memset(&outentry, 0, sizeof(outentry));
inarg.flags = flags;
inarg.mode = mode;
+ inarg.umask = current_umask();
req->in.h.opcode = FUSE_CREATE;
req->in.h.nodeid = get_node_id(dir);
req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
+ sizeof(inarg);
req->in.args[0].value = &inarg;
req->in.args[1].size = entry->d_name.len + 1;
req->in.args[1].value = entry->d_name.name;
@@ -546,12 +551,17 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
if (IS_ERR(req))
return PTR_ERR(req);
+ if (!fc->dont_mask)
+ mode &= ~current_umask();
+
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
inarg.rdev = new_encode_dev(rdev);
+ inarg.umask = current_umask();
req->in.h.opcode = FUSE_MKNOD;
req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
+ sizeof(inarg);
req->in.args[0].value = &inarg;
req->in.args[1].size = entry->d_name.len + 1;
req->in.args[1].value = entry->d_name.name;
@@ -578,8 +588,12 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode)
if (IS_ERR(req))
return PTR_ERR(req);
+ if (!fc->dont_mask)
+ mode &= ~current_umask();
+
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
+ inarg.umask = current_umask();
req->in.h.opcode = FUSE_MKDIR;
req->in.numargs = 2;
req->in.args[0].size = sizeof(inarg);
@@ -845,6 +859,43 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
return err;
}
+int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
+ struct qstr *name)
+{
+ int err = -ENOTDIR;
+ struct inode *parent;
+ struct dentry *dir;
+ struct dentry *entry;
+
+ parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
+ if (!parent)
+ return -ENOENT;
+
+ mutex_lock(&parent->i_mutex);
+ if (!S_ISDIR(parent->i_mode))
+ goto unlock;
+
+ err = -ENOENT;
+ dir = d_find_alias(parent);
+ if (!dir)
+ goto unlock;
+
+ entry = d_lookup(dir, name);
+ dput(dir);
+ if (!entry)
+ goto unlock;
+
+ fuse_invalidate_attr(parent);
+ fuse_invalidate_entry(entry);
+ dput(entry);
+ err = 0;
+
+ unlock:
+ mutex_unlock(&parent->i_mutex);
+ iput(parent);
+ return err;
+}
+
/*
* Calling into a user-controlled filesystem gives the filesystem
* daemon ptrace-like capabilities over the requester process. This
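Each of the create/mknod/mkdir paths above now both strips the umask itself when FUSE_DONT_MASK was not negotiated and still forwards the raw umask in the request. A hedged server-side sketch of what a daemon that did negotiate FUSE_DONT_MASK might do with that value; effective_mode() and have_default_acl are illustrative only, not libfuse API.

#include <sys/types.h>

/* Sketch only: a server owning umask handling (FUSE_DONT_MASK negotiated)
 * would typically ignore the umask when a default POSIX ACL applies and
 * fall back to VFS-like masking otherwise. */
static mode_t effective_mode(mode_t mode, mode_t umask, int have_default_acl)
{
	if (have_default_acl)
		return mode;		/* ACL mask governs the permissions */
	return mode & ~umask;		/* otherwise behave like the VFS would */
}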
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index fce6ce694fde..cbc464043b6f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1922,7 +1922,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
req = fuse_get_req(fc);
if (IS_ERR(req))
- return PTR_ERR(req);
+ return POLLERR;
req->in.h.opcode = FUSE_POLL;
req->in.h.nodeid = ff->nodeid;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index aaf2f9ff970e..52b641fc0faf 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -446,6 +446,9 @@ struct fuse_conn {
/** Do multi-page cached writes */
unsigned big_writes:1;
+ /** Don't apply umask to creation modes */
+ unsigned dont_mask:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -481,6 +484,12 @@ struct fuse_conn {
/** Called on final put */
void (*release)(struct fuse_conn *);
+
+ /** Super block for this connection. */
+ struct super_block *sb;
+
+ /** Read/write semaphore to hold when accessing sb. */
+ struct rw_semaphore killsb;
};
static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
@@ -509,6 +518,11 @@ extern const struct file_operations fuse_dev_operations;
extern const struct dentry_operations fuse_dentry_operations;
/**
+ * Inode to nodeid comparison.
+ */
+int fuse_inode_eq(struct inode *inode, void *_nodeidp);
+
+/**
* Get a filled in inode
*/
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
@@ -708,6 +722,19 @@ void fuse_release_nowrite(struct inode *inode);
u64 fuse_get_attr_version(struct fuse_conn *fc);
+/**
+ * File-system tells the kernel to invalidate cache for the given node id.
+ */
+int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+ loff_t offset, loff_t len);
+
+/**
+ * File-system tells the kernel to invalidate parent attributes and
+ * the dentry matching parent/name.
+ */
+int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
+ struct qstr *name);
+
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
bool isdir);
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index d8673ccf90b7..4567db6f9430 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -206,7 +206,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
BUG();
}
-static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
+int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
u64 nodeid = *(u64 *) _nodeidp;
if (get_node_id(inode) == nodeid)
@@ -257,6 +257,31 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
return inode;
}
+int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+ loff_t offset, loff_t len)
+{
+ struct inode *inode;
+ pgoff_t pg_start;
+ pgoff_t pg_end;
+
+ inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
+ if (!inode)
+ return -ENOENT;
+
+ fuse_invalidate_attr(inode);
+ if (offset >= 0) {
+ pg_start = offset >> PAGE_CACHE_SHIFT;
+ if (len <= 0)
+ pg_end = -1;
+ else
+ pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+ invalidate_inode_pages2_range(inode->i_mapping,
+ pg_start, pg_end);
+ }
+ iput(inode);
+ return 0;
+}
+
static void fuse_umount_begin(struct super_block *sb)
{
fuse_abort_conn(get_fuse_conn_super(sb));
@@ -480,6 +505,7 @@ void fuse_conn_init(struct fuse_conn *fc)
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
mutex_init(&fc->inst_mutex);
+ init_rwsem(&fc->killsb);
atomic_set(&fc->count, 1);
init_waitqueue_head(&fc->waitq);
init_waitqueue_head(&fc->blocked_waitq);
@@ -725,6 +751,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
}
if (arg->flags & FUSE_BIG_WRITES)
fc->big_writes = 1;
+ if (arg->flags & FUSE_DONT_MASK)
+ fc->dont_mask = 1;
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
@@ -748,7 +776,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->minor = FUSE_KERNEL_MINOR_VERSION;
arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
- FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES;
+ FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
@@ -773,6 +801,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
int err;
+ fc->bdi.name = "fuse";
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does it's own writeback accounting */
@@ -860,10 +889,16 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fuse_conn_init(fc);
fc->dev = sb->s_dev;
+ fc->sb = sb;
err = fuse_bdi_init(fc, sb);
if (err)
goto err_put_conn;
+ /* Handle umasking inside the fuse code */
+ if (sb->s_flags & MS_POSIXACL)
+ fc->dont_mask = 1;
+ sb->s_flags |= MS_POSIXACL;
+
fc->release = fuse_free_conn;
fc->flags = d.flags;
fc->user_id = d.user_id;
@@ -941,12 +976,25 @@ static int fuse_get_sb(struct file_system_type *fs_type,
return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
}
+static void fuse_kill_sb_anon(struct super_block *sb)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+ if (fc) {
+ down_write(&fc->killsb);
+ fc->sb = NULL;
+ up_write(&fc->killsb);
+ }
+
+ kill_anon_super(sb);
+}
+
static struct file_system_type fuse_fs_type = {
.owner = THIS_MODULE,
.name = "fuse",
.fs_flags = FS_HAS_SUBTYPE,
.get_sb = fuse_get_sb,
- .kill_sb = kill_anon_super,
+ .kill_sb = fuse_kill_sb_anon,
};
#ifdef CONFIG_BLOCK
@@ -958,11 +1006,24 @@ static int fuse_get_sb_blk(struct file_system_type *fs_type,
mnt);
}
+static void fuse_kill_sb_blk(struct super_block *sb)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+ if (fc) {
+ down_write(&fc->killsb);
+ fc->sb = NULL;
+ up_write(&fc->killsb);
+ }
+
+ kill_block_super(sb);
+}
+
static struct file_system_type fuseblk_fs_type = {
.owner = THIS_MODULE,
.name = "fuseblk",
.get_sb = fuse_get_sb_blk,
- .kill_sb = kill_block_super,
+ .kill_sb = fuse_kill_sb_blk,
.fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
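The same sb-lifetime idiom appears three times above (both kill_sb variants and the notify handlers): readers resolve fc->sb under the shared side of killsb, while the unmount path clears it under the exclusive side before tearing the super block down. Reduced to a generic, self-contained sketch; struct conn, sb_lock and with_live_sb() are illustrative names, not kernel symbols.

#include <linux/fs.h>
#include <linux/rwsem.h>

struct conn {
	struct rw_semaphore sb_lock;	/* plays the role of fc->killsb */
	struct super_block *sb;		/* NULL once the fs is being killed */
};

/* Run fn against the superblock only while it is guaranteed to stay alive. */
static int with_live_sb(struct conn *c,
			int (*fn)(struct super_block *sb, void *arg), void *arg)
{
	int err = -ENOENT;

	down_read(&c->sb_lock);
	if (c->sb)
		err = fn(c->sb, arg);
	up_read(&c->sb_lock);
	return err;
}

The kill path mirrors fuse_kill_sb_anon() above: down_write(), sb = NULL, up_write(), then the usual kill_*_super().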
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 03ebb439ace0..7ebae9a4ecc0 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -624,6 +624,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
{
struct gfs2_inode *ip = GFS2_I(mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
int alloc_required;
int error = 0;
@@ -637,6 +638,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
error = gfs2_glock_nq(&ip->i_gh);
if (unlikely(error))
goto out_uninit;
+ if (&ip->i_inode == sdp->sd_rindex) {
+ error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
+ GL_NOCACHE, &m_ip->i_gh);
+ if (unlikely(error)) {
+ gfs2_glock_dq(&ip->i_gh);
+ goto out_uninit;
+ }
+ }
error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
if (error)
@@ -667,6 +676,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
rblocks += data_blocks ? data_blocks : 1;
if (ind_blocks || data_blocks)
rblocks += RES_STATFS + RES_QUOTA;
+ if (&ip->i_inode == sdp->sd_rindex)
+ rblocks += 2 * RES_STATFS;
error = gfs2_trans_begin(sdp, rblocks,
PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -712,6 +723,10 @@ out_alloc_put:
gfs2_alloc_put(ip);
}
out_unlock:
+ if (&ip->i_inode == sdp->sd_rindex) {
+ gfs2_glock_dq(&m_ip->i_gh);
+ gfs2_holder_uninit(&m_ip->i_gh);
+ }
gfs2_glock_dq(&ip->i_gh);
out_uninit:
gfs2_holder_uninit(&ip->i_gh);
@@ -725,14 +740,21 @@ out_uninit:
static void adjust_fs_space(struct inode *inode)
{
struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+ struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+ struct buffer_head *m_bh, *l_bh;
u64 fs_total, new_free;
/* Total up the file system space, according to the latest rindex. */
fs_total = gfs2_ri_total(sdp);
+ if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
+ return;
spin_lock(&sdp->sd_statfs_spin);
+ gfs2_statfs_change_in(m_sc, m_bh->b_data +
+ sizeof(struct gfs2_dinode));
if (fs_total > (m_sc->sc_total + l_sc->sc_total))
new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
else
@@ -741,6 +763,13 @@ static void adjust_fs_space(struct inode *inode)
fs_warn(sdp, "File system extended by %llu blocks.\n",
(unsigned long long)new_free);
gfs2_statfs_change(sdp, new_free, new_free, 0);
+
+ if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
+ goto out;
+ update_statfs(sdp, m_bh, l_bh);
+ brelse(l_bh);
+out:
+ brelse(m_bh);
}
/**
@@ -763,6 +792,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
u64 to = pos + copied;
void *kaddr;
unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
@@ -794,6 +824,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
brelse(dibh);
gfs2_trans_end(sdp);
+ if (inode == sdp->sd_rindex) {
+ gfs2_glock_dq(&m_ip->i_gh);
+ gfs2_holder_uninit(&m_ip->i_gh);
+ }
gfs2_glock_dq(&ip->i_gh);
gfs2_holder_uninit(&ip->i_gh);
return copied;
@@ -823,6 +857,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
struct inode *inode = page->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct buffer_head *dibh;
struct gfs2_alloc *al = ip->i_alloc;
unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
@@ -865,6 +900,10 @@ failed:
gfs2_quota_unlock(ip);
gfs2_alloc_put(ip);
}
+ if (inode == sdp->sd_rindex) {
+ gfs2_glock_dq(&m_ip->i_gh);
+ gfs2_holder_uninit(&m_ip->i_gh);
+ }
gfs2_glock_dq(&ip->i_gh);
gfs2_holder_uninit(&ip->i_gh);
return ret;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 297421c0427a..827136ee794c 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1300,7 +1300,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
struct gfs2_glock *gl;
int may_demote;
int nr_skipped = 0;
- int got_ref = 0;
LIST_HEAD(skipped);
if (nr == 0)
@@ -1315,10 +1314,13 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
+ /* Check if glock is about to be freed */
+ if (atomic_read(&gl->gl_ref) == 0)
+ continue;
+
/* Test for being demotable */
if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
gfs2_glock_hold(gl);
- got_ref = 1;
spin_unlock(&lru_lock);
spin_lock(&gl->gl_spin);
may_demote = demote_ok(gl);
@@ -1327,25 +1329,14 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
if (may_demote) {
handle_callback(gl, LM_ST_UNLOCKED, 0);
nr--;
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gfs2_glock_put(gl);
- got_ref = 0;
}
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put(gl);
spin_lock(&lru_lock);
- if (may_demote)
- continue;
- }
- if (list_empty(&gl->gl_lru) &&
- (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
- nr_skipped++;
- list_add(&gl->gl_lru, &skipped);
- }
- if (got_ref) {
- spin_unlock(&lru_lock);
- gfs2_glock_put(gl);
- spin_lock(&lru_lock);
- got_ref = 0;
+ continue;
}
+ nr_skipped++;
+ list_add(&gl->gl_lru, &skipped);
}
list_splice(&skipped, &lru_list);
atomic_add(nr_skipped, &lru_count);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0a6801336470..552e321cee5e 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -353,7 +353,7 @@ fail:
return error;
}
-static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
+void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
const struct gfs2_statfs_change *str = buf;
@@ -441,6 +441,29 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
brelse(l_bh);
}
+void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
+ struct buffer_head *l_bh)
+{
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+ struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
+ struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
+ struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+
+ gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
+
+ spin_lock(&sdp->sd_statfs_spin);
+ m_sc->sc_total += l_sc->sc_total;
+ m_sc->sc_free += l_sc->sc_free;
+ m_sc->sc_dinodes += l_sc->sc_dinodes;
+ memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
+ memset(l_bh->b_data + sizeof(struct gfs2_dinode),
+ 0, sizeof(struct gfs2_statfs_change));
+ spin_unlock(&sdp->sd_statfs_spin);
+
+ gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
+ gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
+}
+
int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -477,19 +500,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
if (error)
goto out_bh2;
- gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
-
- spin_lock(&sdp->sd_statfs_spin);
- m_sc->sc_total += l_sc->sc_total;
- m_sc->sc_free += l_sc->sc_free;
- m_sc->sc_dinodes += l_sc->sc_dinodes;
- memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
- memset(l_bh->b_data + sizeof(struct gfs2_dinode),
- 0, sizeof(struct gfs2_statfs_change));
- spin_unlock(&sdp->sd_statfs_spin);
-
- gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
- gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
+ update_statfs(sdp, m_bh, l_bh);
gfs2_trans_end(sdp);
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index b56413e3e40d..22e0417ed996 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -40,6 +40,10 @@ extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
s64 dinodes);
+extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
+ const void *buf);
+extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
+ struct buffer_head *l_bh);
extern int gfs2_statfs_sync(struct gfs2_sbd *sdp);
extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index fe02ad4740e7..032604e5ef2c 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -972,6 +972,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
sb->s_blocksize_bits = 10;
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
/* NULL is printed as <NULL> by sprintf: avoid that. */
if (req_root == NULL)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 941c8425c10b..2d8abaf3b8a8 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -44,6 +44,7 @@ static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
static struct backing_dev_info hugetlbfs_backing_dev_info = {
+ .name = "hugetlbfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 7515e73e2bfb..696686cc206e 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -130,9 +130,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
if (jffs2_sum_active()) {
s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
if (!s) {
- kfree(flashbuf);
JFFS2_WARNING("Can't allocate memory for summary\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
}
diff --git a/fs/namei.c b/fs/namei.c
index 5b961eb71cbf..f3c5b278895a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1761,6 +1761,10 @@ do_last:
goto exit;
}
filp = nameidata_to_filp(&nd, open_flag);
+ if (IS_ERR(filp))
+ ima_counts_put(&nd.path,
+ acc_mode & (MAY_READ | MAY_WRITE |
+ MAY_EXEC));
mnt_drop_write(nd.path.mnt);
if (nd.root.mnt)
path_put(&nd.root);
@@ -1817,6 +1821,9 @@ ok:
goto exit;
}
filp = nameidata_to_filp(&nd, open_flag);
+ if (IS_ERR(filp))
+ ima_counts_put(&nd.path,
+ acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
/*
* It is now safe to drop the mnt write
* because the filp has had a write taken
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index c2d061675d80..229b8949182a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -879,6 +879,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *
server->rsize = NFS_MAX_FILE_IO_SIZE;
server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ server->backing_dev_info.name = "nfs";
server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
if (server->wsize > max_rpc_payload)
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 4145083dcf88..23341c1063bc 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -678,7 +678,6 @@ __be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
int access, struct file **filp)
{
- const struct cred *cred = current_cred();
struct dentry *dentry;
struct inode *inode;
int flags = O_RDONLY|O_LARGEFILE;
@@ -733,7 +732,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
vfs_dq_init(inode);
}
*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
- flags, cred);
+ flags, current_cred());
if (IS_ERR(*filp))
host_err = PTR_ERR(*filp);
else
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 828a889be909..6b71be963699 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -361,7 +361,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
spin_lock(&entry->lock);
} else {
- fsnotify_add_mark(new_entry, dnotify_group, inode);
+ fsnotify_add_mark(new_entry, dnotify_group, inode, 0);
spin_lock(&new_entry->lock);
entry = new_entry;
dnentry = new_dnentry;
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index c8a07c65482b..3f3261b11295 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -283,12 +283,20 @@ struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *grou
return NULL;
}
+void fsnotify_duplicate_mark(struct fsnotify_mark_entry *new, struct fsnotify_mark_entry *old)
+{
+ assert_spin_locked(&old->lock);
+ new->inode = old->inode;
+ new->group = old->group;
+ new->mask = old->mask;
+ new->free_mark = old->free_mark;
+}
+
/*
* Nothing fancy, just initialize lists and locks and counters.
*/
void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
void (*free_mark)(struct fsnotify_mark_entry *entry))
-
{
spin_lock_init(&entry->lock);
atomic_set(&entry->refcnt, 1);
@@ -305,9 +313,10 @@ void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
* event types should be delivered to which group and for which inodes.
*/
int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
- struct fsnotify_group *group, struct inode *inode)
+ struct fsnotify_group *group, struct inode *inode,
+ int allow_dups)
{
- struct fsnotify_mark_entry *lentry;
+ struct fsnotify_mark_entry *lentry = NULL;
int ret = 0;
inode = igrab(inode);
@@ -327,7 +336,8 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
entry->group = group;
entry->inode = inode;
- lentry = fsnotify_find_mark_entry(group, inode);
+ if (!allow_dups)
+ lentry = fsnotify_find_mark_entry(group, inode);
if (!lentry) {
hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
list_add(&entry->g_list, &group->mark_entries);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index ff231ad23895..1a3733bb1934 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -296,12 +296,15 @@ static int inotify_fasync(int fd, struct file *file, int on)
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
+ struct user_struct *user = group->inotify_data.user;
fsnotify_clear_marks_by_group(group);
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
fsnotify_put_group(group);
+ atomic_dec(&user->inotify_devs);
+
return 0;
}
@@ -445,7 +448,7 @@ find_entry:
goto out_err;
}
- ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
+ ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode, 0);
if (ret == -EEXIST)
goto find_entry;
else if (ret)
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 1c9efb406a96..02bf17808bdc 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -325,6 +325,7 @@ clear_fields:
}
static struct backing_dev_info dlmfs_backing_dev_info = {
+ .name = "ocfs2-dlmfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3ce5ae9e3d2d..917f338a6739 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -237,20 +237,19 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
struct mm_struct *mm = get_task_mm(task);
if (!mm)
return NULL;
+ if (mm != current->mm) {
+ /*
+ * task->mm can change during the security check;
+ * in that case we must notice the change afterwards.
+ */
+ if (!ptrace_may_access(task, PTRACE_MODE_READ) ||
+ mm != task->mm) {
+ mmput(mm);
+ return NULL;
+ }
+ }
down_read(&mm->mmap_sem);
- task_lock(task);
- if (task->mm != mm)
- goto out;
- if (task->mm != current->mm &&
- __ptrace_may_access(task, PTRACE_MODE_READ) < 0)
- goto out;
- task_unlock(task);
return mm;
-out:
- task_unlock(task);
- up_read(&mm->mmap_sem);
- mmput(mm);
- return NULL;
}
static int proc_pid_cmdline(struct task_struct *task, char * buffer)
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
index 0c10a0b3f146..766b1d456050 100644
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -4,13 +4,18 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/time.h>
+#include <linux/kernel_stat.h>
#include <asm/cputime.h>
static int uptime_proc_show(struct seq_file *m, void *v)
{
struct timespec uptime;
struct timespec idle;
- cputime_t idletime = cputime_add(init_task.utime, init_task.stime);
+ int i;
+ cputime_t idletime = cputime_zero;
+
+ for_each_possible_cpu(i)
+ idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
do_posix_clock_monotonic_gettime(&uptime);
monotonic_to_bootbased(&uptime);
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 0ff7566c767c..a7f0110fca4c 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -46,6 +46,7 @@ static const struct super_operations ramfs_ops;
static const struct inode_operations ramfs_dir_inode_operations;
static struct backing_dev_info ramfs_backing_dev_info = {
+ .name = "ramfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK |
BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index 7c5ab6330dd6..6a9e30c041dd 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_REISERFS_FS) += reiserfs.o
reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \
super.o prints.o objectid.o lbalance.o ibalance.o stree.o \
hashes.o tail_conversion.o journal.o resize.o \
- item_ops.o ioctl.o procfs.o xattr.o
+ item_ops.o ioctl.o procfs.o xattr.o lock.o
ifeq ($(CONFIG_REISERFS_FS_XATTR),y)
reiserfs-objs += xattr_user.o xattr_trusted.o
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index e716161ab325..685495707181 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -1249,14 +1249,18 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
else if (bitmap == 0)
block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
+ reiserfs_write_unlock(sb);
bh = sb_bread(sb, block);
+ reiserfs_write_lock(sb);
if (bh == NULL)
reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
"reading failed", __func__, block);
else {
if (buffer_locked(bh)) {
PROC_INFO_INC(sb, scan_bitmap.wait);
+ reiserfs_write_unlock(sb);
__wait_on_buffer(bh);
+ reiserfs_write_lock(sb);
}
BUG_ON(!buffer_uptodate(bh));
BUG_ON(atomic_read(&bh->b_count) == 0);
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 6d2668fdc384..17f31ad379c8 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -174,14 +174,22 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
// user space buffer is swapped out. At that time
// entry can move to somewhere else
memcpy(local_buf, d_name, d_reclen);
+
+ /*
+ * Since filldir might sleep, we can release
+ * the write lock here for other waiters
+ */
+ reiserfs_write_unlock(inode->i_sb);
if (filldir
(dirent, local_buf, d_reclen, d_off, d_ino,
DT_UNKNOWN) < 0) {
+ reiserfs_write_lock(inode->i_sb);
if (local_buf != small_buf) {
kfree(local_buf);
}
goto end;
}
+ reiserfs_write_lock(inode->i_sb);
if (local_buf != small_buf) {
kfree(local_buf);
}
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 128d3f7c8aa5..60c080440661 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -21,14 +21,6 @@
#include <linux/buffer_head.h>
#include <linux/kernel.h>
-#ifdef CONFIG_REISERFS_CHECK
-
-struct tree_balance *cur_tb = NULL; /* detects whether more than one
- copy of tb exists as a means
- of checking whether schedule
- is interrupting do_balance */
-#endif
-
static inline void buffer_info_init_left(struct tree_balance *tb,
struct buffer_info *bi)
{
@@ -1840,11 +1832,12 @@ static int check_before_balancing(struct tree_balance *tb)
{
int retval = 0;
- if (cur_tb) {
+ if (REISERFS_SB(tb->tb_sb)->cur_tb) {
reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
"occurred based on cur_tb not being null at "
"this point in code. do_balance cannot properly "
- "handle schedule occurring while it runs.");
+ "handle concurrent tree accesses on a same "
+ "mount point.");
}
/* double check that buffers that we will modify are unlocked. (fix_nodes should already have
@@ -1986,7 +1979,7 @@ static inline void do_balance_starts(struct tree_balance *tb)
"check");*/
RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
- cur_tb = tb;
+ REISERFS_SB(tb->tb_sb)->cur_tb = tb;
#endif
}
@@ -1996,7 +1989,7 @@ static inline void do_balance_completed(struct tree_balance *tb)
#ifdef CONFIG_REISERFS_CHECK
check_leaf_level(tb);
check_internal_levels(tb);
- cur_tb = NULL;
+ REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
#endif
/* reiserfs_free_block is no longer schedule safe. So, we need to
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 5e5a4e6fbaf8..d2f31330dcae 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -563,9 +563,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
return needed_nodes;
}
-#ifdef CONFIG_REISERFS_CHECK
-extern struct tree_balance *cur_tb;
-#endif
/* Set parameters for balancing.
* Performs write of results of analysis of balancing into structure tb,
@@ -1022,7 +1019,11 @@ static int get_far_parent(struct tree_balance *tb,
/* Check whether the common parent is locked. */
if (buffer_locked(*pcom_father)) {
+
+ /* Release the write lock while the buffer is busy */
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(*pcom_father);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb)) {
brelse(*pcom_father);
return REPEAT_SEARCH;
@@ -1927,7 +1928,9 @@ static int get_direct_parent(struct tree_balance *tb, int h)
return REPEAT_SEARCH;
if (buffer_locked(bh)) {
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(bh);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -1965,7 +1968,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
FL[h]);
son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
+ reiserfs_write_unlock(sb);
bh = sb_bread(sb, son_number);
+ reiserfs_write_lock(sb);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2003,7 +2008,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
child_position =
(bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
+ reiserfs_write_unlock(sb);
bh = sb_bread(sb, son_number);
+ reiserfs_write_lock(sb);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2278,7 +2285,9 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
REPEAT_SEARCH : CARRY_ON;
}
#endif
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(locked);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -2349,12 +2358,14 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
/* if it possible in indirect_to_direct conversion */
if (buffer_locked(tbS0)) {
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(tbS0);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
#ifdef CONFIG_REISERFS_CHECK
- if (cur_tb) {
+ if (REISERFS_SB(tb->tb_sb)->cur_tb) {
print_cur_tb("fix_nodes");
reiserfs_panic(tb->tb_sb, "PAP-8305",
"there is pending do_balance");
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a14d6cd9eeda..853f4f6fe920 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -489,10 +489,14 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
disappeared */
if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
int err;
- lock_kernel();
+
+ reiserfs_write_lock(inode->i_sb);
+
err = reiserfs_commit_for_inode(inode);
REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
- unlock_kernel();
+
+ reiserfs_write_unlock(inode->i_sb);
+
if (err < 0)
ret = err;
}
@@ -601,6 +605,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
__le32 *item;
int done;
int fs_gen;
+ int lock_depth;
struct reiserfs_transaction_handle *th = NULL;
/* space reserved in transaction batch:
. 3 balancings in direct->indirect conversion
@@ -616,12 +621,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
loff_t new_offset =
(((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
- /* bad.... */
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
version = get_inode_item_key_version(inode);
if (!file_capable(inode, block)) {
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
return -EFBIG;
}
@@ -633,7 +637,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
/* find number of block-th logical block of the file */
ret = _get_block_create_0(inode, block, bh_result,
create | GET_BLOCK_READ_DIRECT);
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
return ret;
}
/*
@@ -751,7 +755,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (!dangle && th)
retval = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
/* the item was found, so new blocks were not added to the file
** there is no need to make sure the inode is updated with this
@@ -997,10 +1001,16 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (retval)
goto failure;
}
- /* inserting indirect pointers for a hole can take a
- ** long time. reschedule if needed
+ /*
+ * inserting indirect pointers for a hole can take a
+ * long time. reschedule if needed and also release the write
+ * lock for others.
*/
- cond_resched();
+ if (need_resched()) {
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ schedule();
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ }
retval = search_for_position_by_key(inode->i_sb, &key, &path);
if (retval == IO_ERROR) {
@@ -1035,7 +1045,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
retval = err;
}
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
reiserfs_check_path(&path);
return retval;
}
@@ -2072,8 +2082,9 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
int error;
struct buffer_head *bh = NULL;
int err2;
+ int lock_depth;
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
if (inode->i_size > 0) {
error = grab_tail_page(inode, &page, &bh);
@@ -2142,14 +2153,17 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
page_cache_release(page);
}
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+
return 0;
out:
if (page) {
unlock_page(page);
page_cache_release(page);
}
- reiserfs_write_unlock(inode->i_sb);
+
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+
return error;
}
@@ -2608,7 +2622,10 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
int ret;
int old_ref = 0;
+ reiserfs_write_unlock(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
+
fix_tail_page_for_writing(page);
if (reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th;
@@ -2664,6 +2681,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
int update_sd = 0;
struct reiserfs_transaction_handle *th;
unsigned start;
+ int lock_depth = 0;
+ bool locked = false;
if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
pos ++;
@@ -2690,9 +2709,11 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
** to do the i_size updates here.
*/
pos += copied;
+
if (pos > inode->i_size) {
struct reiserfs_transaction_handle myth;
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ locked = true;
/* If the file have grown beyond the border where it
can have a tail, unmark it as needing a tail
packing */
@@ -2703,10 +2724,9 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
ret = journal_begin(&myth, inode->i_sb, 1);
- if (ret) {
- reiserfs_write_unlock(inode->i_sb);
+ if (ret)
goto journal_error;
- }
+
reiserfs_update_inode_transaction(inode);
inode->i_size = pos;
/*
@@ -2718,34 +2738,36 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
reiserfs_update_sd(&myth, inode);
update_sd = 1;
ret = journal_end(&myth, inode->i_sb, 1);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto journal_error;
}
if (th) {
- reiserfs_write_lock(inode->i_sb);
+ if (!locked) {
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ locked = true;
+ }
if (!update_sd)
mark_inode_dirty(inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto out;
}
out:
+ if (locked)
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
unlock_page(page);
page_cache_release(page);
return ret == 0 ? copied : ret;
journal_error:
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ locked = false;
if (th) {
- reiserfs_write_lock(inode->i_sb);
if (!update_sd)
reiserfs_update_sd(th, inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
}
-
goto out;
}
@@ -2758,7 +2780,10 @@ int reiserfs_commit_write(struct file *f, struct page *page,
int update_sd = 0;
struct reiserfs_transaction_handle *th = NULL;
+ reiserfs_write_unlock(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
+
if (reiserfs_transaction_running(inode->i_sb)) {
th = current->journal_info;
}
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 0ccc3fdda7bf..5e40b0cd4c3d 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -141,9 +141,11 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
default:
return -ENOIOCTLCMD;
}
- lock_kernel();
+
+ reiserfs_write_lock(inode->i_sb);
ret = reiserfs_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
- unlock_kernel();
+ reiserfs_write_unlock(inode->i_sb);
+
return ret;
}
#endif
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 77f5bb746bf0..86c1ff41476f 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -429,21 +429,6 @@ static void clear_prepared_bits(struct buffer_head *bh)
clear_buffer_journal_restore_dirty(bh);
}
-/* utility function to force a BUG if it is called without the big
-** kernel lock held. caller is the string printed just before calling BUG()
-*/
-void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
-{
-#ifdef CONFIG_SMP
- if (current->lock_depth < 0) {
- reiserfs_panic(sb, "journal-1", "%s called without kernel "
- "lock held", caller);
- }
-#else
- ;
-#endif
-}
-
/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
super_block
@@ -556,7 +541,8 @@ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
static inline void lock_journal(struct super_block *sb)
{
PROC_INFO_INC(sb, journal.lock_journal);
- mutex_lock(&SB_JOURNAL(sb)->j_mutex);
+
+ reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
}
/* unlock the current transaction */
@@ -708,7 +694,9 @@ static void check_barrier_completion(struct super_block *s,
disable_barrier(s);
set_buffer_uptodate(bh);
set_buffer_dirty(bh);
+ reiserfs_write_unlock(s);
sync_dirty_buffer(bh);
+ reiserfs_write_lock(s);
}
}
@@ -996,8 +984,13 @@ static int reiserfs_async_progress_wait(struct super_block *s)
{
DEFINE_WAIT(wait);
struct reiserfs_journal *j = SB_JOURNAL(s);
- if (atomic_read(&j->j_async_throttle))
+
+ if (atomic_read(&j->j_async_throttle)) {
+ reiserfs_write_unlock(s);
congestion_wait(WRITE, HZ / 10);
+ reiserfs_write_lock(s);
+ }
+
return 0;
}
@@ -1043,7 +1036,8 @@ static int flush_commit_list(struct super_block *s,
}
/* make sure nobody is trying to flush this one at the same time */
- mutex_lock(&jl->j_commit_mutex);
+ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);
+
if (!journal_list_still_alive(s, trans_id)) {
mutex_unlock(&jl->j_commit_mutex);
goto put_jl;
@@ -1061,12 +1055,17 @@ static int flush_commit_list(struct super_block *s,
if (!list_empty(&jl->j_bh_list)) {
int ret;
- unlock_kernel();
+
+ /*
+ * We might sleep in numerous places inside
+ * write_ordered_buffers. Relax the write lock.
+ */
+ reiserfs_write_unlock(s);
ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_bh_list);
if (ret < 0 && retval == 0)
retval = ret;
- lock_kernel();
+ reiserfs_write_lock(s);
}
BUG_ON(!list_empty(&jl->j_bh_list));
/*
@@ -1085,8 +1084,11 @@ static int flush_commit_list(struct super_block *s,
SB_ONDISK_JOURNAL_SIZE(s);
tbh = journal_find_get_block(s, bn);
if (tbh) {
- if (buffer_dirty(tbh))
- ll_rw_block(WRITE, 1, &tbh) ;
+ if (buffer_dirty(tbh)) {
+ reiserfs_write_unlock(s);
+ ll_rw_block(WRITE, 1, &tbh);
+ reiserfs_write_lock(s);
+ }
put_bh(tbh) ;
}
}
@@ -1114,12 +1116,19 @@ static int flush_commit_list(struct super_block *s,
bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
(jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
tbh = journal_find_get_block(s, bn);
+
+ reiserfs_write_unlock(s);
wait_on_buffer(tbh);
+ reiserfs_write_lock(s);
// since we're using ll_rw_blk above, it might have skipped over
// a locked buffer. Double check here
//
- if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
+ /* redundant, sync_dirty_buffer() checks */
+ if (buffer_dirty(tbh)) {
+ reiserfs_write_unlock(s);
sync_dirty_buffer(tbh);
+ reiserfs_write_lock(s);
+ }
if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning(s, "journal-601",
@@ -1143,10 +1152,15 @@ static int flush_commit_list(struct super_block *s,
if (buffer_dirty(jl->j_commit_bh))
BUG();
mark_buffer_dirty(jl->j_commit_bh) ;
+ reiserfs_write_unlock(s);
sync_dirty_buffer(jl->j_commit_bh) ;
+ reiserfs_write_lock(s);
}
- } else
+ } else {
+ reiserfs_write_unlock(s);
wait_on_buffer(jl->j_commit_bh);
+ reiserfs_write_lock(s);
+ }
check_barrier_completion(s, jl->j_commit_bh);
@@ -1286,7 +1300,9 @@ static int _update_journal_header_block(struct super_block *sb,
if (trans_id >= journal->j_last_flush_trans_id) {
if (buffer_locked((journal->j_header_bh))) {
+ reiserfs_write_unlock(sb);
wait_on_buffer((journal->j_header_bh));
+ reiserfs_write_lock(sb);
if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning(sb, "journal-699",
@@ -1312,12 +1328,16 @@ static int _update_journal_header_block(struct super_block *sb,
disable_barrier(sb);
goto sync;
}
+ reiserfs_write_unlock(sb);
wait_on_buffer(journal->j_header_bh);
+ reiserfs_write_lock(sb);
check_barrier_completion(sb, journal->j_header_bh);
} else {
sync:
set_buffer_dirty(journal->j_header_bh);
+ reiserfs_write_unlock(sb);
sync_dirty_buffer(journal->j_header_bh);
+ reiserfs_write_lock(sb);
}
if (!buffer_uptodate(journal->j_header_bh)) {
reiserfs_warning(sb, "journal-837",
@@ -1409,7 +1429,7 @@ static int flush_journal_list(struct super_block *s,
/* if flushall == 0, the lock is already held */
if (flushall) {
- mutex_lock(&journal->j_flush_mutex);
+ reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
} else if (mutex_trylock(&journal->j_flush_mutex)) {
BUG();
}
@@ -1553,7 +1573,11 @@ static int flush_journal_list(struct super_block *s,
reiserfs_panic(s, "journal-1011",
"cn->bh is NULL");
}
+
+ reiserfs_write_unlock(s);
wait_on_buffer(cn->bh);
+ reiserfs_write_lock(s);
+
if (!cn->bh) {
reiserfs_panic(s, "journal-1012",
"cn->bh is NULL");
@@ -1769,7 +1793,7 @@ static int kupdate_transactions(struct super_block *s,
struct reiserfs_journal *journal = SB_JOURNAL(s);
chunk.nr = 0;
- mutex_lock(&journal->j_flush_mutex);
+ reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
if (!journal_list_still_alive(s, orig_trans_id)) {
goto done;
}
@@ -1973,11 +1997,19 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
reiserfs_mounted_fs_count--;
/* wait for all commits to finish */
cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
+
+ /*
+ * We must release the write lock here because
+ * the workqueue job (flush_async_commits) needs this lock
+ */
+ reiserfs_write_unlock(sb);
flush_workqueue(commit_wq);
+
if (!reiserfs_mounted_fs_count) {
destroy_workqueue(commit_wq);
commit_wq = NULL;
}
+ reiserfs_write_lock(sb);
free_journal_ram(sb);
@@ -2243,7 +2275,11 @@ static int journal_read_transaction(struct super_block *sb,
/* read in the log blocks, memcpy to the corresponding real block */
ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
for (i = 0; i < get_desc_trans_len(desc); i++) {
+
+ reiserfs_write_unlock(sb);
wait_on_buffer(log_blocks[i]);
+ reiserfs_write_lock(sb);
+
if (!buffer_uptodate(log_blocks[i])) {
reiserfs_warning(sb, "journal-1212",
"REPLAY FAILURE fsck required! "
@@ -2964,8 +3000,11 @@ static void queue_log_writer(struct super_block *s)
init_waitqueue_entry(&wait, current);
add_wait_queue(&journal->j_join_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
+ if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
+ reiserfs_write_unlock(s);
schedule();
+ reiserfs_write_lock(s);
+ }
__set_current_state(TASK_RUNNING);
remove_wait_queue(&journal->j_join_wait, &wait);
}
@@ -2982,7 +3021,9 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
struct reiserfs_journal *journal = SB_JOURNAL(sb);
unsigned long bcount = journal->j_bcount;
while (1) {
+ reiserfs_write_unlock(sb);
schedule_timeout_uninterruptible(1);
+ reiserfs_write_lock(sb);
journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
while ((atomic_read(&journal->j_wcount) > 0 ||
atomic_read(&journal->j_jlock)) &&
@@ -3033,7 +3074,9 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
unlock_journal(sb);
+ reiserfs_write_unlock(sb);
reiserfs_wait_on_write_block(sb);
+ reiserfs_write_lock(sb);
PROC_INFO_INC(sb, journal.journal_relock_writers);
goto relock;
}
@@ -3506,14 +3549,14 @@ static void flush_async_commits(struct work_struct *work)
struct reiserfs_journal_list *jl;
struct list_head *entry;
- lock_kernel();
+ reiserfs_write_lock(sb);
if (!list_empty(&journal->j_journal_list)) {
/* last entry is the youngest, commit it and you get everything */
entry = journal->j_journal_list.prev;
jl = JOURNAL_LIST_ENTRY(entry);
flush_commit_list(sb, jl, 1);
}
- unlock_kernel();
+ reiserfs_write_unlock(sb);
}
/*
@@ -4041,7 +4084,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
* the new transaction is fully setup, and we've already flushed the
* ordered bh list
*/
- mutex_lock(&jl->j_commit_mutex);
+ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);
/* save the transaction id in case we need to commit it later */
commit_trans_id = jl->j_trans_id;
@@ -4156,7 +4199,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
next = cn->next;
free_cnode(sb, cn);
cn = next;
+ reiserfs_write_unlock(sb);
cond_resched();
+ reiserfs_write_lock(sb);
}
/* we are done with both the c_bh and d_bh, but
@@ -4203,10 +4248,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
* is lost.
*/
if (!list_empty(&jl->j_tail_bh_list)) {
- unlock_kernel();
+ reiserfs_write_unlock(sb);
write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_tail_bh_list);
- lock_kernel();
+ reiserfs_write_lock(sb);
}
BUG_ON(!list_empty(&jl->j_tail_bh_list));
mutex_unlock(&jl->j_commit_mutex);
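journal.c now takes every mutex that might block via reiserfs_mutex_lock_safe() instead of mutex_lock(). The helper itself is not shown in this hunk (it lives in the reiserfs_fs.h part of the series); its presumed shape is below: drop the new per-superblock write lock around the blocking mutex_lock(), so a write-lock holder never sleeps on a mutex owned by a task that is itself waiting for that write lock.

static inline void
reiserfs_mutex_lock_safe(struct mutex *m, struct super_block *s)
{
	reiserfs_write_unlock(s);	/* never sleep with the write lock held */
	mutex_lock(m);
	reiserfs_write_lock(s);
}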
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
new file mode 100644
index 000000000000..cb1bba3802dd
--- /dev/null
+++ b/fs/reiserfs/lock.c
@@ -0,0 +1,89 @@
+#include <linux/reiserfs_fs.h>
+#include <linux/mutex.h>
+
+/*
+ * The previous reiserfs locking scheme was heavily based on
+ * the tricky properties of the Bkl:
+ *
+ * - it was acquired recursively by the same task
+ * - performance relied on the release-while-schedule() property
+ *
+ * Now that we replace it with a mutex, we still want to keep the same
+ * recursive property to avoid big changes in the code structure.
+ * We use our own lock_owner here because the owner field on a mutex
+ * is only available with SMP or mutex debugging; also, we only need this
+ * field for this mutex, so there is no need for a system-wide mutex facility.
+ *
+ * Also, this lock is often released before a call that could block because
+ * reiserfs performance was partially based on the release-while-schedule()
+ * property of the Bkl.
+ */
+void reiserfs_write_lock(struct super_block *s)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ if (sb_i->lock_owner != current) {
+ mutex_lock(&sb_i->lock);
+ sb_i->lock_owner = current;
+ }
+
+ /* No need to protect it, only the current task touches it */
+ sb_i->lock_depth++;
+}
+
+void reiserfs_write_unlock(struct super_block *s)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ /*
+ * Are we unlocking without even holding the lock?
+ * Such a situation could even raise a BUG() if we don't
+ * want the data to become corrupted
+ */
+ WARN_ONCE(sb_i->lock_owner != current,
+ "Superblock write lock imbalance");
+
+ if (--sb_i->lock_depth == -1) {
+ sb_i->lock_owner = NULL;
+ mutex_unlock(&sb_i->lock);
+ }
+}
+
+/*
+ * If we already own the lock, just exit and don't increase the depth.
+ * Useful when we don't want to lock more than once.
+ *
+ * We always return the lock_depth we had before calling
+ * this function.
+ */
+int reiserfs_write_lock_once(struct super_block *s)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ if (sb_i->lock_owner != current) {
+ mutex_lock(&sb_i->lock);
+ sb_i->lock_owner = current;
+ return sb_i->lock_depth++;
+ }
+
+ return sb_i->lock_depth;
+}
+
+void reiserfs_write_unlock_once(struct super_block *s, int lock_depth)
+{
+ if (lock_depth == -1)
+ reiserfs_write_unlock(s);
+}
+
+/*
+ * Utility function to force a BUG if it is called without the superblock
+ * write lock held. caller is the string printed just before calling BUG()
+ */
+void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
+
+ if (sb_i->lock_depth < 0)
+ reiserfs_panic(sb, "%s called without kernel lock held",
+ caller);
+}
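The _once variants above exist for paths that may be entered with or without the write lock already held (reiserfs_get_block, reiserfs_lookup and reiserfs_dirty_inode in the surrounding hunks). A hypothetical caller showing the intended pattern; example_op() and do_tree_work() are illustrative names only.

static int example_op(struct super_block *s)
{
	/* Does not recurse if we already own the lock; returns the prior depth. */
	int depth = reiserfs_write_lock_once(s);
	int err;

	err = do_tree_work(s);		/* assumed helper doing work under the lock */

	/* Releases the lock only if this frame was the one that took it. */
	reiserfs_write_unlock_once(s, depth);
	return err;
}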
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 271579128634..b3973c9f0bf1 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -324,6 +324,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
int retval;
+ int lock_depth;
struct inode *inode = NULL;
struct reiserfs_dir_entry de;
INITIALIZE_PATH(path_to_entry);
@@ -331,7 +332,13 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len)
return ERR_PTR(-ENAMETOOLONG);
- reiserfs_write_lock(dir->i_sb);
+ /*
+ * Might be called with or without the write lock; we must be careful
+ * not to hold it recursively in case we want to release the lock
+ * before rescheduling.
+ */
+ lock_depth = reiserfs_write_lock_once(dir->i_sb);
+
de.de_gen_number_bit_string = NULL;
retval =
reiserfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len,
@@ -341,7 +348,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
inode = reiserfs_iget(dir->i_sb,
(struct cpu_key *)&(de.de_dir_id));
if (!inode || IS_ERR(inode)) {
- reiserfs_write_unlock(dir->i_sb);
+ reiserfs_write_unlock_once(dir->i_sb, lock_depth);
return ERR_PTR(-EACCES);
}
@@ -350,7 +357,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (IS_PRIVATE(dir))
inode->i_flags |= S_PRIVATE;
}
- reiserfs_write_unlock(dir->i_sb);
+ reiserfs_write_unlock_once(dir->i_sb, lock_depth);
if (retval == IO_ERROR) {
return ERR_PTR(-EIO);
}
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 536eacaeb710..adbc6f538515 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -349,10 +349,6 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
. */
-#ifdef CONFIG_REISERFS_CHECK
-extern struct tree_balance *cur_tb;
-#endif
-
void __reiserfs_panic(struct super_block *sb, const char *id,
const char *function, const char *fmt, ...)
{
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 18b315d3d104..b3a94d20f0fc 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -141,7 +141,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
+ reiserfs_write_unlock(s);
sync_dirty_buffer(bh);
+ reiserfs_write_lock(s);
// update bitmap_info stuff
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
brelse(bh);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index d036ee5b1c81..5fa7118f04e1 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -222,9 +222,6 @@ static inline int bin_search(const void *key, /* Key to search for. */
return ITEM_NOT_FOUND;
}
-#ifdef CONFIG_REISERFS_CHECK
-extern struct tree_balance *cur_tb;
-#endif
/* Minimal possible key. It is never in the tree. */
const struct reiserfs_key MIN_KEY = { 0, 0, {{0, 0},} };
@@ -519,25 +516,48 @@ static int is_tree_node(struct buffer_head *bh, int level)
#define SEARCH_BY_KEY_READA 16
-/* The function is NOT SCHEDULE-SAFE! */
-static void search_by_key_reada(struct super_block *s,
+/*
+ * The function is NOT SCHEDULE-SAFE!
+ * It might release the write lock if we need to wait for a block
+ * to be read. Note that in this case it won't re-acquire the lock, to avoid
+ * high contention resulting from too many lock requests, especially
+ * since the caller (search_by_key) will perform other schedule-unsafe
+ * operations just after calling this function.
+ *
+ * @return true if we have unlocked
+ */
+static bool search_by_key_reada(struct super_block *s,
struct buffer_head **bh,
b_blocknr_t *b, int num)
{
int i, j;
+ bool unlocked = false;
for (i = 0; i < num; i++) {
bh[i] = sb_getblk(s, b[i]);
}
+ /*
+ * We are going to read some blocks on which we
+ * have a reference. It's safe, though the blocks might be
+ * changed concurrently once we release the lock. That's
+ * still fine because we check later whether the tree has
+ * changed.
+ */
for (j = 0; j < i; j++) {
/*
* note, this needs attention if we are getting rid of the BKL
* you have to make sure the prepared bit isn't set on this buffer
*/
- if (!buffer_uptodate(bh[j]))
+ if (!buffer_uptodate(bh[j])) {
+ if (!unlocked) {
+ reiserfs_write_unlock(s);
+ unlocked = true;
+ }
ll_rw_block(READA, 1, bh + j);
+ }
brelse(bh[j]);
}
+ return unlocked;
}
/**************************************************************************
@@ -625,11 +645,26 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
have a pointer to it. */
if ((bh = last_element->pe_buffer =
sb_getblk(sb, block_number))) {
+ bool unlocked = false;
+
if (!buffer_uptodate(bh) && reada_count > 1)
- search_by_key_reada(sb, reada_bh,
+ /* may unlock the write lock */
+ unlocked = search_by_key_reada(sb, reada_bh,
reada_blocks, reada_count);
+ /*
+ * If we haven't already unlocked the write lock,
+ * then we need to do that here before reading
+ * the current block
+ */
+ if (!buffer_uptodate(bh) && !unlocked) {
+ reiserfs_write_unlock(sb);
+ unlocked = true;
+ }
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
+
+ if (unlocked)
+ reiserfs_write_lock(sb);
if (!buffer_uptodate(bh))
goto io_error;
} else {
@@ -673,7 +708,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
!key_in_buffer(search_path, key, sb),
"PAP-5130: key is not in the buffer");
#ifdef CONFIG_REISERFS_CHECK
- if (cur_tb) {
+ if (REISERFS_SB(sb)->cur_tb) {
print_cur_tb("5140");
reiserfs_panic(sb, "PAP-5140",
"schedule occurred in do_balance!");
@@ -1024,7 +1059,9 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
reiserfs_free_block(th, inode, block, 1);
}
+ reiserfs_write_unlock(sb);
cond_resched();
+ reiserfs_write_lock(sb);
if (item_moved (&s_ih, path)) {
need_re_search = 1;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index d3aeb061612b..0336b1764c49 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -467,6 +467,12 @@ static void reiserfs_put_super(struct super_block *s)
th.t_trans_id = 0;
lock_kernel();
+ /*
+ * We didn't need to explicitly lock here before, because put_super
+ * is called with the bkl held.
+ * Now that we have our own lock, we must explicitly lock.
+ */
+ reiserfs_write_lock(s);
if (s->s_dirt)
reiserfs_write_super(s);
@@ -500,6 +506,8 @@ static void reiserfs_put_super(struct super_block *s)
reiserfs_proc_info_done(s);
+ reiserfs_write_unlock(s);
+ mutex_destroy(&REISERFS_SB(s)->lock);
kfree(s->s_fs_info);
s->s_fs_info = NULL;
@@ -555,25 +563,28 @@ static void reiserfs_dirty_inode(struct inode *inode)
struct reiserfs_transaction_handle th;
int err = 0;
+ int lock_depth;
+
if (inode->i_sb->s_flags & MS_RDONLY) {
reiserfs_warning(inode->i_sb, "clm-6006",
"writing inode %lu on readonly FS",
inode->i_ino);
return;
}
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
/* this is really only used for atime updates, so they don't have
** to be included in O_SYNC or fsync
*/
err = journal_begin(&th, inode->i_sb, 1);
- if (err) {
- reiserfs_write_unlock(inode->i_sb);
- return;
- }
+ if (err)
+ goto out;
+
reiserfs_update_sd(&th, inode);
journal_end(&th, inode->i_sb, 1);
- reiserfs_write_unlock(inode->i_sb);
+
+out:
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
}
#ifdef CONFIG_QUOTA
@@ -1169,7 +1180,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
unsigned int qfmt = 0;
#ifdef CONFIG_QUOTA
int i;
+#endif
+ /*
+ * We used to rely on the implicitly acquired bkl here.
+ * Now we must explicitly acquire our own lock.
+ */
+ reiserfs_write_lock(s);
+
+#ifdef CONFIG_QUOTA
memcpy(qf_names, REISERFS_SB(s)->s_qf_names, sizeof(qf_names));
#endif
@@ -1296,10 +1315,12 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
out_ok:
replace_mount_options(s, new_opts);
+ reiserfs_write_unlock(s);
unlock_kernel();
return 0;
out_err:
+ reiserfs_write_unlock(s);
kfree(new_opts);
unlock_kernel();
return err;
@@ -1405,7 +1426,9 @@ static int read_super_block(struct super_block *s, int offset)
static int reread_meta_blocks(struct super_block *s)
{
ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
+ reiserfs_write_unlock(s);
wait_on_buffer(SB_BUFFER_WITH_SB(s));
+ reiserfs_write_lock(s);
if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
reiserfs_warning(s, "reiserfs-2504", "error reading the super");
return 1;
@@ -1614,7 +1637,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
if (!sbi) {
errval = -ENOMEM;
- goto error;
+ goto error_alloc;
}
s->s_fs_info = sbi;
/* Set default values for options: non-aggressive tails, RO on errors */
@@ -1628,6 +1651,20 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
/* setup default block allocator options */
reiserfs_init_alloc_options(s);
+ mutex_init(&REISERFS_SB(s)->lock);
+ REISERFS_SB(s)->lock_depth = -1;
+
+ /*
+ * This function is called with the bkl held, which also provided the
+ * old locking used here.
+ * do_journal_begin() will soon check that we hold the lock (i.e. what
+ * used to be the bkl), probably because it has several other callers;
+ * at this point nothing here seems to actually need that protection.
+ * Anyway, let's be conservative and lock for now.
+ */
+ reiserfs_write_lock(s);
+
jdev_name = NULL;
if (reiserfs_parse_options
(s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
@@ -1853,9 +1890,13 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
init_waitqueue_head(&(sbi->s_wait));
spin_lock_init(&sbi->bitmap_lock);
+ reiserfs_write_unlock(s);
+
return (0);
error:
+ reiserfs_write_unlock(s);
+error_alloc:
if (jinit_done) { /* kill the commit thread, free journal ram */
journal_release_error(NULL, s);
}
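
reiserfs_write_lock_once()/reiserfs_write_unlock_once(), used in reiserfs_dirty_inode() above, let a path take the new mutex without deadlocking if the current task already holds it. A hedged sketch of how such helpers could look, assuming a lock_depth of -1 means "not held by this task" (the real definitions live elsewhere in the series):

	/* Sketch only: take the lock unless the current task already owns it. */
	static int write_lock_once(struct super_block *sb)
	{
		if (REISERFS_SB(sb)->lock_depth == -1) {
			reiserfs_write_lock(sb);	/* we are the ones acquiring it */
			return -1;			/* tell the caller to release it */
		}
		return REISERFS_SB(sb)->lock_depth;	/* already held: unlock is a no-op */
	}

	static void write_unlock_once(struct super_block *sb, int depth)
	{
		if (depth == -1)			/* only drop what we took above */
			reiserfs_write_unlock(sb);
	}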
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index f3d47d856848..a71e9e4c0244 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -976,7 +976,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
int err = 0;
/* If we don't have the privroot located yet - go find it */
- mutex_lock(&s->s_root->d_inode->i_mutex);
+ reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
dentry = lookup_one_len(PRIVROOT_NAME, s->s_root,
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
@@ -1012,7 +1012,7 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
if (privroot->d_inode) {
s->s_xattr = reiserfs_xattr_handlers;
- mutex_lock(&privroot->d_inode->i_mutex);
+ reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s);
if (!REISERFS_SB(s)->xattr_root) {
struct dentry *dentry;
dentry = lookup_one_len(XAROOT_NAME, privroot,
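
reiserfs_mutex_lock_safe(), used above in place of a bare mutex_lock(), avoids sleeping on an i_mutex while the reiserfs write lock is held, which would invert the usual lock ordering. A plausible sketch, assuming it is built from the same write-lock helpers:

	/* Sketch: take @m without holding the reiserfs write lock across the sleep. */
	static inline void mutex_lock_safe(struct mutex *m, struct super_block *sb)
	{
		reiserfs_write_unlock(sb);	/* avoid write-lock -> i_mutex ordering */
		mutex_lock(m);			/* may sleep */
		reiserfs_write_lock(sb);	/* restore the caller's locking state */
	}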
diff --git a/fs/super.c b/fs/super.c
index 2761d3e22ed9..0d22ce3be4aa 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -62,9 +62,6 @@ static struct super_block *alloc_super(struct file_system_type *type)
s = NULL;
goto out;
}
- INIT_LIST_HEAD(&s->s_dirty);
- INIT_LIST_HEAD(&s->s_io);
- INIT_LIST_HEAD(&s->s_more_io);
INIT_LIST_HEAD(&s->s_files);
INIT_LIST_HEAD(&s->s_instances);
INIT_HLIST_HEAD(&s->s_anon);
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 555f0ff988df..e57f98e54fce 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -29,6 +29,7 @@ static const struct address_space_operations sysfs_aops = {
};
static struct backing_dev_info sysfs_backing_dev_info = {
+ .name = "sysfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index bc5857199ec2..762a7d6cec73 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -297,6 +297,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
+ dbg_io("jhead %d", wbuf->jhead);
wbuf->need_sync = 1;
wbuf->c->need_wbuf_sync = 1;
ubifs_wake_up_bgt(wbuf->c);
@@ -311,8 +312,12 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
ubifs_assert(!hrtimer_active(&wbuf->timer));
- if (!ktime_to_ns(wbuf->softlimit))
+ if (wbuf->no_timer)
return;
+ dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead,
+ div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
+ div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
+ USEC_PER_SEC));
hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
HRTIMER_MODE_REL);
}
@@ -323,11 +328,8 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
*/
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
- /*
- * If the syncer is waiting for the lock (from the background thread's
- * context) and another task is changing write-buffer then the syncing
- * should be canceled.
- */
+ if (wbuf->no_timer)
+ return;
wbuf->need_sync = 0;
hrtimer_cancel(&wbuf->timer);
}
@@ -349,8 +351,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
/* Write-buffer is empty or not seeked */
return 0;
- dbg_io("LEB %d:%d, %d bytes",
- wbuf->lnum, wbuf->offs, wbuf->used);
+ dbg_io("LEB %d:%d, %d bytes, jhead %d",
+ wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead);
ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
ubifs_assert(!(wbuf->avail & 7));
ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
@@ -390,7 +392,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
* @offs: logical eraseblock offset to seek to
* @dtype: data type
*
- * This function targets the write buffer to logical eraseblock @lnum:@offs.
+ * This function targets the write-buffer to logical eraseblock @lnum:@offs.
* The write-buffer is synchronized if it is not empty. Returns zero in case of
* success and a negative error code in case of failure.
*/
@@ -399,7 +401,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
{
const struct ubifs_info *c = wbuf->c;
- dbg_io("LEB %d:%d", lnum, offs);
+ dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead);
ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
ubifs_assert(offs >= 0 && offs <= c->leb_size);
ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
@@ -506,9 +508,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
struct ubifs_info *c = wbuf->c;
int err, written, n, aligned_len = ALIGN(len, 8), offs;
- dbg_io("%d bytes (%s) to wbuf at LEB %d:%d", len,
- dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->lnum,
- wbuf->offs + wbuf->used);
+ dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len,
+ dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead,
+ wbuf->lnum, wbuf->offs + wbuf->used);
ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
@@ -533,8 +535,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
memcpy(wbuf->buf + wbuf->used, buf, len);
if (aligned_len == wbuf->avail) {
- dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum,
- wbuf->offs);
+ dbg_io("flush jhead %d wbuf to LEB %d:%d",
+ wbuf->jhead, wbuf->lnum, wbuf->offs);
err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
wbuf->offs, c->min_io_size,
wbuf->dtype);
@@ -562,7 +564,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
* minimal I/O unit. We have to fill and flush write-buffer and switch
* to the next min. I/O unit.
*/
- dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, wbuf->offs);
+ dbg_io("flush jhead %d wbuf to LEB %d:%d",
+ wbuf->jhead, wbuf->lnum, wbuf->offs);
memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
c->min_io_size, wbuf->dtype);
@@ -695,7 +698,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
int err, rlen, overlap;
struct ubifs_ch *ch = buf;
- dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
+ dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs,
+ dbg_ntype(type), len, wbuf->jhead);
ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
ubifs_assert(!(offs & 7) && offs < c->leb_size);
ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
@@ -819,13 +823,12 @@ out:
* @c: UBIFS file-system description object
* @wbuf: write-buffer to initialize
*
- * This function initializes write buffer. Returns zero in case of success
+ * This function initializes write-buffer. Returns zero in case of success
* %-ENOMEM in case of failure.
*/
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
size_t size;
- ktime_t hardlimit;
wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL);
if (!wbuf->buf)
@@ -851,22 +854,16 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
wbuf->timer.function = wbuf_timer_callback_nolock;
- /*
- * Make write-buffer soft limit to be 20% of the hard limit. The
- * write-buffer timer is allowed to expire any time between the soft
- * and hard limits.
- */
- hardlimit = ktime_set(DEFAULT_WBUF_TIMEOUT_SECS, 0);
- wbuf->delta = (DEFAULT_WBUF_TIMEOUT_SECS * NSEC_PER_SEC) * 2 / 10;
- wbuf->softlimit = ktime_sub_ns(hardlimit, wbuf->delta);
- hrtimer_set_expires_range_ns(&wbuf->timer, wbuf->softlimit,
- wbuf->delta);
+ wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
+ wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
+ wbuf->delta *= 1000000000ULL;
+ ubifs_assert(wbuf->delta <= ULONG_MAX);
return 0;
}
/**
* ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
- * @wbuf: the write-buffer whereto add
+ * @wbuf: the write-buffer where to add
* @inum: the inode number
*
* This function adds an inode number to the inode array of the write-buffer.
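
With the new constants, ubifs_wbuf_init() above arms the timer for a window rather than a fixed expiry: the soft limit is 3 s, the hard limit 5 s, and @delta is the 2 s of slack converted to nanoseconds. The same arithmetic spelled out (a sketch mirroring the lines in the hunk above):

	ktime_t softlimit = ktime_set(3, 0);			/* WBUF_TIMEOUT_SOFTLIMIT */
	unsigned long long delta = (5 - 3) * 1000000000ULL;	/* hard - soft, in ns     */

	/* The hrtimer may then expire anywhere between 3 s and 5 s from now. */
	hrtimer_start_range_ns(&wbuf->timer, softlimit, delta, HRTIMER_MODE_REL);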
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 11cc80125a49..769be42f39d6 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -957,7 +957,7 @@ out:
return err;
out_dump:
- ubifs_err("log error detected while replying the log at LEB %d:%d",
+ ubifs_err("log error detected while replaying the log at LEB %d:%d",
lnum, offs + snod->offs);
dbg_dump_node(c, snod->node);
ubifs_scan_destroy(sleb);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 79fad43f3c57..3e2bae7efb44 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -797,7 +797,7 @@ static int alloc_wbufs(struct ubifs_info *c)
* does not need to be synchronized by timer.
*/
c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
- c->jheads[GCHD].wbuf.softlimit = ktime_set(0, 0);
+ c->jheads[GCHD].wbuf.no_timer = 1;
return 0;
}
@@ -986,7 +986,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
switch (token) {
/*
* %Opt_fast_unmount and %Opt_norm_unmount options are ignored.
- * We accepte them in order to be backware-compatible. But this
+ * We accept them in order to be backward-compatible. But this
* should be removed at some point.
*/
case Opt_fast_unmount:
@@ -1287,6 +1287,9 @@ static int mount_ubifs(struct ubifs_info *c)
if (err)
goto out_journal;
+ /* Calculate 'min_idx_lebs' after journal replay */
+ c->min_idx_lebs = ubifs_calc_min_idx_lebs(c);
+
err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only);
if (err)
goto out_orphans;
@@ -1754,10 +1757,8 @@ static void ubifs_put_super(struct super_block *sb)
/* Synchronize write-buffers */
if (c->jheads)
- for (i = 0; i < c->jhead_cnt; i++) {
+ for (i = 0; i < c->jhead_cnt; i++)
ubifs_wbuf_sync(&c->jheads[i].wbuf);
- hrtimer_cancel(&c->jheads[i].wbuf.timer);
- }
/*
* On fatal errors c->ro_media is set to 1, in which case we do
@@ -1970,12 +1971,14 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
*
* Read-ahead will be disabled because @c->bdi.ra_pages is 0.
*/
+ c->bdi.name = "ubifs";
c->bdi.capabilities = BDI_CAP_MAP_COPY;
c->bdi.unplug_io_fn = default_unplug_io_fn;
err = bdi_init(&c->bdi);
if (err)
goto out_close;
- err = bdi_register(&c->bdi, NULL, "ubifs");
+ err = bdi_register(&c->bdi, NULL, "ubifs_%d_%d",
+ c->vi.ubi_num, c->vi.vol_id);
if (err)
goto out_bdi;
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 1bf01d820066..a29349094422 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -95,8 +95,9 @@
*/
#define BGT_NAME_PATTERN "ubifs_bgt%d_%d"
-/* Default write-buffer synchronization timeout in seconds */
-#define DEFAULT_WBUF_TIMEOUT_SECS 5
+/* Write-buffer synchronization timeout interval in seconds */
+#define WBUF_TIMEOUT_SOFTLIMIT 3
+#define WBUF_TIMEOUT_HARDLIMIT 5
/* Maximum possible inode number (only 32-bit inodes are supported now) */
#define MAX_INUM 0xFFFFFFFF
@@ -654,7 +655,8 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c,
 * @delta: hard and soft timeouts delta (the timer expire interval is @softlimit
* and @softlimit + @delta)
* @timer: write-buffer timer
- * @need_sync: it is set if its timer expired and needs sync
+ * @no_timer: non-zero if this write-buffer does not have a timer
+ * @need_sync: non-zero if the timer expired and the wbuf needs sync'ing
* @next_ino: points to the next position of the following inode number
* @inodes: stores the inode numbers of the nodes which are in wbuf
*
@@ -683,7 +685,8 @@ struct ubifs_wbuf {
ktime_t softlimit;
unsigned long long delta;
struct hrtimer timer;
- int need_sync;
+ unsigned int no_timer:1;
+ unsigned int need_sync:1;
int next_ino;
ino_t *inodes;
};
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index a220d36f789b..c709ed61a53e 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -687,7 +687,7 @@ xfs_barrier_test(
return error;
}
-void
+STATIC void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
int error;
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index b619d6b8ca43..fbf3e0288b34 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -740,21 +740,6 @@ __xfs_inode_clear_reclaim_tag(
XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}
-void
-xfs_inode_clear_reclaim_tag(
- xfs_inode_t *ip)
-{
- xfs_mount_t *mp = ip->i_mount;
- xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
-
- read_lock(&pag->pag_ici_lock);
- spin_lock(&ip->i_flags_lock);
- __xfs_inode_clear_reclaim_tag(mp, pag, ip);
- spin_unlock(&ip->i_flags_lock);
- read_unlock(&pag->pag_ici_lock);
- xfs_put_perag(mp, pag);
-}
-
STATIC int
xfs_reclaim_inode_now(
struct xfs_inode *ip,
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 2a10301c99c7..23e7e7e6e136 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -48,7 +48,6 @@ int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
-void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip);
void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
struct xfs_inode *ip);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 7928b9983c1d..975972482e8f 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -3713,7 +3713,7 @@ done:
* entry (null if none). Else, *lastxp will be set to the index
* of the found entry; *gotp will contain the entry.
*/
-xfs_bmbt_rec_host_t * /* pointer to found extent entry */
+STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
xfs_bmap_search_multi_extents(
xfs_ifork_t *ifp, /* inode fork pointer */
xfs_fileoff_t bno, /* block number searched for */
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 1b8ff9256bd0..56f62d2edc35 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -392,17 +392,6 @@ xfs_bmap_count_blocks(
int whichfork,
int *count);
-/*
- * Search the extent records for the entry containing block bno.
- * If bno lies in a hole, point to the next entry. If bno lies
- * past eof, *eofp will be set, and *prevp will contain the last
- * entry (null if none). Else, *lastxp will be set to the index
- * of the found entry; *gotp will contain the entry.
- */
-xfs_bmbt_rec_host_t *
-xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *,
- xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
-
#endif /* __KERNEL__ */
#endif /* __XFS_BMAP_H__ */
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 5c1ade06578e..eb7b702d0690 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -202,16 +202,6 @@ xfs_bmbt_get_state(
ext_flag);
}
-/* Endian flipping versions of the bmbt extraction functions */
-void
-xfs_bmbt_disk_get_all(
- xfs_bmbt_rec_t *r,
- xfs_bmbt_irec_t *s)
-{
- __xfs_bmbt_get_all(get_unaligned_be64(&r->l0),
- get_unaligned_be64(&r->l1), s);
-}
-
/*
* Extract the blockcount field from an on disk bmap extent record.
*/
@@ -816,6 +806,16 @@ xfs_bmbt_trace_key(
*l1 = 0;
}
+/* Endian flipping versions of the bmbt extraction functions */
+STATIC void
+xfs_bmbt_disk_get_all(
+ xfs_bmbt_rec_t *r,
+ xfs_bmbt_irec_t *s)
+{
+ __xfs_bmbt_get_all(get_unaligned_be64(&r->l0),
+ get_unaligned_be64(&r->l1), s);
+}
+
STATIC void
xfs_bmbt_trace_record(
struct xfs_btree_cur *cur,
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index 0e8df007615e..5549d495947f 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -220,7 +220,6 @@ extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r);
extern xfs_fileoff_t xfs_bmbt_get_startoff(xfs_bmbt_rec_host_t *r);
extern xfs_exntst_t xfs_bmbt_get_state(xfs_bmbt_rec_host_t *r);
-extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r);
extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r);
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index e9df99574829..cde5a2668293 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -646,46 +646,6 @@ xfs_btree_read_bufl(
}
/*
- * Get a buffer for the block, return it read in.
- * Short-form addressing.
- */
-int /* error */
-xfs_btree_read_bufs(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* allocation group block number */
- uint lock, /* lock flags for read_buf */
- xfs_buf_t **bpp, /* buffer for agno/agbno */
- int refval) /* ref count value for buffer */
-{
- xfs_buf_t *bp; /* return value */
- xfs_daddr_t d; /* real disk block address */
- int error;
-
- ASSERT(agno != NULLAGNUMBER);
- ASSERT(agbno != NULLAGBLOCK);
- d = XFS_AGB_TO_DADDR(mp, agno, agbno);
- if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
- mp->m_bsize, lock, &bp))) {
- return error;
- }
- ASSERT(!bp || !XFS_BUF_GETERROR(bp));
- if (bp != NULL) {
- switch (refval) {
- case XFS_ALLOC_BTREE_REF:
- XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
- break;
- case XFS_INO_BTREE_REF:
- XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, refval);
- break;
- }
- }
- *bpp = bp;
- return 0;
-}
-
-/*
* Read-ahead the block, don't wait for it, don't return a buffer.
* Long-form addressing.
*/
@@ -2951,7 +2911,7 @@ error0:
* inode we have to copy the single block it was pointing to into the
* inode.
*/
-int
+STATIC int
xfs_btree_kill_iroot(
struct xfs_btree_cur *cur)
{
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 4f852b735b96..7fa07062bdda 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -379,20 +379,6 @@ xfs_btree_read_bufl(
int refval);/* ref count value for buffer */
/*
- * Get a buffer for the block, return it read in.
- * Short-form addressing.
- */
-int /* error */
-xfs_btree_read_bufs(
- struct xfs_mount *mp, /* file system mount point */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* allocation group block number */
- uint lock, /* lock flags for read_buf */
- struct xfs_buf **bpp, /* buffer for agno/agbno */
- int refval);/* ref count value for buffer */
-
-/*
* Read-ahead the block, don't wait for it, don't return a buffer.
* Long-form addressing.
*/
@@ -432,7 +418,6 @@ int xfs_btree_decrement(struct xfs_btree_cur *, int, int *);
int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *);
int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *);
int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
-int xfs_btree_kill_iroot(struct xfs_btree_cur *);
int xfs_btree_insert(struct xfs_btree_cur *, int *);
int xfs_btree_delete(struct xfs_btree_cur *, int *);
int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 1f22d65fed0a..2dcb3d781ae5 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -641,7 +641,7 @@ xfs_iformat_btree(
return 0;
}
-void
+STATIC void
xfs_dinode_from_disk(
xfs_icdinode_t *to,
xfs_dinode_t *from)
@@ -1237,7 +1237,7 @@ xfs_isize_check(
* In that case the pages will still be in memory, but the inode size
* will never have been updated.
*/
-xfs_fsize_t
+STATIC xfs_fsize_t
xfs_file_last_byte(
xfs_inode_t *ip)
{
@@ -3827,7 +3827,7 @@ xfs_iext_inline_to_direct(
/*
* Resize an extent indirection array to new_size bytes.
*/
-void
+STATIC void
xfs_iext_realloc_indirect(
xfs_ifork_t *ifp, /* inode fork pointer */
int new_size) /* new indirection array size */
@@ -3852,7 +3852,7 @@ xfs_iext_realloc_indirect(
/*
* Switch from indirection array to linear (direct) extent allocations.
*/
-void
+STATIC void
xfs_iext_indirect_to_direct(
xfs_ifork_t *ifp) /* inode fork pointer */
{
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1804f866a71d..5104daedaf6c 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -521,7 +521,6 @@ void xfs_ipin(xfs_inode_t *);
void xfs_iunpin(xfs_inode_t *);
int xfs_iflush(xfs_inode_t *, uint);
void xfs_ichgtime(xfs_inode_t *, int);
-xfs_fsize_t xfs_file_last_byte(xfs_inode_t *);
void xfs_lock_inodes(xfs_inode_t **, int, uint);
void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
@@ -589,8 +588,6 @@ int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
struct xfs_buf **, uint);
int xfs_iread(struct xfs_mount *, struct xfs_trans *,
struct xfs_inode *, xfs_daddr_t, uint);
-void xfs_dinode_from_disk(struct xfs_icdinode *,
- struct xfs_dinode *);
void xfs_dinode_to_disk(struct xfs_dinode *,
struct xfs_icdinode *);
void xfs_idestroy_fork(struct xfs_inode *, int);
@@ -609,8 +606,6 @@ void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int);
void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int);
void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int);
void xfs_iext_realloc_direct(xfs_ifork_t *, int);
-void xfs_iext_realloc_indirect(xfs_ifork_t *, int);
-void xfs_iext_indirect_to_direct(xfs_ifork_t *);
void xfs_iext_direct_to_inline(xfs_ifork_t *, xfs_extnum_t);
void xfs_iext_inline_to_direct(xfs_ifork_t *, int);
void xfs_iext_destroy(xfs_ifork_t *);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index aeb2d2221c7d..c471122d2234 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -39,7 +39,7 @@
#include "xfs_error.h"
#include "xfs_btree.h"
-int
+STATIC int
xfs_internal_inum(
xfs_mount_t *mp,
xfs_ino_t ino)
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 1fb04e7deb61..20792bf45946 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -99,11 +99,6 @@ xfs_bulkstat_one(
void *dibuff,
int *stat);
-int
-xfs_internal_inum(
- xfs_mount_t *mp,
- xfs_ino_t ino);
-
typedef int (*inumbers_fmt_pf)(
void __user *ubuffer, /* buffer to write to */
const xfs_inogrp_t *buffer, /* buffer to read from */
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index bcad5f4c1fd1..679c7c4926a2 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -451,8 +451,6 @@ extern int xlog_find_tail(xlog_t *log,
extern int xlog_recover(xlog_t *log);
extern int xlog_recover_finish(xlog_t *log);
extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
-extern void xlog_recover_process_iunlinks(xlog_t *log);
-
extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
extern void xlog_put_bp(struct xfs_buf *);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 47da2fb45377..1099395d7d6c 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3263,7 +3263,7 @@ xlog_recover_process_one_iunlink(
* freeing of the inode and its removal from the list must be
* atomic.
*/
-void
+STATIC void
xlog_recover_process_iunlinks(
xlog_t *log)
{
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 5c6f092659c1..8b6c9e807efb 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1568,7 +1568,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
*
* The m_sb_lock must be held when this routine is called.
*/
-int
+STATIC int
xfs_mod_incore_sb_unlocked(
xfs_mount_t *mp,
xfs_sb_field_t field,
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a5122382afde..a6c023bc0fb2 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -414,13 +414,10 @@ typedef struct xfs_mod_sb {
extern int xfs_log_sbcount(xfs_mount_t *, uint);
extern int xfs_mountfs(xfs_mount_t *mp);
-extern void xfs_mountfs_check_barriers(xfs_mount_t *mp);
extern void xfs_unmountfs(xfs_mount_t *);
extern int xfs_unmountfs_writesb(xfs_mount_t *);
extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
-extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t,
- int64_t, int);
extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
uint, int);
extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t);
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index afee7eb24323..4b0613d99faa 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -564,35 +564,6 @@ xfs_mru_cache_lookup(
}
/*
- * To look up an element using its key, but leave its location in the internal
- * lists alone, call xfs_mru_cache_peek(). If the element isn't found, this
- * function returns NULL.
- *
- * See the comments above the declaration of the xfs_mru_cache_lookup() function
- * for important locking information pertaining to this call.
- */
-void *
-xfs_mru_cache_peek(
- xfs_mru_cache_t *mru,
- unsigned long key)
-{
- xfs_mru_cache_elem_t *elem;
-
- ASSERT(mru && mru->lists);
- if (!mru || !mru->lists)
- return NULL;
-
- spin_lock(&mru->lock);
- elem = radix_tree_lookup(&mru->store, key);
- if (!elem)
- spin_unlock(&mru->lock);
- else
- __release(mru_lock); /* help sparse not be stupid */
-
- return elem ? elem->value : NULL;
-}
-
-/*
* To release the internal data structure spinlock after having performed an
* xfs_mru_cache_lookup() or an xfs_mru_cache_peek(), call xfs_mru_cache_done()
* with the data store pointer.
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index dd58ea1bbebe..5d439f34b0c9 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -49,7 +49,6 @@ int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
void * xfs_mru_cache_remove(struct xfs_mru_cache *mru, unsigned long key);
void xfs_mru_cache_delete(struct xfs_mru_cache *mru, unsigned long key);
void *xfs_mru_cache_lookup(struct xfs_mru_cache *mru, unsigned long key);
-void *xfs_mru_cache_peek(struct xfs_mru_cache *mru, unsigned long key);
void xfs_mru_cache_done(struct xfs_mru_cache *mru);
#endif /* __XFS_MRU_CACHE_H__ */
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index f76c003ec55d..ae65f0df87da 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -78,10 +78,4 @@ extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp,
extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp,
xfs_buf_t *bp, xfs_daddr_t blkno);
-/*
- * Prototypes for functions in xfs_vnodeops.c.
- */
-extern int xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip,
- int flags);
-
#endif /* __XFS_RW_H__ */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index c4eca5ed5dab..1dd706887156 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -716,7 +716,7 @@ xfs_fsync(
* when the link count isn't zero and by xfs_dm_punch_hole() when
* punching a hole to EOF.
*/
-int
+STATIC int
xfs_free_eofblocks(
xfs_mount_t *mp,
xfs_inode_t *ip,
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c65e4ce6c3af..79a6c5ebe908 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -89,7 +89,6 @@ struct acpi_device;
typedef int (*acpi_op_add) (struct acpi_device * device);
typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
typedef int (*acpi_op_start) (struct acpi_device * device);
-typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
typedef int (*acpi_op_suspend) (struct acpi_device * device,
pm_message_t state);
typedef int (*acpi_op_resume) (struct acpi_device * device);
@@ -106,7 +105,6 @@ struct acpi_device_ops {
acpi_op_add add;
acpi_op_remove remove;
acpi_op_start start;
- acpi_op_stop stop;
acpi_op_suspend suspend;
acpi_op_resume resume;
acpi_op_bind bind;
diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h
index c17c9600f220..d7c76bba640d 100644
--- a/include/asm-generic/device.h
+++ b/include/asm-generic/device.h
@@ -9,4 +9,7 @@
struct dev_archdata {
};
+struct pdev_archdata {
+};
+
#endif /* _ASM_GENERIC_DEVICE_H */
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index b4326b5466eb..26373cff4546 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -30,7 +30,18 @@ pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
res->end = region->end;
}
-#define pcibios_scan_all_fns(a, b) 0
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+ struct resource *root = NULL;
+
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+
+ return root;
+}
#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
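
pcibios_select_root() above replaces the pcibios_scan_all_fns() stub and simply returns the root resource that matches the BAR's flags. A hedged usage sketch (the caller and its name are illustrative, not part of this patch):

	/* Sketch: claim a BAR under whichever root pcibios_select_root() picks. */
	static int claim_bar(struct pci_dev *pdev, int bar)
	{
		struct resource *res = &pdev->resource[bar];
		struct resource *root = pcibios_select_root(pdev, res);

		if (!root)				/* neither I/O nor memory space */
			return -EINVAL;
		return request_resource(root, res);	/* hang it off ioport/iomem root */
	}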
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d7d50d7ee51e..aa00800adacc 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -97,4 +97,8 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_ATTRIBUTES
#endif
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
+#endif
+
#endif /* _ASM_GENERIC_PERCPU_H_ */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 88bada2ebc4b..510df36dd5d4 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -37,9 +37,6 @@
#ifndef parent_node
#define parent_node(node) ((void)(node),0)
#endif
-#ifndef node_to_cpumask
-#define node_to_cpumask(node) ((void)node, cpu_online_map)
-#endif
#ifndef cpumask_of_node
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
#endif
@@ -55,18 +52,4 @@
#endif /* CONFIG_NUMA */
-/*
- * returns pointer to cpumask for specified node
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#ifndef node_to_cpumask_ptr
-
-#define node_to_cpumask_ptr(v, node) \
- cpumask_t _##v = node_to_cpumask(node); \
- const cpumask_t *v = &_##v
-
-#define node_to_cpumask_ptr_next(v, node) \
- _##v = node_to_cpumask(node)
-#endif
-
#endif /* _ASM_GENERIC_TOPOLOGY_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 92b73b6140ff..9dd9a2466fea 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -191,7 +191,7 @@
. = ALIGN(align); \
*(.data.cacheline_aligned)
-#define INIT_TASK(align) \
+#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
*(.data.init_task)
@@ -434,14 +434,15 @@
/*
* Init task
*/
-#define INIT_TASK_DATA(align) \
+#define INIT_TASK_DATA_SECTION(align) \
. = ALIGN(align); \
.data.init_task : { \
- INIT_TASK \
+ INIT_TASK_DATA(align) \
}
#ifdef CONFIG_CONSTRUCTORS
-#define KERNEL_CTORS() VMLINUX_SYMBOL(__ctors_start) = .; \
+#define KERNEL_CTORS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__ctors_start) = .; \
*(.ctors) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
@@ -628,6 +629,14 @@
#define INIT_RAM_FS
#endif
+#define DISCARDS \
+ /DISCARD/ : { \
+ EXIT_TEXT \
+ EXIT_DATA \
+ *(.exitcall.exit) \
+ *(.discard) \
+ }
+
/**
* PERCPU_VADDR - define output section for percpu area
* @vaddr: explicit base address (optional)
@@ -704,15 +713,15 @@
 * matches the requirement of PAGE_ALIGNED_DATA.
*
* use 0 as page_align if page_aligned data is not used */
-#define RW_DATA_SECTION(cacheline, nosave, pagealigned, inittask) \
+#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
. = ALIGN(PAGE_SIZE); \
.data : AT(ADDR(.data) - LOAD_OFFSET) { \
- INIT_TASK(inittask) \
+ INIT_TASK_DATA(inittask) \
CACHELINE_ALIGNED_DATA(cacheline) \
READ_MOSTLY_DATA(cacheline) \
DATA_DATA \
CONSTRUCTORS \
- NOSAVE_DATA(nosave) \
+ NOSAVE_DATA \
PAGE_ALIGNED_DATA(pagealigned) \
}
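
After the rename, INIT_TASK_DATA() emits only the .data.init_task contents while INIT_TASK_DATA_SECTION() wraps them in their own output section, and RW_DATA_SECTION() loses its @nosave alignment argument. A hedged sketch of a caller in an architecture linker script (the alignment values are illustrative):

	/* old: RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE, THREAD_SIZE) */
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)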
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c263e4d71754..7d6c9a2dfcbb 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -35,11 +35,11 @@ struct est_timings {
} __attribute__((packed));
/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
-#define EDID_TIMING_ASPECT_SHIFT 0
+#define EDID_TIMING_ASPECT_SHIFT 6
#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
/* need to add 60 */
-#define EDID_TIMING_VFREQ_SHIFT 2
+#define EDID_TIMING_VFREQ_SHIFT 0
#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
struct std_timing {
@@ -47,11 +47,11 @@ struct std_timing {
u8 vfreq_aspect;
} __attribute__((packed));
-#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 6)
-#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 5)
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
-#define DRM_EDID_PT_STEREO (1 << 2)
-#define DRM_EDID_PT_INTERLACED (1 << 1)
+#define DRM_EDID_PT_STEREO (1 << 5)
+#define DRM_EDID_PT_INTERLACED (1 << 7)
/* If detailed data is pixel timing */
struct detailed_pixel_timing {
@@ -93,7 +93,7 @@ struct detailed_data_monitor_range {
} __attribute__((packed));
struct detailed_data_wpindex {
- u8 white_xy_lo; /* Upper 2 bits each */
+ u8 white_yx_lo; /* Lower 2 bits each */
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
@@ -135,21 +135,21 @@ struct detailed_timing {
} data;
} __attribute__((packed));
-#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 7)
-#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 5)
-#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 4)
+#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
+#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
+#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
-#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 2)
-#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 1)
-#define DRM_EDID_INPUT_DIGITAL (1 << 0) /* bits above must be zero if set */
+#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
+#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
+#define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */
-#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 7)
-#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 6)
-#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 5)
+#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
+#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
+#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
-#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 2)
-#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 1)
-#define DRM_EDID_FEATURE_PM_STANDBY (1 << 0)
+#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
+#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
+#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
struct edid {
u8 header[8];
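
The corrected shift values above follow the EDID byte layout: the aspect ratio sits in the top two bits of the standard-timing byte and the vertical frequency (stored as refresh minus 60) in the low six bits. A hedged decode sketch using the new definitions:

	/* Sketch: decode one EDID standard timing with the corrected shifts. */
	static void decode_std_timing(const struct std_timing *t, int *aspect, int *vfreq)
	{
		*aspect = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
				>> EDID_TIMING_ASPECT_SHIFT;	  /* 0=16:10 1=4:3 2=5:4 3=16:9 */
		*vfreq  = ((t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
				>> EDID_TIMING_VFREQ_SHIFT) + 60; /* "need to add 60" above */
	}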
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b16a957030f8..47f7d932a01d 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -121,9 +121,9 @@ struct kiocb {
/*
* If the aio_resfd field of the userspace iocb is not zero,
- * this is the underlying file* to deliver event to.
+ * this is the underlying eventfd context to deliver events to.
*/
- struct file *ki_eventfd;
+ struct eventfd_ctx *ki_eventfd;
};
#define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0ec2c594868e..e623c579cac6 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,9 @@
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/srcu.h>
+#include <linux/writeback.h>
#include <asm/atomic.h>
struct page;
@@ -23,9 +26,12 @@ struct dentry;
* Bits in backing_dev_info.state
*/
enum bdi_state {
- BDI_pdflush, /* A pdflush thread is working this device */
+ BDI_pending, /* On its way to being activated */
+ BDI_wb_alloc, /* Default embedded wb allocated */
+ BDI_wblist_lock, /* bdi->wb_list now needs locking */
BDI_async_congested, /* The async (write) queue is getting full */
BDI_sync_congested, /* The sync queue is getting full */
+ BDI_registered, /* bdi_register() was done */
BDI_unused, /* Available bits start here */
};
@@ -39,7 +45,23 @@ enum bdi_stat_item {
#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+struct bdi_writeback {
+ struct list_head list; /* hangs off the bdi */
+
+ struct backing_dev_info *bdi; /* our parent bdi */
+ unsigned int nr;
+
+ struct task_struct *task; /* writeback task */
+ struct list_head b_dirty; /* dirty inodes */
+ struct list_head b_io; /* parked for writeback */
+ struct list_head b_more_io; /* parked for more writeback */
+};
+
+#define BDI_MAX_FLUSHERS 32
+
struct backing_dev_info {
+ struct srcu_struct srcu; /* for wb_list read side protection */
+ struct list_head bdi_list;
unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
unsigned long state; /* Always use atomic bitops on this */
unsigned int capabilities; /* Device capabilities */
@@ -48,6 +70,8 @@ struct backing_dev_info {
void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
void *unplug_io_data;
+ char *name;
+
struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
struct prop_local_percpu completions;
@@ -56,6 +80,14 @@ struct backing_dev_info {
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;
+ struct bdi_writeback wb; /* default writeback info for this bdi */
+ spinlock_t wb_lock; /* protects update side of wb_list */
+ struct list_head wb_list; /* the flusher threads hanging off this bdi */
+ unsigned long wb_mask; /* bitmask of registered tasks */
+ unsigned int wb_cnt; /* number of registered tasks */
+
+ struct list_head work_list;
+
struct device *dev;
#ifdef CONFIG_DEBUG_FS
@@ -71,6 +103,32 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
+void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
+ long nr_pages, enum writeback_sync_modes sync_mode);
+int bdi_writeback_task(struct bdi_writeback *wb);
+void bdi_writeback_all(struct super_block *sb, struct writeback_control *wbc);
+void bdi_add_flusher_task(struct backing_dev_info *bdi);
+int bdi_has_dirty_io(struct backing_dev_info *bdi);
+
+extern spinlock_t bdi_lock;
+extern struct list_head bdi_list;
+
+static inline int wb_is_default_task(struct bdi_writeback *wb)
+{
+ return wb == &wb->bdi->wb;
+}
+
+static inline int bdi_wblist_needs_lock(struct backing_dev_info *bdi)
+{
+ return test_bit(BDI_wblist_lock, &bdi->state);
+}
+
+static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+{
+ return !list_empty(&wb->b_dirty) ||
+ !list_empty(&wb->b_io) ||
+ !list_empty(&wb->b_more_io);
+}
static inline void __add_bdi_stat(struct backing_dev_info *bdi,
enum bdi_stat_item item, s64 amount)
@@ -256,6 +314,11 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}
+static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
+{
+ return bdi == &default_backing_dev_info;
+}
+
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -271,4 +334,10 @@ static inline bool mapping_cap_swap_backed(struct address_space *mapping)
return bdi_cap_swap_backed(mapping->backing_dev_info);
}
+static inline int bdi_sched_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
#endif /* _LINUX_BACKING_DEV_H */
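
bdi_sched_wait() above is shaped as an action callback for the era's four-argument wait_on_bit(): it schedules and returns 0 so the wait continues until the bit clears. A hedged sketch of the kind of caller this enables (the bit chosen here is illustrative):

	/* Sketch: sleep until some other task clears BDI_pending on this bdi. */
	static void wait_for_bdi_activation(struct backing_dev_info *bdi)
	{
		wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			    TASK_UNINTERRUPTIBLE);
	}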
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 2a04eb54c0dd..a299ed38fcd7 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -142,37 +142,40 @@ struct bio {
*
* bit 0 -- data direction
* If not set, bio is a read from device. If set, it's a write to device.
- * bit 1 -- rw-ahead when set
- * bit 2 -- barrier
+ * bit 1 -- fail fast device errors
+ * bit 2 -- fail fast transport errors
+ * bit 3 -- fail fast driver errors
+ * bit 4 -- rw-ahead when set
+ * bit 5 -- barrier
* Insert a serialization point in the IO queue, forcing previously
* submitted IO to be completed before this one is issued.
- * bit 3 -- synchronous I/O hint.
- * bit 4 -- Unplug the device immediately after submitting this bio.
- * bit 5 -- metadata request
+ * bit 6 -- synchronous I/O hint.
+ * bit 7 -- Unplug the device immediately after submitting this bio.
+ * bit 8 -- metadata request
* Used for tracing to differentiate metadata and data IO. May also
* get some preferential treatment in the IO scheduler
- * bit 6 -- discard sectors
+ * bit 9 -- discard sectors
* Informs the lower level device that this range of sectors is no longer
* used by the file system and may thus be freed by the device. Used
* for flash based storage.
- * bit 7 -- fail fast device errors
- * bit 8 -- fail fast transport errors
- * bit 9 -- fail fast driver errors
* Don't want driver retries for any fast fail whatever the reason.
* bit 10 -- Tell the IO scheduler not to wait for more requests after this
one has been submitted, even if it is a SYNC request.
*/
-#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
-#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
-#define BIO_RW_BARRIER 2
-#define BIO_RW_SYNCIO 3
-#define BIO_RW_UNPLUG 4
-#define BIO_RW_META 5
-#define BIO_RW_DISCARD 6
-#define BIO_RW_FAILFAST_DEV 7
-#define BIO_RW_FAILFAST_TRANSPORT 8
-#define BIO_RW_FAILFAST_DRIVER 9
-#define BIO_RW_NOIDLE 10
+enum bio_rw_flags {
+ BIO_RW,
+ BIO_RW_FAILFAST_DEV,
+ BIO_RW_FAILFAST_TRANSPORT,
+ BIO_RW_FAILFAST_DRIVER,
+ /* above flags must match REQ_* */
+ BIO_RW_AHEAD,
+ BIO_RW_BARRIER,
+ BIO_RW_SYNCIO,
+ BIO_RW_UNPLUG,
+ BIO_RW_META,
+ BIO_RW_DISCARD,
+ BIO_RW_NOIDLE,
+};
#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))
@@ -319,7 +322,6 @@ static inline int bio_has_allocated_vec(struct bio *bio)
*/
struct bio_integrity_payload {
struct bio *bip_bio; /* parent bio */
- struct bio_vec *bip_vec; /* integrity data vector */
sector_t bip_sector; /* virtual start sector */
@@ -328,11 +330,12 @@ struct bio_integrity_payload {
unsigned int bip_size;
- unsigned short bip_pool; /* pool the ivec came from */
+ unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_idx; /* current bip_vec index */
struct work_struct bip_work; /* I/O completion */
+ struct bio_vec bip_vec[0]; /* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -430,6 +433,9 @@ struct bio_set {
unsigned int front_pad;
mempool_t *bio_pool;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ mempool_t *bio_integrity_pool;
+#endif
mempool_t *bvec_pool;
};
@@ -634,8 +640,9 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
#define bio_integrity(bio) (bio->bi_integrity != NULL)
+extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern void bio_integrity_free(struct bio *);
+extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
@@ -645,21 +652,27 @@ extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
-extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
+extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
+extern int bioset_integrity_create(struct bio_set *, int);
+extern void bioset_integrity_free(struct bio_set *);
+extern void bio_integrity_init(void);
#else /* CONFIG_BLK_DEV_INTEGRITY */
#define bio_integrity(a) (0)
+#define bioset_integrity_create(a, b) (0)
#define bio_integrity_prep(a) (0)
#define bio_integrity_enabled(a) (0)
-#define bio_integrity_clone(a, b, c) (0)
-#define bio_integrity_free(a) do { } while (0)
+#define bio_integrity_clone(a, b, c, d) (0)
+#define bioset_integrity_free(a) do { } while (0)
+#define bio_integrity_free(a, b) do { } while (0)
#define bio_integrity_endio(a, b) do { } while (0)
#define bio_integrity_advance(a, b) do { } while (0)
#define bio_integrity_trim(a, b, c) do { } while (0)
#define bio_integrity_split(a, b, c) do { } while (0)
#define bio_integrity_set_tag(a, b, c) do { } while (0)
#define bio_integrity_get_tag(a, b, c) do { } while (0)
+#define bio_integrity_init(a) do { } while (0)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
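
With BIO_RW_* now an enum, bio_rw_flagged() (kept above) is the single way to test these bits against bio->bi_rw. A short hedged usage sketch (the helper name is illustrative):

	/* Sketch: combine two flag tests through bio_rw_flagged(). */
	static int bio_is_sync_discard(struct bio *bio)
	{
		return bio_rw_flagged(bio, BIO_RW_SYNCIO) &&
		       bio_rw_flagged(bio, BIO_RW_DISCARD);
	}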
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8963d9149b5f..e58079f2dc00 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -98,6 +98,7 @@ enum rq_flag_bits {
__REQ_FAILFAST_DEV, /* no driver retries of device errors */
__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
__REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
+ /* above flags must match BIO_RW_* */
__REQ_DISCARD, /* request to discard sectors */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
@@ -119,6 +120,7 @@ enum rq_flag_bits {
__REQ_INTEGRITY, /* integrity metadata has been remapped */
__REQ_NOIDLE, /* Don't anticipate more IO after this one */
__REQ_IO_STAT, /* account I/O stat */
+ __REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_NR_BITS, /* stops here */
};
@@ -147,6 +149,10 @@ enum rq_flag_bits {
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
+
+#define REQ_FAILFAST_MASK (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
+ REQ_FAILFAST_DRIVER)
#define BLK_MAX_CDB 16
@@ -301,12 +307,6 @@ struct blk_queue_tag {
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-struct blk_cmd_filter {
- unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
- unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
- struct kobject kobj;
-};
-
struct queue_limits {
unsigned long bounce_pfn;
unsigned long seg_boundary_mask;
@@ -445,7 +445,6 @@ struct request_queue
#if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device bsg_dev;
#endif
- struct blk_cmd_filter cmd_filter;
};
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
@@ -839,11 +838,13 @@ static inline void blk_run_address_space(struct address_space *mapping)
}
/*
- * blk_rq_pos() : the current sector
- * blk_rq_bytes() : bytes left in the entire request
- * blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_sectors() : sectors left in the entire request
- * blk_rq_cur_sectors() : sectors left in the current segment
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_err_bytes() : bytes left till the next error boundary
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ * blk_rq_err_sectors() : sectors left till the next error boundary
*/
static inline sector_t blk_rq_pos(const struct request *rq)
{
@@ -860,6 +861,8 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}
+extern unsigned int blk_rq_err_bytes(const struct request *rq);
+
static inline unsigned int blk_rq_sectors(const struct request *rq)
{
return blk_rq_bytes(rq) >> 9;
@@ -870,6 +873,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
+static inline unsigned int blk_rq_err_sectors(const struct request *rq)
+{
+ return blk_rq_err_bytes(rq) >> 9;
+}
+
/*
* Request issue related functions.
*/
@@ -896,10 +904,12 @@ extern bool blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request_err(struct request *rq, int error);
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
@@ -998,13 +1008,7 @@ static inline int sb_issue_discard(struct super_block *sb,
return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}
-/*
-* command filter functions
-*/
-extern int blk_verify_command(struct blk_cmd_filter *filter,
- unsigned char *cmd, fmode_t has_write_perm);
-extern void blk_unregister_filter(struct gendisk *disk);
-extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
+extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
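
Because the leading BIO_RW_* values now sit at the same bit positions as the corresponding __REQ_* flags, the failfast bits can be copied from a bio to a request with one mask instead of three individual tests. A hedged sketch of that propagation (the helper is illustrative, not a function added by this patch):

	/* Sketch: rely on the matching bit layout noted in both headers above. */
	static void copy_failfast_flags(struct request *rq, struct bio *bio)
	{
		rq->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
	}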
diff --git a/include/linux/capability.h b/include/linux/capability.h
index c3021105edc0..c8f2a5f70ed5 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -7,7 +7,7 @@
*
* See here for the libcap library ("POSIX draft" compliance):
*
- * ftp://linux.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/
+ * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/
*/
#ifndef _LINUX_CAPABILITY_H
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c5ac87ca7bc6..5b44e9fe510d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -3,444 +3,37 @@
/*
* Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number.
- *
- * The new cpumask_ ops take a "struct cpumask *"; the old ones
- * use cpumask_t.
- *
- * See detailed comments in the file linux/bitmap.h describing the
- * data type on which these cpumasks are based.
- *
- * For details of cpumask_scnprintf() and cpumask_parse_user(),
- * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
- * For details of cpulist_scnprintf() and cpulist_parse(), see
- * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
- * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
- * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
- * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
- * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
- *
- * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
- * Note: The alternate operations with the suffix "_nr" are used
- * to limit the range of the loop to nr_cpu_ids instead of
- * NR_CPUS when NR_CPUS > 64 for performance reasons.
- * If NR_CPUS is <= 64 then most assembler bitmask
- * operators execute faster with a constant range, so
- * the operator will continue to use NR_CPUS.
- *
- * Another consideration is that nr_cpu_ids is initialized
- * to NR_CPUS and isn't lowered until the possible cpus are
- * discovered (including any disabled cpus). So early uses
- * will span the entire range of NR_CPUS.
- * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
- *
- * The obsolescent cpumask operations are:
- *
- * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
- * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
- * void cpus_setall(mask) set all bits
- * void cpus_clear(mask) clear all bits
- * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
- * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
- *
- * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
- * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
- * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
- * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
- * void cpus_complement(dst, src) dst = ~src
- *
- * int cpus_equal(mask1, mask2) Does mask1 == mask2?
- * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect?
- * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2?
- * int cpus_empty(mask) Is mask empty (no bits sets)?
- * int cpus_full(mask) Is mask full (all bits sets)?
- * int cpus_weight(mask) Hamming weigh - number of set bits
- * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS
- *
- * void cpus_shift_right(dst, src, n) Shift right
- * void cpus_shift_left(dst, src, n) Shift left
- *
- * int first_cpu(mask) Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
- * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
- *
- * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
- * (can be used as an lvalue)
- * CPU_MASK_ALL Initializer - all bits set
- * CPU_MASK_NONE Initializer - no bits set
- * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
- *
- * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
- * variables, and CPUMASK_PTR provides pointers to each field.
- *
- * The structure should be defined something like this:
- * struct my_cpumasks {
- * cpumask_t mask1;
- * cpumask_t mask2;
- * };
- *
- * Usage is then:
- * CPUMASK_ALLOC(my_cpumasks);
- * CPUMASK_PTR(mask1, my_cpumasks);
- * CPUMASK_PTR(mask2, my_cpumasks);
- *
- * --- DO NOT reference cpumask_t pointers until this check ---
- * if (my_cpumasks == NULL)
- * "kmalloc failed"...
- *
- * References are now pointers to the cpumask_t variables (*mask1, ...)
- *
- *if NR_CPUS > BITS_PER_LONG
- * CPUMASK_ALLOC(m) Declares and allocates struct m *m =
- * kmalloc(sizeof(*m), GFP_KERNEL)
- * CPUMASK_FREE(m) Macro for kfree(m)
- *else
- * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m
- * CPUMASK_FREE(m) Nop
- *endif
- * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v)
- * ------------------------------------------------------------------------
- *
- * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
- * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
- * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
- * int cpulist_parse(buf, map) Parse ascii string as cpulist
- * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
- * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
- * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
- * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
- *
- * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS
- * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids
- *
- * int num_online_cpus() Number of online CPUs
- * int num_possible_cpus() Number of all possible CPUs
- * int num_present_cpus() Number of present CPUs
- *
- * int cpu_online(cpu) Is some cpu online?
- * int cpu_possible(cpu) Is some cpu possible?
- * int cpu_present(cpu) Is some cpu present (can schedule)?
- *
- * int any_online_cpu(mask) First online cpu in mask
- *
- * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
- * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
- * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
- *
- * Subtlety:
- * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
- * to generate slightly worse code. Note for example the additional
- * 40 lines of assembly code compiling the "for each possible cpu"
- * loops buried in the disk_stat_read() macros calls when compiling
- * drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
- * one-line #define for cpu_isset(), instead of wrapping an inline
- * inside a macro, the way we do the other calls.
+ * set of CPU's in a system, one bit position per CPU number. In general,
+ * only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
-
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-extern cpumask_t _unused_cpumask_arg_;
-
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
- set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
- clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
-{
- bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
-{
- bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
- return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
- __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
-static inline void __cpus_complement(cpumask_t *dstp,
- const cpumask_t *srcp, int nbits)
-{
- bitmap_complement(dstp->bits, srcp->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
-{
- return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
-static inline int __cpus_full(const cpumask_t *srcp, int nbits)
-{
- return bitmap_full(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
-{
- return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_right(dst, src, n) \
- __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_right(cpumask_t *dstp,
- const cpumask_t *srcp, int n, int nbits)
-{
- bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
- __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
- const cpumask_t *srcp, int n, int nbits)
-{
- bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
- * @bitmap: the bitmap
- *
- * There are a few places where cpumask_var_t isn't appropriate and
- * static cpumasks must be used (eg. very early boot), yet we don't
- * expose the definition of 'struct cpumask'.
- *
- * This does the conversion, and can be used as a constant initializer.
- */
-#define to_cpumask(bitmap) \
- ((struct cpumask *)(1 ? (bitmap) \
- : (void *)sizeof(__check_is_bitmap(bitmap))))
-
-static inline int __check_is_bitmap(const unsigned long *bitmap)
-{
- return 1;
-}
-
-/*
- * Special-case data structure for "single bit set only" constant CPU masks.
+ * cpumask_bits - get the bits in a cpumask
+ * @maskp: the struct cpumask *
*
- * We pre-generate all the 64 (or 32) possible bit positions, with enough
- * padding to the left and the right, and return the constant pointer
- * appropriately offset.
- */
-extern const unsigned long
- cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
-
-static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
-{
- const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
- p -= cpu / BITS_PER_LONG;
- return to_cpumask(p);
-}
-
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-/*
- * In cases where we take the address of the cpumask immediately,
- * gcc optimizes it out (it's a constant) and there's no huge stack
- * variable created:
+ * You should only assume nr_cpu_ids bits of this mask are valid. This is
+ * a macro so it's const-correct.
*/
-#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
-
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
-#if NR_CPUS <= BITS_PER_LONG
-
-#define CPU_MASK_ALL \
-(cpumask_t) { { \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-} }
-
-#define CPU_MASK_ALL_PTR (&CPU_MASK_ALL)
-
-#else
-
-#define CPU_MASK_ALL \
-(cpumask_t) { { \
- [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-} }
-
-/* cpu_mask_all is in init/main.c */
-extern cpumask_t cpu_mask_all;
-#define CPU_MASK_ALL_PTR (&cpu_mask_all)
-
-#endif
-
-#define CPU_MASK_NONE \
-(cpumask_t) { { \
- [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
-} }
-
-#define CPU_MASK_CPU0 \
-(cpumask_t) { { \
- [0] = 1UL \
-} }
-
-#define cpus_addr(src) ((src).bits)
-
-#if NR_CPUS > BITS_PER_LONG
-#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
-#define CPUMASK_FREE(m) kfree(m)
-#else
-#define CPUMASK_ALLOC(m) struct m _m, *m = &_m
-#define CPUMASK_FREE(m)
-#endif
-#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v)
-
-#define cpu_remap(oldbit, old, new) \
- __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
-static inline int __cpu_remap(int oldbit,
- const cpumask_t *oldp, const cpumask_t *newp, int nbits)
-{
- return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
-}
-
-#define cpus_remap(dst, src, old, new) \
- __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
-static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
- const cpumask_t *oldp, const cpumask_t *newp, int nbits)
-{
- bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
-}
-
-#define cpus_onto(dst, orig, relmap) \
- __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
-static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
- const cpumask_t *relmapp, int nbits)
-{
- bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
-}
-
-#define cpus_fold(dst, orig, sz) \
- __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
-static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
- int sz, int nbits)
-{
- bitmap_fold(dstp->bits, origp->bits, sz, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+#define cpumask_bits(maskp) ((maskp)->bits)
#if NR_CPUS == 1
-
#define nr_cpu_ids 1
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define first_cpu(src) ({ (void)(src); 0; })
-#define next_cpu(n, src) ({ (void)(src); 1; })
-#define any_online_cpu(mask) 0
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
-#else /* NR_CPUS > 1 */
-
+#else
extern int nr_cpu_ids;
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-int __first_cpu(const cpumask_t *srcp);
-int __next_cpu(int n, const cpumask_t *srcp);
-int __any_online_cpu(const cpumask_t *mask);
-
-#define first_cpu(src) __first_cpu(&(src))
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#define any_online_cpu(mask) __any_online_cpu(&(mask))
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = next_cpu((cpu), (mask)), \
- (cpu) < NR_CPUS; )
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
#endif
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#if NR_CPUS <= 64
-
-#define next_cpu_nr(n, src) next_cpu(n, src)
-#define cpus_weight_nr(cpumask) cpus_weight(cpumask)
-#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
-
-#else /* NR_CPUS > 64 */
-
-int __next_cpu_nr(int n, const cpumask_t *srcp);
-#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src))
-#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
-#define for_each_cpu_mask_nr(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = next_cpu_nr((cpu), (mask)), \
- (cpu) < nr_cpu_ids; )
-
-#endif /* NR_CPUS > 64 */
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
+ * not all bits may be allocated. */
+#define nr_cpumask_bits nr_cpu_ids
+#else
+#define nr_cpumask_bits NR_CPUS
+#endif
/*
* The following particular system cpumasks and operations manage
@@ -487,12 +80,6 @@ extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;
-/* These strip const, as traditionally they weren't const. */
-#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
-#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
-#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
-#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
-
#if NR_CPUS > 1
#define num_online_cpus() cpumask_weight(cpu_online_mask)
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
@@ -511,35 +98,6 @@ extern const struct cpumask *const cpu_active_mask;
#define cpu_active(cpu) ((cpu) == 0)
#endif
-#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-
-/* These are the new versions of the cpumask operators: passed by pointer.
- * The older versions will be implemented in terms of these, then deleted. */
-#define cpumask_bits(maskp) ((maskp)->bits)
-
-#if NR_CPUS <= BITS_PER_LONG
-#define CPU_BITS_ALL \
-{ \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-}
-
-#else /* NR_CPUS > BITS_PER_LONG */
-
-#define CPU_BITS_ALL \
-{ \
- [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-}
-#endif /* NR_CPUS > BITS_PER_LONG */
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
-#else
-#define nr_cpumask_bits NR_CPUS
-#endif
-
/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
@@ -1088,4 +646,241 @@ void set_cpu_active(unsigned int cpu, bool active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
+
+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap) \
+ ((struct cpumask *)(1 ? (bitmap) \
+ : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+ return 1;
+}
+
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+ cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+{
+ const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+ p -= cpu / BITS_PER_LONG;
+ return to_cpumask(p);
+}
+
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
+
+#if NR_CPUS <= BITS_PER_LONG
+#define CPU_BITS_ALL \
+{ \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+
+#else /* NR_CPUS > BITS_PER_LONG */
+
+#define CPU_BITS_ALL \
+{ \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+#endif /* NR_CPUS > BITS_PER_LONG */
+
+/*
+ *
+ * From here down, all obsolete. Use cpumask_ variants!
+ *
+ */
+#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
+/* These strip const, as traditionally they weren't const. */
+#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
+#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
+#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
+#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
+
+#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
+
+#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
+
+#if NR_CPUS <= BITS_PER_LONG
+
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+} }
+
+#else
+
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+} }
+
+#endif
+
+#define CPU_MASK_NONE \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
+} }
+
+#define CPU_MASK_CPU0 \
+(cpumask_t) { { \
+ [0] = 1UL \
+} }
+
+#if NR_CPUS == 1
+#define first_cpu(src) ({ (void)(src); 0; })
+#define next_cpu(n, src) ({ (void)(src); 1; })
+#define any_online_cpu(mask) 0
+#define for_each_cpu_mask(cpu, mask) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#else /* NR_CPUS > 1 */
+int __first_cpu(const cpumask_t *srcp);
+int __next_cpu(int n, const cpumask_t *srcp);
+int __any_online_cpu(const cpumask_t *mask);
+
+#define first_cpu(src) __first_cpu(&(src))
+#define next_cpu(n, src) __next_cpu((n), &(src))
+#define any_online_cpu(mask) __any_online_cpu(&(mask))
+#define for_each_cpu_mask(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = next_cpu((cpu), (mask)), \
+ (cpu) < NR_CPUS; )
+#endif /* NR_CPUS > 1 */
+
+#if NR_CPUS <= 64
+
+#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
+
+#else /* NR_CPUS > 64 */
+
+int __next_cpu_nr(int n, const cpumask_t *srcp);
+#define for_each_cpu_mask_nr(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = __next_cpu_nr((cpu), &(mask)), \
+ (cpu) < nr_cpu_ids; )
+
+#endif /* NR_CPUS > 64 */
+
+#define cpus_addr(src) ((src).bits)
+
+#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
+static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
+{
+ set_bit(cpu, dstp->bits);
+}
+
+#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
+static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
+{
+ clear_bit(cpu, dstp->bits);
+}
+
+#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
+static inline void __cpus_setall(cpumask_t *dstp, int nbits)
+{
+ bitmap_fill(dstp->bits, nbits);
+}
+
+#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
+static inline void __cpus_clear(cpumask_t *dstp, int nbits)
+{
+ bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - an inline version makes gcc generate worse code. */
+#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
+
+#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
+static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
+{
+ return test_and_set_bit(cpu, addr->bits);
+}
+
+#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_andnot(dst, src1, src2) \
+ __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_equal(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_intersects(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_subset(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
+static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_empty(srcp->bits, nbits);
+}
+
+#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
+static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_weight(srcp->bits, nbits);
+}
+
+#define cpus_shift_left(dst, src, n) \
+ __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
+static inline void __cpus_shift_left(cpumask_t *dstp,
+ const cpumask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+
#endif /* __LINUX_CPUMASK_H */
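As a minimal sketch of the migration this hunk is pushing (the old cpus_* operators kept only behind CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS), here is how a caller might use the retained pointer-based API; count_online_in() is an invented helper, not part of the patch.

/*
 * Illustrative only: count_online_in() shows cpumask_and()/cpumask_weight()
 * on an allocated cpumask_var_t replacing cpus_and()/cpus_weight() on a
 * stack cpumask_t.
 */
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int count_online_in(const struct cpumask *allowed)
{
	cpumask_var_t tmp;
	int n;

	/* off-stack allocation when CONFIG_CPUMASK_OFFSTACK is set */
	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, allowed, cpu_online_mask);
	n = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return n;
}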
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 1fbdea4f08eb..bbebef7713b3 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -499,6 +499,7 @@ struct cyclades_card {
void __iomem *p9050;
struct RUNTIME_9060 __iomem *p9060;
} ctl_addr;
+ struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */
int irq;
unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
unsigned int first_line; /* minor number of first channel on card */
@@ -541,6 +542,15 @@ struct cyclades_port {
int magic;
struct tty_port port;
struct cyclades_card *card;
+ union {
+ struct {
+ void __iomem *base_addr;
+ } cyy;
+ struct {
+ struct CH_CTRL __iomem *ch_ctrl;
+ struct BUF_CTRL __iomem *buf_ctrl;
+ } cyz;
+ } u;
int line;
int flags; /* defined in tty.h */
int type; /* UART type */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index f45a8ae5f828..3b85ba6479f4 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -8,10 +8,8 @@
#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H
-#ifdef CONFIG_EVENTFD
-
-/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h>
+#include <linux/file.h>
/*
* CAREFUL: Check include/asm-generic/fcntl.h when defining
@@ -27,16 +25,37 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+#ifdef CONFIG_EVENTFD
+
+struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
+void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
-int eventfd_signal(struct file *file, int n);
+struct eventfd_ctx *eventfd_ctx_fdget(int fd);
+struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
+int eventfd_signal(struct eventfd_ctx *ctx, int n);
#else /* CONFIG_EVENTFD */
-#define eventfd_fget(fd) ERR_PTR(-ENOSYS)
-static inline int eventfd_signal(struct file *file, int n)
-{ return 0; }
+/*
+ * Ugly ugly ugly error layer to support modules that use eventfd but
+ * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
+ */
+static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+{
+ return -ENOSYS;
+}
+
+static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
+{
+
+}
-#endif /* CONFIG_EVENTFD */
+#endif
#endif /* _LINUX_EVENTFD_H */
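A hedged sketch of how a module might consume the new context-based eventfd calls declared above; struct my_notifier and the my_notifier_* wrappers are hypothetical.

/* Illustrative only: these helpers are not part of the patch. */
#include <linux/eventfd.h>
#include <linux/err.h>

struct my_notifier {
	struct eventfd_ctx *trigger;
};

static int my_notifier_bind(struct my_notifier *n, int efd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(efd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	n->trigger = ctx;
	return 0;
}

static void my_notifier_fire(struct my_notifier *n)
{
	eventfd_signal(n->trigger, 1);	/* add 1 to the eventfd counter */
}

static void my_notifier_unbind(struct my_notifier *n)
{
	eventfd_ctx_put(n->trigger);
}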
diff --git a/include/linux/fb.h b/include/linux/fb.h
index dd68358996b7..f847df9e99b6 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -819,6 +819,7 @@ struct fb_info {
int node;
int flags;
struct mutex lock; /* Lock for open/release/ioctl funcs */
+ struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
diff --git a/include/linux/fips.h b/include/linux/fips.h
new file mode 100644
index 000000000000..f8fb07b0b6b8
--- /dev/null
+++ b/include/linux/fips.h
@@ -0,0 +1,10 @@
+#ifndef _FIPS_H
+#define _FIPS_H
+
+#ifdef CONFIG_CRYPTO_FIPS
+extern int fips_enabled;
+#else
+#define fips_enabled 0
+#endif
+
+#endif
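For illustration, a tiny sketch of a driver consulting the new fips_enabled flag; my_alg_init() and its argument are invented for the example.

/* Illustrative only: refuse non-approved algorithms when FIPS mode is on. */
#include <linux/fips.h>
#include <linux/errno.h>

static int my_alg_init(int is_approved_alg)
{
	if (fips_enabled && !is_approved_alg)
		return -EINVAL;
	return 0;
}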
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 9823946adbc5..192d1e43c43c 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -127,6 +127,7 @@ struct fw_card {
struct delayed_work work;
int bm_retries;
int bm_generation;
+ __be32 bm_transaction_data[2];
bool broadcast_channel_allocated;
u32 broadcast_channel;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0872372184fe..8f0478c8e1ad 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -715,7 +715,7 @@ struct posix_acl;
struct inode {
struct hlist_node i_hash;
- struct list_head i_list;
+ struct list_head i_list; /* backing dev IO list */
struct list_head i_sb_list;
struct list_head i_dentry;
unsigned long i_ino;
@@ -1336,9 +1336,6 @@ struct super_block {
struct xattr_handler **s_xattr;
struct list_head s_inodes; /* all inodes */
- struct list_head s_dirty; /* dirty inodes */
- struct list_head s_io; /* parked for writeback */
- struct list_head s_more_io; /* parked for more writeback */
struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_files;
/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
@@ -1555,11 +1552,14 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
unsigned long, loff_t *);
+struct bdi_writeback;
+
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
void (*dirty_inode) (struct inode *);
+ struct bdi_writeback *(*inode_get_wb) (struct inode *);
int (*write_inode) (struct inode *, int);
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
@@ -2071,6 +2071,8 @@ extern int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
extern void generic_sync_sb_inodes(struct super_block *sb,
struct writeback_control *wbc);
+extern void generic_sync_bdi_inodes(struct super_block *sb,
+ struct writeback_control *);
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
@@ -2184,7 +2186,6 @@ extern int bdev_read_only(struct block_device *);
extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
-extern int sb_has_dirty_inodes(struct super_block *);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 44848aa830dc..ef415139d555 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -58,9 +58,13 @@
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
FS_DELETE)
+#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
+
/* listeners that hard code group numbers near the top */
#define DNOTIFY_GROUP_NUM UINT_MAX
-#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
+#define AUDIT_WATCH_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
+#define AUDIT_TREE_GROUP_NUM (AUDIT_WATCH_GROUP_NUM-1)
+#define INOTIFY_GROUP_NUM (AUDIT_TREE_GROUP_NUM-1)
struct fsnotify_group;
struct fsnotify_event;
@@ -280,7 +284,7 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
assert_spin_locked(&dentry->d_lock);
parent = dentry->d_parent;
- if (fsnotify_inode_watches_children(parent->d_inode))
+ if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode))
dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
else
dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
@@ -339,8 +343,10 @@ extern void fsnotify_recalc_inode_mask(struct inode *inode);
extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry));
/* find (and take a reference) to a mark associated with group and inode */
extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode);
+/* copy the values from old into new */
+extern void fsnotify_duplicate_mark(struct fsnotify_mark_entry *new, struct fsnotify_mark_entry *old);
/* attach the mark to both the group and the inode */
-extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode);
+extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode, int allow_dups);
/* given a mark, flag it to be freed when all references are dropped */
extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
/* run all the marks in a group, and flag them to be freed */
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index d41ed593f79f..cf593bf9fd32 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -25,6 +25,11 @@
* - add IOCTL message
* - add unsolicited notification support
* - add POLL message and NOTIFY_POLL notification
+ *
+ * 7.12
+ * - add umask flag to input argument of open, mknod and mkdir
+ * - add notification messages for invalidation of inodes and
+ * directory entries
*/
#ifndef _LINUX_FUSE_H
@@ -36,7 +41,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 11
+#define FUSE_KERNEL_MINOR_VERSION 12
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -112,6 +117,7 @@ struct fuse_file_lock {
* INIT request/reply flags
*
* FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".."
+ * FUSE_DONT_MASK: don't apply umask to file mode on create operations
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -119,6 +125,7 @@ struct fuse_file_lock {
#define FUSE_ATOMIC_O_TRUNC (1 << 3)
#define FUSE_EXPORT_SUPPORT (1 << 4)
#define FUSE_BIG_WRITES (1 << 5)
+#define FUSE_DONT_MASK (1 << 6)
/**
* CUSE INIT request/reply flags
@@ -224,6 +231,8 @@ enum fuse_opcode {
enum fuse_notify_code {
FUSE_NOTIFY_POLL = 1,
+ FUSE_NOTIFY_INVAL_INODE = 2,
+ FUSE_NOTIFY_INVAL_ENTRY = 3,
FUSE_NOTIFY_CODE_MAX,
};
@@ -262,14 +271,18 @@ struct fuse_attr_out {
struct fuse_attr attr;
};
+#define FUSE_COMPAT_MKNOD_IN_SIZE 8
+
struct fuse_mknod_in {
__u32 mode;
__u32 rdev;
+ __u32 umask;
+ __u32 padding;
};
struct fuse_mkdir_in {
__u32 mode;
- __u32 padding;
+ __u32 umask;
};
struct fuse_rename_in {
@@ -301,7 +314,14 @@ struct fuse_setattr_in {
struct fuse_open_in {
__u32 flags;
+ __u32 unused;
+};
+
+struct fuse_create_in {
+ __u32 flags;
__u32 mode;
+ __u32 umask;
+ __u32 padding;
};
struct fuse_open_out {
@@ -508,4 +528,16 @@ struct fuse_dirent {
#define FUSE_DIRENT_SIZE(d) \
FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
+struct fuse_notify_inval_inode_out {
+ __u64 ino;
+ __s64 off;
+ __s64 len;
+};
+
+struct fuse_notify_inval_entry_out {
+ __u64 parent;
+ __u32 namelen;
+ __u32 padding;
+};
+
#endif /* _LINUX_FUSE_H */
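A sketch, assuming FUSE_DONT_MASK was negotiated, of filling the widened mknod request; fill_mknod_in() is a hypothetical helper, not code from fs/fuse.

/*
 * Hypothetical helper: fills the extended request so a server that
 * honours FUSE_DONT_MASK can apply the umask itself.
 */
#include <linux/fuse.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>

static void fill_mknod_in(struct fuse_mknod_in *inarg, umode_t mode, dev_t rdev)
{
	inarg->mode = mode;
	inarg->rdev = new_encode_dev(rdev);
	inarg->umask = current_umask();
	inarg->padding = 0;
}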
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 50d568ec178a..53744fa1c8b7 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -2,7 +2,7 @@
#define __HID_DEBUG_H
/*
- * Copyright (c) 2007 Jiri Kosina
+ * Copyright (c) 2007-2009 Jiri Kosina
*/
/*
@@ -22,24 +22,44 @@
*
*/
-#ifdef CONFIG_HID_DEBUG
+#define HID_DEBUG_BUFSIZE 512
-void hid_dump_input(struct hid_usage *, __s32);
-void hid_dump_device(struct hid_device *);
-void hid_dump_field(struct hid_field *, int);
-void hid_resolv_usage(unsigned);
-void hid_resolv_event(__u8, __u16);
+#ifdef CONFIG_DEBUG_FS
+
+void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
+void hid_dump_device(struct hid_device *, struct seq_file *);
+void hid_dump_field(struct hid_field *, int, struct seq_file *);
+char *hid_resolv_usage(unsigned, struct seq_file *);
+void hid_debug_register(struct hid_device *, const char *);
+void hid_debug_unregister(struct hid_device *);
+void hid_debug_init(void);
+void hid_debug_exit(void);
+void hid_debug_event(struct hid_device *, char *);
-#else
-#define hid_dump_input(a,b) do { } while (0)
-#define hid_dump_device(c) do { } while (0)
-#define hid_dump_field(a,b) do { } while (0)
-#define hid_resolv_usage(a) do { } while (0)
-#define hid_resolv_event(a,b) do { } while (0)
+struct hid_debug_list {
+ char *hid_debug_buf;
+ int head;
+ int tail;
+ struct fasync_struct *fasync;
+ struct hid_device *hdev;
+ struct list_head node;
+ struct mutex read_mutex;
+};
-#endif /* CONFIG_HID_DEBUG */
+#else
+#define hid_dump_input(a,b,c) do { } while (0)
+#define hid_dump_device(a,b) do { } while (0)
+#define hid_dump_field(a,b,c) do { } while (0)
+#define hid_resolv_usage(a,b) do { } while (0)
+#define hid_debug_register(a, b) do { } while (0)
+#define hid_debug_unregister(a) do { } while (0)
+#define hid_debug_init() do { } while (0)
+#define hid_debug_exit() do { } while (0)
+#define hid_debug_event(a,b) do { } while (0)
+
+#endif
#endif
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 53489fd4d700..a0ebdace7baa 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -500,6 +500,14 @@ struct hid_device { /* device report descriptor */
/* handler for raw output data, used by hidraw */
int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t);
+
+ /* debugging support via debugfs */
+ unsigned short debug;
+ struct dentry *debug_dir;
+ struct dentry *debug_rdesc;
+ struct dentry *debug_events;
+ struct list_head debug_list;
+ wait_queue_head_t debug_wait;
};
static inline void *hid_get_drvdata(struct hid_device *hdev)
@@ -657,9 +665,7 @@ struct hid_ll_driver {
/* HID core API */
-#ifdef CONFIG_HID_DEBUG
extern int hid_debug;
-#endif
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
@@ -815,21 +821,9 @@ int hid_pidff_init(struct hid_device *hid);
#define hid_pidff_init NULL
#endif
-#ifdef CONFIG_HID_DEBUG
#define dbg_hid(format, arg...) if (hid_debug) \
printk(KERN_DEBUG "%s: " format ,\
__FILE__ , ## arg)
-#define dbg_hid_line(format, arg...) if (hid_debug) \
- printk(format, ## arg)
-#else
-static inline int __attribute__((format(printf, 1, 2)))
-dbg_hid(const char *fmt, ...)
-{
- return 0;
-}
-#define dbg_hid_line dbg_hid
-#endif /* HID_DEBUG */
-
#define err_hid(format, arg...) printk(KERN_ERR "%s: " format "\n" , \
__FILE__ , ## arg)
#endif /* HID_FF */
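Since dbg_hid() is now compiled in unconditionally (gated at runtime by hid_debug), a driver can call it directly; the probe helper below is a made-up sketch.

/* Illustrative only: my_hid_probe_dump() is not part of any real driver. */
#include <linux/hid.h>

static int my_hid_probe_dump(struct hid_device *hdev)
{
	dbg_hid("probing %s on bus 0x%x\n", hdev->name, hdev->bus);
	return 0;
}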
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 7400900de94a..54648e625efd 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/percpu.h>
+#include <linux/timer.h>
struct hrtimer_clock_base;
@@ -447,6 +448,8 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
+ if (likely(!timer->start_pid))
+ return;
timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
timer->function, timer->start_comm, 0);
}
@@ -456,6 +459,8 @@ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
+ if (likely(!timer_stats_active))
+ return;
__timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
}
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index c9087de5c6c6..e844a0b18695 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -28,17 +28,6 @@
identify a legacy client. If you don't need them, just don't set them. */
/*
- * ---- Driver types -----------------------------------------------------
- */
-
-#define I2C_DRIVERID_MSP3400 1
-#define I2C_DRIVERID_TUNER 2
-#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */
-#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */
-#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
-#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
-
-/*
* ---- Adapter types ----------------------------------------------------
*/
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index f4784c0fe975..57d41b0abce2 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -98,7 +98,6 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
/**
* struct i2c_driver - represent an I2C device driver
- * @id: Unique driver ID (optional)
* @class: What kind of i2c device we instantiate (for detect)
* @attach_adapter: Callback for bus addition (for legacy drivers)
* @detach_adapter: Callback for bus removal (for legacy drivers)
@@ -135,7 +134,6 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* not allowed.
*/
struct i2c_driver {
- int id;
unsigned int class;
/* Notifies the driver that a new bus has appeared or is about to be
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 95c6e00a72e8..edc93a6d931d 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1062,7 +1062,6 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
extern int ide_vlb_clk;
extern int ide_pci_clk;
-unsigned int ide_rq_bytes(struct request *);
int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
void ide_kill_rq(ide_drive_t *, struct request *);
@@ -1361,7 +1360,6 @@ int ide_in_drive_list(u16 *, const struct drive_list_entry *);
#ifdef CONFIG_BLK_DEV_IDEDMA
int ide_dma_good_drive(ide_drive_t *);
int __ide_dma_bad_drive(ide_drive_t *);
-int ide_id_dma_bug(ide_drive_t *);
u8 ide_find_dma_mode(ide_drive_t *, u8);
@@ -1402,7 +1400,6 @@ void ide_dma_lost_irq(ide_drive_t *);
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
#else
-static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
@@ -1422,6 +1419,7 @@ static inline void ide_dma_unmap_sg(ide_drive_t *drive,
#ifdef CONFIG_BLK_DEV_IDEACPI
int ide_acpi_init(void);
+bool ide_port_acpi(ide_hwif_t *hwif);
extern int ide_acpi_exec_tfs(ide_drive_t *drive);
extern void ide_acpi_get_timing(ide_hwif_t *hwif);
extern void ide_acpi_push_timing(ide_hwif_t *hwif);
@@ -1430,6 +1428,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *);
extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
#else
static inline int ide_acpi_init(void) { return 0; }
+static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index ae3a1871413d..70fdba2bbf71 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -78,6 +78,7 @@
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_TIPC 0x88CA /* TIPC */
+#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
diff --git a/include/linux/ima.h b/include/linux/ima.h
index b1b827d091a9..0e3f2a4c25f6 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -24,6 +24,7 @@ extern int ima_path_check(struct path *path, int mask, int update_counts);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern void ima_counts_get(struct file *file);
+extern void ima_counts_put(struct path *path, int mask);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -60,5 +61,10 @@ static inline void ima_counts_get(struct file *file)
{
return;
}
+
+static inline void ima_counts_put(struct path *path, int mask)
+{
+ return;
+}
#endif /* CONFIG_IMA_H */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 5368fbdc7801..7fc01b13be43 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -183,5 +183,8 @@ extern struct cred init_cred;
LIST_HEAD_INIT(cpu_timers[2]), \
}
+/* Attach to the init_task data structure for proper alignment */
+#define __init_task_data __attribute__((__section__(".data.init_task")))
+
#endif
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
new file mode 100644
index 000000000000..7964516c6954
--- /dev/null
+++ b/include/linux/input/matrix_keypad.h
@@ -0,0 +1,65 @@
+#ifndef _MATRIX_KEYPAD_H
+#define _MATRIX_KEYPAD_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+#define MATRIX_MAX_ROWS 16
+#define MATRIX_MAX_COLS 16
+
+#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
+ (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
+ (val & 0xffff))
+
+#define KEY_ROW(k) (((k) >> 24) & 0xff)
+#define KEY_COL(k) (((k) >> 16) & 0xff)
+#define KEY_VAL(k) ((k) & 0xffff)
+
+/**
+ * struct matrix_keymap_data - keymap for matrix keyboards
+ * @keymap: pointer to array of uint32 values encoded with KEY() macro
+ * representing keymap
+ * @keymap_size: number of entries (initialized) in this keymap
+ * @max_keymap_size: maximum size of keymap supported by the device
+ *
+ * This structure is supposed to be used by platform code to supply
+ * keymaps to drivers that implement matrix-like keypads/keyboards.
+ */
+struct matrix_keymap_data {
+ const uint32_t *keymap;
+ unsigned int keymap_size;
+ unsigned int max_keymap_size;
+};
+
+/**
+ * struct matrix_keypad_platform_data - platform-dependent keypad data
+ * @keymap_data: pointer to &matrix_keymap_data
+ * @row_gpios: array of gpio numbers representing rows
+ * @col_gpios: array of gpio numbers representing columns
+ * @num_row_gpios: actual number of row gpios used by device
+ * @num_col_gpios: actual number of col gpios used by device
+ * @col_scan_delay_us: delay, measured in microseconds, that is
+ * needed before we can scan the keypad after activating a column gpio
+ * @debounce_ms: debounce interval in milliseconds
+ *
+ * This structure represents platform-specific data that is used by the
+ * matrix_keypad driver to perform proper initialization.
+ */
+struct matrix_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+
+ unsigned int row_gpios[MATRIX_MAX_ROWS];
+ unsigned int col_gpios[MATRIX_MAX_COLS];
+ unsigned int num_row_gpios;
+ unsigned int num_col_gpios;
+
+ unsigned int col_scan_delay_us;
+
+ /* key debounce interval in milli-second */
+ unsigned int debounce_ms;
+
+ bool active_low;
+ bool wakeup;
+};
+
+#endif /* _MATRIX_KEYPAD_H */
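A minimal board-code sketch using the new matrix keypad platform data; the keymap, GPIO numbers and timings are invented for the example.

/* Illustrative only: not part of any real board file. */
#include <linux/kernel.h>
#include <linux/input/matrix_keypad.h>

static const uint32_t board_keymap[] = {
	KEY(0, 0, KEY_A),
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_ENTER),
};

static const struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};

static struct matrix_keypad_platform_data board_keypad_pdata = {
	.keymap_data	= &board_keymap_data,
	.row_gpios	= { 10, 11 },
	.col_gpios	= { 12, 13 },
	.num_row_gpios	= 2,
	.num_col_gpios	= 2,
	.col_scan_delay_us = 5,
	.debounce_ms	= 20,
	.active_low	= true,
};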
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2721f07e9354..80b776871a9b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -77,7 +77,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* struct irqaction - per interrupt action descriptor
* @handler: interrupt handler function
* @flags: flags (see IRQF_* above)
- * @mask: no comment as it is useless and about to be removed
* @name: name of the device
* @dev_id: cookie to identify the device
* @next: pointer to the next irqaction for shared interrupts
@@ -90,7 +89,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
struct irqaction {
irq_handler_t handler;
unsigned long flags;
- cpumask_t mask;
const char *name;
void *dev_id;
struct irqaction *next;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cb2e77a3f7f7..6956df9961ab 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -220,13 +220,6 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
/*
- * Migration helpers for obsolete names, they will go away:
- */
-#define hw_interrupt_type irq_chip
-#define no_irq_type no_irq_chip
-typedef struct irq_desc irq_desc_t;
-
-/*
* Pick up the arch-dependent methods:
*/
#include <asm/hw_irq.h>
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index fac104e7186a..d6320a3e8def 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -303,6 +303,7 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in
extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
extern const char *print_tainted(void);
extern void add_taint(unsigned flag);
extern int test_taint(unsigned flag);
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 29f62e1733ff..ad6bdf5a5970 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -38,7 +38,7 @@ extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
spinlock_t *lock);
extern void kfifo_free(struct kfifo *fifo);
extern unsigned int __kfifo_put(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len);
+ const unsigned char *buffer, unsigned int len);
extern unsigned int __kfifo_get(struct kfifo *fifo,
unsigned char *buffer, unsigned int len);
@@ -77,7 +77,7 @@ static inline void kfifo_reset(struct kfifo *fifo)
* bytes copied.
*/
static inline unsigned int kfifo_put(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len)
+ const unsigned char *buffer, unsigned int len)
{
unsigned long flags;
unsigned int ret;
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6adcc297e354..cedae0b71192 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -29,8 +29,7 @@ struct pt_regs;
*
* On some architectures it is required to skip a breakpoint
* exception when it occurs after a breakpoint has been removed.
- * This can be implemented in the architecture specific portion of
- * for kgdb.
+ * This can be implemented in the architecture specific portion of kgdb.
*/
extern int kgdb_skipexception(int exception, struct pt_regs *regs);
@@ -65,7 +64,7 @@ struct uart_port;
/**
* kgdb_breakpoint - compiled in breakpoint
*
- * This will be impelmented a static inline per architecture. This
+ * This will be implemented as a static inline per architecture. This
* function is called by the kgdb core to execute an architecture
* specific trap to cause kgdb to enter the exception processing.
*
@@ -190,7 +189,7 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
- * and get them be in a known state. This should do what is needed
+ * and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in case of MIPS, smp_call_function() is used to roundup CPUs. In
@@ -267,8 +266,45 @@ extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
-extern int kgdb_mem2hex(char *mem, char *buf, int count);
-extern int kgdb_hex2mem(char *buf, char *mem, int count);
+
+/**
+ * kgdb_mem2hex - (optional arch override) translate bin to hex chars
+ * @mem: source buffer
+ * @buf: target buffer
+ * @count: number of bytes in mem
+ *
+ * Architectures which do not support probe_kernel_(read|write)
+ * can make an alternate implementation of this function.
+ * This function safely reads memory into hex
+ * characters for use with the kgdb protocol.
+ */
+extern int __weak kgdb_mem2hex(char *mem, char *buf, int count);
+
+/**
+ * kgdb_hex2mem - (optional arch override) translate hex chars to bin
+ * @buf: source buffer
+ * @mem: target buffer
+ * @count: number of bytes in mem
+ *
+ * Architectures which do not support probe_kernel_(read|write)
+ * can make an alternate implementation of this function.
+ * This function safely writes hex characters into memory
+ * for use with the kgdb protocol.
+ */
+extern int __weak kgdb_hex2mem(char *buf, char *mem, int count);
+
+/**
+ * kgdb_ebin2mem - (optional arch override) Copy binary array from @buf into @mem
+ * @buf: source buffer
+ * @mem: target buffer
+ * @count: number of bytes in mem
+ *
+ * Architectures which do not support probe_kernel_(read|write)
+ * can make an alternate implementation of this function.
+ * This function safely copies a binary array into memory
+ * for use with the kgdb protocol.
+ */
+extern int __weak kgdb_ebin2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
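A sketch of what an architecture override of the now-weak kgdb_mem2hex() might look like, assuming the arch can always dereference 'mem' directly; this is not the generic implementation.

/*
 * Sketch only: assumes 'mem' is safely dereferenceable, which is exactly
 * what the probe_kernel_read() based generic version does not assume.
 */
#include <linux/kgdb.h>
#include <linux/kernel.h>

int kgdb_mem2hex(char *mem, char *buf, int count)
{
	while (count-- > 0) {
		unsigned char c = *mem++;

		*buf++ = hex_asc_hi(c);
		*buf++ = hex_asc_lo(c);
	}
	*buf = 0;

	return 0;
}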
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 3db5d8d37485..76c640834ea6 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -14,7 +14,7 @@
#define KVM_API_VERSION 12
-/* for KVM_TRACE_ENABLE */
+/* for KVM_TRACE_ENABLE, deprecated */
struct kvm_user_trace_setup {
__u32 buf_size; /* sub_buffer size of each per-cpu */
__u32 buf_nr; /* the number of sub_buffers of each per-cpu */
@@ -70,6 +70,14 @@ struct kvm_irqchip {
} chip;
};
+/* for KVM_CREATE_PIT2 */
+struct kvm_pit_config {
+ __u32 flags;
+ __u32 pad[15];
+};
+
+#define KVM_PIT_SPEAKER_DUMMY 1
+
#define KVM_EXIT_UNKNOWN 0
#define KVM_EXIT_EXCEPTION 1
#define KVM_EXIT_IO 2
@@ -87,6 +95,10 @@ struct kvm_irqchip {
#define KVM_EXIT_S390_RESET 14
#define KVM_EXIT_DCR 15
#define KVM_EXIT_NMI 16
+#define KVM_EXIT_INTERNAL_ERROR 17
+
+/* For KVM_EXIT_INTERNAL_ERROR */
+#define KVM_INTERNAL_ERROR_EMULATION 1
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
@@ -173,6 +185,9 @@ struct kvm_run {
__u32 data;
__u8 is_write;
} dcr;
+ struct {
+ __u32 suberror;
+ } internal;
/* Fix the size of the union. */
char padding[256];
};
@@ -310,35 +325,6 @@ struct kvm_guest_debug {
#define KVM_TRC_CYCLE_SIZE 8
#define KVM_TRC_EXTRA_MAX 7
-/* This structure represents a single trace buffer record. */
-struct kvm_trace_rec {
- /* variable rec_val
- * is split into:
- * bits 0 - 27 -> event id
- * bits 28 -30 -> number of extra data args of size u32
- * bits 31 -> binary indicator for if tsc is in record
- */
- __u32 rec_val;
- __u32 pid;
- __u32 vcpu_id;
- union {
- struct {
- __u64 timestamp;
- __u32 extra_u32[KVM_TRC_EXTRA_MAX];
- } __attribute__((packed)) timestamp;
- struct {
- __u32 extra_u32[KVM_TRC_EXTRA_MAX];
- } notimestamp;
- } u;
-};
-
-#define TRACE_REC_EVENT_ID(val) \
- (0x0fffffff & (val))
-#define TRACE_REC_NUM_DATA_ARGS(val) \
- (0x70000000 & ((val) << 28))
-#define TRACE_REC_TCS(val) \
- (0x80000000 & ((val) << 31))
-
#define KVMIO 0xAE
/*
@@ -415,6 +401,14 @@ struct kvm_trace_rec {
#define KVM_CAP_ASSIGN_DEV_IRQ 29
/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
+#ifdef __KVM_HAVE_MCE
+#define KVM_CAP_MCE 31
+#endif
+#define KVM_CAP_IRQFD 32
+#ifdef __KVM_HAVE_PIT
+#define KVM_CAP_PIT2 33
+#endif
+#define KVM_CAP_SET_BOOT_CPU_ID 34
#ifdef KVM_CAP_IRQ_ROUTING
@@ -454,15 +448,32 @@ struct kvm_irq_routing {
#endif
+#ifdef KVM_CAP_MCE
+/* x86 MCE */
+struct kvm_x86_mce {
+ __u64 status;
+ __u64 addr;
+ __u64 misc;
+ __u64 mcg_status;
+ __u8 bank;
+ __u8 pad1[7];
+ __u64 pad2[3];
+};
+#endif
+
+#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
+
+struct kvm_irqfd {
+ __u32 fd;
+ __u32 gsi;
+ __u32 flags;
+ __u8 pad[20];
+};
+
/*
* ioctls for VM fds
*/
#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region)
-#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
-#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
-#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\
- struct kvm_userspace_memory_region)
-#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
/*
* KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
* a vcpu fd.
@@ -470,6 +481,11 @@ struct kvm_irq_routing {
#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
+#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
+#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
+#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\
+ struct kvm_userspace_memory_region)
+#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
/* Device model IOC */
#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
@@ -498,6 +514,9 @@ struct kvm_irq_routing {
#define KVM_ASSIGN_SET_MSIX_ENTRY \
_IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry)
#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
+#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
+#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
+#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
/*
* ioctls for vcpu fds
@@ -541,6 +560,10 @@ struct kvm_irq_routing {
#define KVM_NMI _IO(KVMIO, 0x9a)
/* Available with KVM_CAP_SET_GUEST_DEBUG */
#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug)
+/* MCE for x86 */
+#define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64)
+#define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64)
+#define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce)
/*
* Deprecated interfaces
@@ -633,7 +656,7 @@ struct kvm_assigned_msix_nr {
__u16 padding;
};
-#define KVM_MAX_MSIX_PER_DEV 512
+#define KVM_MAX_MSIX_PER_DEV 256
struct kvm_assigned_msix_entry {
__u32 assigned_dev_id;
__u32 gsi;
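A userspace sketch (not kernel code) of attaching an eventfd to a guest GSI through the new KVM_IRQFD ioctl and struct kvm_irqfd; attach_irqfd() is a made-up helper.

/* Illustrative only: minimal error handling, invented helper name. */
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <linux/kvm.h>

static int attach_irqfd(int vm_fd, int gsi)
{
	struct kvm_irqfd irqfd = { 0 };
	int efd = eventfd(0, 0);

	if (efd < 0)
		return -1;

	irqfd.fd = efd;
	irqfd.gsi = gsi;
	if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
		return -1;

	return efd;	/* writing a counter value to efd now injects the GSI */
}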
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index aacc5449f586..8e04a3452e88 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -42,6 +42,7 @@
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
+struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
@@ -59,9 +60,13 @@ struct kvm_io_bus {
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
- gpa_t addr, int len, int is_write);
-void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
+ const void *val);
+int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
+ void *val);
+void __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+ struct kvm_io_device *dev);
+void kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
struct kvm_io_device *dev);
struct kvm_vcpu {
@@ -103,7 +108,7 @@ struct kvm_memory_slot {
struct {
unsigned long rmap_pde;
int write_count;
- } *lpage_info;
+ } *lpage_info[KVM_NR_PAGE_SIZES - 1];
unsigned long userspace_addr;
int user_alloc;
};
@@ -123,17 +128,29 @@ struct kvm_kernel_irq_routing_entry {
};
struct kvm {
- struct mutex lock; /* protects the vcpus array and APIC accesses */
spinlock_t mmu_lock;
+ spinlock_t requests_lock;
struct rw_semaphore slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
int nmemslots;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
KVM_PRIVATE_MEM_SLOTS];
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+ u32 bsp_vcpu_id;
+ struct kvm_vcpu *bsp_vcpu;
+#endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ atomic_t online_vcpus;
struct list_head vm_list;
+ struct mutex lock;
struct kvm_io_bus mmio_bus;
struct kvm_io_bus pio_bus;
+#ifdef CONFIG_HAVE_KVM_EVENTFD
+ struct {
+ spinlock_t lock;
+ struct list_head items;
+ } irqfds;
+#endif
struct kvm_vm_stat stat;
struct kvm_arch arch;
atomic_t users_count;
@@ -142,6 +159,7 @@ struct kvm {
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
+ struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
struct hlist_head mask_notifier_list;
@@ -165,6 +183,17 @@ struct kvm {
#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
+static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+{
+ smp_rmb();
+ return kvm->vcpus[i];
+}
+
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
+ idx < atomic_read(&kvm->online_vcpus) && vcpup; \
+ vcpup = kvm_get_vcpu(kvm, ++idx))
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
@@ -199,6 +228,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
int user_alloc);
+void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
@@ -241,8 +271,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_dev_ioctl_check_extension(long ext);
@@ -360,11 +388,17 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
+#ifdef __KVM_HAVE_IOAPIC
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+ union kvm_ioapic_redirect_entry *entry,
+ unsigned long *deliver_bitmask);
+#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
-void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+ struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
@@ -457,37 +491,6 @@ struct kvm_stats_debugfs_item {
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
-#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 5, d1, d2, d3, d4, d5)
-#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 4, d1, d2, d3, d4, 0)
-#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 3, d1, d2, d3, 0, 0)
-#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 2, d1, d2, 0, 0, 0)
-#define KVMTRACE_1D(evt, vcpu, d1, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 1, d1, 0, 0, 0, 0)
-#define KVMTRACE_0D(evt, vcpu, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 0, 0, 0, 0, 0, 0)
-
-#ifdef CONFIG_KVM_TRACE
-int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
-void kvm_trace_cleanup(void);
-#else
-static inline
-int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
-{
- return -EINVAL;
-}
-#define kvm_trace_cleanup() ((void)0)
-#endif
-
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
@@ -523,4 +526,28 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
#endif
+#ifdef CONFIG_HAVE_KVM_EVENTFD
+
+void kvm_irqfd_init(struct kvm *kvm);
+int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+void kvm_irqfd_release(struct kvm *kvm);
+
+#else
+
+static inline void kvm_irqfd_init(struct kvm *kvm) {}
+static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+{
+ return -EINVAL;
+}
+
+static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+#endif /* CONFIG_HAVE_KVM_EVENTFD */
+
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
+}
+#endif
#endif
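
A minimal usage sketch of the vcpu iteration helper whose loop header appears above, combined with the new kvm_vcpu_is_bsp() test. The macro name kvm_for_each_vcpu is assumed (it is not visible in this hunk), and kvm_vcpu_is_bsp() only exists under CONFIG_KVM_APIC_ARCHITECTURE:

static struct kvm_vcpu *example_find_bsp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	/* assumed macro name wrapping the loop header shown above */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (kvm_vcpu_is_bsp(vcpu))	/* CONFIG_KVM_APIC_ARCHITECTURE only */
			return vcpu;
	}
	return NULL;
}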
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h
new file mode 100644
index 000000000000..afc9f9fd70f5
--- /dev/null
+++ b/include/linux/leds-lp3944.h
@@ -0,0 +1,53 @@
+/*
+ * leds-lp3944.h - platform data structure for lp3944 led controller
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_LEDS_LP3944_H
+#define __LINUX_LEDS_LP3944_H
+
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+
+#define LP3944_LED0 0
+#define LP3944_LED1 1
+#define LP3944_LED2 2
+#define LP3944_LED3 3
+#define LP3944_LED4 4
+#define LP3944_LED5 5
+#define LP3944_LED6 6
+#define LP3944_LED7 7
+#define LP3944_LEDS_MAX 8
+
+#define LP3944_LED_STATUS_MASK 0x03
+enum lp3944_status {
+ LP3944_LED_STATUS_OFF = 0x0,
+ LP3944_LED_STATUS_ON = 0x1,
+ LP3944_LED_STATUS_DIM0 = 0x2,
+ LP3944_LED_STATUS_DIM1 = 0x3
+};
+
+enum lp3944_type {
+ LP3944_LED_TYPE_NONE,
+ LP3944_LED_TYPE_LED,
+ LP3944_LED_TYPE_LED_INVERTED,
+};
+
+struct lp3944_led {
+ char *name;
+ enum lp3944_type type;
+ enum lp3944_status status;
+};
+
+struct lp3944_platform_data {
+ struct lp3944_led leds[LP3944_LEDS_MAX];
+ u8 leds_size;
+};
+
+#endif /* __LINUX_LEDS_LP3944_H */
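
A hedged sketch of how board code might fill in the platform data defined above; the LED names and channel assignments are purely illustrative:

#include <linux/leds-lp3944.h>

static struct lp3944_platform_data example_lp3944_pdata = {
	.leds = {
		[LP3944_LED0] = {
			.name   = "example:red:status",	/* illustrative name */
			.type   = LP3944_LED_TYPE_LED,
			.status = LP3944_LED_STATUS_OFF,
		},
		[LP3944_LED1] = {
			.name   = "example:green:power",
			.type   = LP3944_LED_TYPE_LED_INVERTED,
			.status = LP3944_LED_STATUS_ON,
		},
		/* unused channels default to LP3944_LED_TYPE_NONE */
	},
	.leds_size = LP3944_LEDS_MAX,
};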
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 376fe07732ea..d8bf9665e70c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -45,7 +45,10 @@ struct led_classdev {
/* Get LED brightness level */
enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
- /* Activate hardware accelerated blink */
+ /* Activate hardware accelerated blink, delays are in
+ * milliseconds and if none is provided then a sensible default
+ * should be chosen. The call can adjust the timings if it can't
+ * match the values specified exactly. */
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off);
@@ -141,9 +144,14 @@ struct gpio_led {
const char *name;
const char *default_trigger;
unsigned gpio;
- u8 active_low : 1;
- u8 retain_state_suspended : 1;
+ unsigned active_low : 1;
+ unsigned retain_state_suspended : 1;
+ unsigned default_state : 2;
+ /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
};
+#define LEDS_GPIO_DEFSTATE_OFF 0
+#define LEDS_GPIO_DEFSTATE_ON 1
+#define LEDS_GPIO_DEFSTATE_KEEP 2
struct gpio_led_platform_data {
int num_leds;
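
A short sketch of how the new default_state field is meant to be used in a board's LED table; the GPIO numbers and names are invented for illustration:

static struct gpio_led example_leds[] = {
	{
		.name          = "example:blue:heartbeat",
		.gpio          = 42,			/* illustrative GPIO */
		.active_low    = 1,
		.default_state = LEDS_GPIO_DEFSTATE_KEEP, /* keep bootloader state */
	},
	{
		.name          = "example:red:fault",
		.gpio          = 43,
		.default_state = LEDS_GPIO_DEFSTATE_OFF,
	},
};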
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 7bc1440fc473..dbf2479e808e 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -11,7 +11,7 @@
#define LG_CLOCK_MIN_DELTA 100UL
#define LG_CLOCK_MAX_DELTA ULONG_MAX
-/*G:032 The second method of communicating with the Host is to via "struct
+/*G:031 The second method of communicating with the Host is via "struct
* lguest_data". Once the Guest's initialization hypercall tells the Host where
* this is, the Guest and Host both publish information in it. :*/
struct lguest_data
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index fee9e59649c1..691f59171c6c 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -22,6 +22,15 @@
#define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE)
/*
+ * For assembly routines.
+ *
+ * Note when using these that you must specify the appropriate
+ * alignment directives yourself
+ */
+#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw"
+#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw"
+
+/*
* This is used by architectures to keep arguments on the stack
* untouched by the compiler by keeping them live until the end.
* The argument stack may be owned by the assembly-language
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index c12c3c0932bf..dec82b0b05f9 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -25,9 +25,12 @@ struct pcap_chip;
int ezx_pcap_write(struct pcap_chip *, u8, u32);
int ezx_pcap_read(struct pcap_chip *, u8, u32 *);
+int ezx_pcap_set_bits(struct pcap_chip *, u8, u32, u32);
int pcap_to_irq(struct pcap_chip *, int);
+int irq_to_pcap(struct pcap_chip *, int);
int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *);
int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]);
+void pcap_set_ts_bits(struct pcap_chip *, u32);
#define PCAP_SECOND_PORT 1
#define PCAP_CS_AH 2
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d006e93d5c93..ba3a7cb1eaa0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -826,7 +826,7 @@ extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+ unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index d870ae2faedc..ec0f607b364a 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -40,7 +40,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
* - follow links at the end
* - require a directory
* - ending slashes ok even for nonexistent files
- * - internal "there are more path compnents" flag
+ * - internal "there are more path components" flag
* - locked when lookup done with dcache_lock held
* - dentry cache is untrusted; force a real lookup
*/
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 3430c7751948..7ae05338e94c 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -81,4 +81,17 @@ struct xt_conntrack_mtinfo1 {
__u8 state_mask, status_mask;
};
+struct xt_conntrack_mtinfo2 {
+ union nf_inet_addr origsrc_addr, origsrc_mask;
+ union nf_inet_addr origdst_addr, origdst_mask;
+ union nf_inet_addr replsrc_addr, replsrc_mask;
+ union nf_inet_addr repldst_addr, repldst_mask;
+ __u32 expires_min, expires_max;
+ __u16 l4proto;
+ __be16 origsrc_port, origdst_port;
+ __be16 replsrc_port, repldst_port;
+ __u16 match_flags, invert_flags;
+ __u16 state_mask, status_mask;
+};
+
#endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_osf.h b/include/linux/netfilter/xt_osf.h
index fd2272e0959a..18afa495f973 100644
--- a/include/linux/netfilter/xt_osf.h
+++ b/include/linux/netfilter/xt_osf.h
@@ -20,6 +20,8 @@
#ifndef _XT_OSF_H
#define _XT_OSF_H
+#include <linux/types.h>
+
#define MAXGENRELEN 32
#define XT_OSF_GENRE (1<<0)
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1d9518bc4c58..d68d2ed94f15 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -171,7 +171,6 @@ struct op_sample;
struct op_entry {
struct ring_buffer_event *event;
struct op_sample *sample;
- unsigned long irq_flags;
unsigned long size;
unsigned long *data;
};
@@ -180,6 +179,7 @@ void oprofile_write_reserve(struct op_entry *entry,
struct pt_regs * const regs,
unsigned long pc, int code, int size);
int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_add_data64(struct op_entry *entry, u64 val);
int oprofile_write_commit(struct op_entry *entry);
#endif /* OPROFILE_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d304ddf412d0..115fb7ba5089 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1145,7 +1145,7 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
/* If you want to know what to call your pci_dev, ask this function.
* Again, it's a wrapper around the generic device.
*/
-static inline const char *pci_name(struct pci_dev *pdev)
+static inline const char *pci_name(const struct pci_dev *pdev)
{
return dev_name(&pdev->dev);
}
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a3b000365795..73b46b6b904f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2645,6 +2645,7 @@
#define PCI_DEVICE_ID_NETMOS_9835 0x9835
#define PCI_DEVICE_ID_NETMOS_9845 0x9845
#define PCI_DEVICE_ID_NETMOS_9855 0x9855
+#define PCI_DEVICE_ID_NETMOS_9901 0x9901
#define PCI_VENDOR_ID_3COM_2 0xa727
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 8f921d74f49f..aefc2f12b48c 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -10,21 +10,70 @@
/*
* Base implementations of per-CPU variable declarations and definitions, where
* the section in which the variable is to be placed is provided by the
- * 'section' argument. This may be used to affect the parameters governing the
+ * 'sec' argument. This may be used to affect the parameters governing the
* variable's storage.
*
* NOTE! The sections for the DECLARE and for the DEFINE must match, lest
* linkage errors occur due the compiler generating the wrong code to access
* that section.
*/
-#define DECLARE_PER_CPU_SECTION(type, name, section) \
- extern \
- __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SECTION(type, name, section) \
- __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+#define __PCPU_ATTRS(sec) \
+ __attribute__((section(PER_CPU_BASE_SECTION sec))) \
+ PER_CPU_ATTRIBUTES
+
+#define __PCPU_DUMMY_ATTRS \
+ __attribute__((section(".discard"), unused))
+
+/*
+ * s390 and alpha modules require percpu variables to be defined as
+ * weak to force the compiler to generate GOT based external
+ * references for them. This is necessary because percpu sections
+ * will be located outside of the usually addressable area.
+ *
+ * This definition puts the following two extra restrictions when
+ * defining percpu variables.
+ *
+ * 1. The symbol must be globally unique, even static ones.
+ * 2. Static percpu variables cannot be defined inside a function.
+ *
+ * Archs which need weak percpu definitions should define
+ * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
+ *
+ * To ensure that the generic code observes the above two
+ * restrictions, the weak definition is used for all cases when
+ * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
+ */
+#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+/*
+ * __pcpu_scope_* dummy variable is used to enforce scope. It
+ * receives the static modifier when it's used in front of
+ * DEFINE_PER_CPU() and will trigger build failure if
+ * DECLARE_PER_CPU() is used for the same variable.
+ *
+ * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
+ * such that hidden weak symbol collision, which will cause unrelated
+ * variables to share the same address, can be detected during build.
+ */
+#define DECLARE_PER_CPU_SECTION(type, name, sec) \
+ extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SECTION(type, name, sec) \
+ __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
+ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
+ __typeof__(type) per_cpu__##name
+#else
+/*
+ * Normal declaration and definition macros.
+ */
+#define DECLARE_PER_CPU_SECTION(type, name, sec) \
+ extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SECTION(type, name, sec) \
+ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
+ __typeof__(type) per_cpu__##name
+#endif
/*
* Variant on the per-CPU variable declaration/definition theme used for
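
A hedged illustration of the scope check described above, assuming the usual DEFINE_PER_CPU()/DECLARE_PER_CPU() wrappers around the *_SECTION macros; under the weak-percpu configuration a mismatch now fails at build time instead of silently aliasing:

/* foo.c: the "static" lands on the __pcpu_scope_counter dummy. */
static DEFINE_PER_CPU(int, counter);

/* bar.c: DECLARE_PER_CPU() emits "extern ... __pcpu_scope_counter", which
 * no longer resolves against the static dummy above, so the mismatched
 * scope is caught when the kernel is linked. */
DECLARE_PER_CPU(int, counter);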
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 26fd9d12f050..8ce91af4aa19 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,7 +34,7 @@
#ifdef CONFIG_SMP
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
@@ -57,19 +57,73 @@
#endif
extern void *pcpu_base_addr;
+extern const int *pcpu_unit_map;
-typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
-typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size);
+typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
+typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
+typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
+typedef void (*pcpu_fc_map_fn_t)(void *ptr, size_t size, void *addr);
-extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+extern size_t __init pcpu_setup_first_chunk(
size_t static_size, size_t reserved_size,
- ssize_t dyn_size, ssize_t unit_size,
- void *base_addr,
- pcpu_populate_pte_fn_t populate_pte_fn);
+ ssize_t dyn_size, size_t unit_size,
+ void *base_addr, const int *unit_map);
extern ssize_t __init pcpu_embed_first_chunk(
size_t static_size, size_t reserved_size,
- ssize_t dyn_size, ssize_t unit_size);
+ ssize_t dyn_size);
+
+extern ssize_t __init pcpu_4k_first_chunk(
+ size_t static_size, size_t reserved_size,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn,
+ pcpu_fc_populate_pte_fn_t populate_pte_fn);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+extern int __init pcpu_lpage_build_unit_map(
+ size_t static_size, size_t reserved_size,
+ ssize_t *dyn_sizep, size_t *unit_sizep,
+ size_t lpage_size, int *unit_map,
+ pcpu_fc_cpu_distance_fn_t cpu_distance_fn);
+
+extern ssize_t __init pcpu_lpage_first_chunk(
+ size_t static_size, size_t reserved_size,
+ size_t dyn_size, size_t unit_size,
+ size_t lpage_size, const int *unit_map,
+ int nr_units,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn,
+ pcpu_fc_map_fn_t map_fn);
+
+extern void *pcpu_lpage_remapped(void *kaddr);
+#else
+static inline int pcpu_lpage_build_unit_map(
+ size_t static_size, size_t reserved_size,
+ ssize_t *dyn_sizep, size_t *unit_sizep,
+ size_t lpage_size, int *unit_map,
+ pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
+{
+ return -EINVAL;
+}
+
+static inline ssize_t __init pcpu_lpage_first_chunk(
+ size_t static_size, size_t reserved_size,
+ size_t dyn_size, size_t unit_size,
+ size_t lpage_size, const int *unit_map,
+ int nr_units,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn,
+ pcpu_fc_map_fn_t map_fn)
+{
+ return -EINVAL;
+}
+
+static inline void *pcpu_lpage_remapped(void *kaddr)
+{
+ return NULL;
+}
+#endif
/*
* Use this to get to a cpu's version of the per-cpu object
@@ -80,7 +134,7 @@ extern ssize_t __init pcpu_embed_first_chunk(
extern void *__alloc_reserved_percpu(size_t size, size_t align);
-#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
struct percpu_data {
void *ptrs[1];
@@ -99,11 +153,15 @@ struct percpu_data {
(__typeof__(ptr))__p->ptrs[(cpu)]; \
})
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void __init setup_per_cpu_areas(void);
+#endif
+
#else /* CONFIG_SMP */
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
@@ -124,6 +182,8 @@ static inline void free_percpu(void *p)
kfree(p);
}
+static inline void __init setup_per_cpu_areas(void) { }
+
#endif /* CONFIG_SMP */
#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 89698d8aba5c..5e970c7d3fd5 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -178,8 +178,10 @@ struct perf_counter_attr {
mmap : 1, /* include mmap data */
comm : 1, /* include comm data */
freq : 1, /* use freq, not period */
+ inherit_stat : 1, /* per task counts */
+ enable_on_exec : 1, /* next exec enables */
- __reserved_1 : 53;
+ __reserved_1 : 51;
__u32 wakeup_events; /* wakeup every n events */
__u32 __reserved_2;
@@ -232,6 +234,14 @@ struct perf_counter_mmap_page {
__u32 lock; /* seqlock for synchronization */
__u32 index; /* hardware counter identifier */
__s64 offset; /* add to hardware counter value */
+ __u64 time_enabled; /* time counter active */
+ __u64 time_running; /* time counter on cpu */
+
+ /*
+ * Hole for extension of the self monitor capabilities
+ */
+
+ __u64 __reserved[123]; /* align to 1k */
/*
* Control data for the mmap() data buffer.
@@ -253,7 +263,6 @@ struct perf_counter_mmap_page {
#define PERF_EVENT_MISC_KERNEL (1 << 0)
#define PERF_EVENT_MISC_USER (2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
-#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
struct perf_event_header {
__u32 type;
@@ -327,9 +336,18 @@ enum perf_event_type {
PERF_EVENT_FORK = 7,
/*
- * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
- * will be PERF_SAMPLE_*
- *
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, tid;
+ * u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 parent_id; } && PERF_FORMAT_ID
+ * };
+ */
+ PERF_EVENT_READ = 8,
+
+ /*
* struct {
* struct perf_event_header header;
*
@@ -337,8 +355,9 @@ enum perf_event_type {
* { u32 pid, tid; } && PERF_SAMPLE_TID
* { u64 time; } && PERF_SAMPLE_TIME
* { u64 addr; } && PERF_SAMPLE_ADDR
- * { u64 config; } && PERF_SAMPLE_CONFIG
+ * { u64 id; } && PERF_SAMPLE_ID
* { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 period; } && PERF_SAMPLE_PERIOD
*
* { u64 nr;
* { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
@@ -347,6 +366,9 @@ enum perf_event_type {
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
* };
*/
+ PERF_EVENT_SAMPLE = 9,
+
+ PERF_EVENT_MAX, /* non-ABI */
};
enum perf_callchain_context {
@@ -582,6 +604,7 @@ struct perf_counter_context {
int nr_counters;
int nr_active;
int is_active;
+ int nr_stat;
atomic_t refcount;
struct task_struct *task;
@@ -669,7 +692,16 @@ static inline int is_software_counter(struct perf_counter *counter)
(counter->attr.type != PERF_TYPE_HW_CACHE);
}
-extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+
+extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+
+static inline void
+perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+ if (atomic_read(&perf_swcounter_enabled[event]))
+ __perf_swcounter_event(event, nr, nmi, regs, addr);
+}
extern void __perf_counter_mmap(struct vm_area_struct *vma);
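
A hedged sketch of a call site for the new inline gate: when no software counter of the given type is enabled, the call reduces to a single atomic_read(). The event chosen below, PERF_COUNT_SW_PAGE_FAULTS, is one of the existing PERF_COUNT_SW_* ids; any other would do.

static inline void example_account_fault(struct pt_regs *regs, u64 addr)
{
	/* cheap when disabled: only the enabled[] check runs */
	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
}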
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 8dc5123b6305..3c6675c2444b 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -22,6 +22,9 @@ struct platform_device {
struct resource * resource;
struct platform_device_id *id_entry;
+
+ /* arch specific additions */
+ struct pdev_archdata archdata;
};
#define platform_get_device_id(pdev) ((pdev)->id_entry)
@@ -57,8 +60,6 @@ struct platform_driver {
int (*remove)(struct platform_device *);
void (*shutdown)(struct platform_device *);
int (*suspend)(struct platform_device *, pm_message_t state);
- int (*suspend_late)(struct platform_device *, pm_message_t state);
- int (*resume_early)(struct platform_device *);
int (*resume)(struct platform_device *);
struct device_driver driver;
struct platform_device_id *id_table;
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 6729f7dcd60e..229322707f5f 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -2,13 +2,25 @@
#define _LINUX_POISON_H
/********** include/linux/list.h **********/
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * one that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
-#define LIST_POISON1 ((void *) 0x00100100)
-#define LIST_POISON2 ((void *) 0x00200200)
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
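
A quick worked expansion of the new delta, using a hypothetical 64-bit configuration value; with the option unset the poison values stay at their historic addresses.

/* Assume: CONFIG_ILLEGAL_POINTER_VALUE = 0xdead000000000000 */
/* POISON_POINTER_DELTA == 0xdead000000000000UL               */
/* LIST_POISON1         == (void *)0xdead000000100100         */
/* LIST_POISON2         == (void *)0xdead000000200200         */
/* Neither value is NULL, and neither lies in a user-mappable range. */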
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 594c494ac3f0..4c7c6fc35487 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -39,6 +39,13 @@ enum {
};
enum {
+ POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0,
+ POWER_SUPPLY_CHARGE_TYPE_NONE,
+ POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
+ POWER_SUPPLY_CHARGE_TYPE_FAST,
+};
+
+enum {
POWER_SUPPLY_HEALTH_UNKNOWN = 0,
POWER_SUPPLY_HEALTH_GOOD,
POWER_SUPPLY_HEALTH_OVERHEAT,
@@ -58,9 +65,19 @@ enum {
POWER_SUPPLY_TECHNOLOGY_LiMn,
};
+enum {
+ POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0,
+ POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL,
+ POWER_SUPPLY_CAPACITY_LEVEL_LOW,
+ POWER_SUPPLY_CAPACITY_LEVEL_NORMAL,
+ POWER_SUPPLY_CAPACITY_LEVEL_HIGH,
+ POWER_SUPPLY_CAPACITY_LEVEL_FULL,
+};
+
enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
@@ -89,6 +106,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_ENERGY_AVG,
POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_AMBIENT,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
diff --git a/include/linux/rcu_types.h b/include/linux/rcu_types.h
new file mode 100644
index 000000000000..fd3570d0e6c1
--- /dev/null
+++ b/include/linux/rcu_types.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_RCU_TYPES_H
+#define __LINUX_RCU_TYPES_H
+
+#ifdef __KERNEL__
+
+/**
+ * struct rcu_head - callback structure for use with RCU
+ * @next: next update requests in a list
+ * @func: actual update function to call after the grace period.
+ */
+struct rcu_head {
+ struct rcu_head *next;
+ void (*func)(struct rcu_head *head);
+};
+
+#endif
+
+#endif
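
For context, a minimal sketch of the standard way struct rcu_head is used: embed it in the protected object and pass it to call_rcu() (declared in rcupdate.h) so the object is freed only after a grace period.

struct example_node {
	int value;
	struct rcu_head rcu;		/* embedded callback structure */
};

static void example_node_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_node, rcu));
}

static void example_node_release(struct example_node *n)
{
	/* readers inside rcu_read_lock() may still see n; defer the free */
	call_rcu(&n->rcu, example_node_free_rcu);
}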
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
deleted file mode 100644
index bfd92e1e5d2c..000000000000
--- a/include/linux/rcuclassic.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (classic version)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2001
- *
- * Author: Dipankar Sarma <dipankar@in.ibm.com>
- *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUCLASSIC_H
-#define __LINUX_RCUCLASSIC_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
- long cur; /* Current batch number. */
- long completed; /* Number of the last completed batch */
- long pending; /* Number of the last pending batch */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- unsigned long gp_start; /* Time at which GP started in jiffies. */
- unsigned long jiffies_stall;
- /* Time at which to check for CPU stalls. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
- int signaled;
-
- spinlock_t lock ____cacheline_internodealigned_in_smp;
- DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
- /* current batch to proceed. */
-} ____cacheline_internodealigned_in_smp;
-
-/* Is batch a before batch b ? */
-static inline int rcu_batch_before(long a, long b)
-{
- return (a - b) < 0;
-}
-
-/* Is batch a after batch b ? */
-static inline int rcu_batch_after(long a, long b)
-{
- return (a - b) > 0;
-}
-
-/* Per-CPU data for Read-Copy UPdate. */
-struct rcu_data {
- /* 1) quiescent state handling : */
- long quiescbatch; /* Batch # for grace period */
- int passed_quiesc; /* User-mode/idle loop etc. */
- int qs_pending; /* core waits for quiesc state */
-
- /* 2) batch handling */
- /*
- * if nxtlist is not NULL, then:
- * batch:
- * The batch # for the last entry of nxtlist
- * [*nxttail[1], NULL = *nxttail[2]):
- * Entries that batch # <= batch
- * [*nxttail[0], *nxttail[1]):
- * Entries that batch # <= batch - 1
- * [nxtlist, *nxttail[0]):
- * Entries that batch # <= batch - 2
- * The grace period for these entries has completed, and
- * the other grace-period-completed entries may be moved
- * here temporarily in rcu_process_callbacks().
- */
- long batch;
- struct rcu_head *nxtlist;
- struct rcu_head **nxttail[3];
- long qlen; /* # of queued callbacks */
- struct rcu_head *donelist;
- struct rcu_head **donetail;
- long blimit; /* Upper limit on a processed batch */
- int cpu;
- struct rcu_head barrier;
-};
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
-
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire() \
- lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire() do { } while (0)
-# define rcu_read_release() do { } while (0)
-#endif
-
-#define __rcu_read_lock() \
- do { \
- preempt_disable(); \
- __acquire(RCU); \
- rcu_read_acquire(); \
- } while (0)
-#define __rcu_read_unlock() \
- do { \
- rcu_read_release(); \
- __release(RCU); \
- preempt_enable(); \
- } while (0)
-#define __rcu_read_lock_bh() \
- do { \
- local_bh_disable(); \
- __acquire(RCU_BH); \
- rcu_read_acquire(); \
- } while (0)
-#define __rcu_read_unlock_bh() \
- do { \
- rcu_read_release(); \
- __release(RCU_BH); \
- local_bh_enable(); \
- } while (0)
-
-#define __synchronize_sched() synchronize_rcu()
-
-#define call_rcu_sched(head, func) call_rcu(head, func)
-
-extern void __rcu_init(void);
-#define rcu_init_sched() do { } while (0)
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
-
-#define rcu_enter_nohz() do { } while (0)
-#define rcu_exit_nohz() do { } while (0)
-
-/* A context switch is a grace period for rcuclassic. */
-static inline int rcu_blocking_is_gp(void)
-{
- return num_online_cpus() == 1;
-}
-
-#endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 15fbb3ca634d..f8cad2b20778 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -33,6 +33,7 @@
#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
+#include <linux/rcu_types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
@@ -41,22 +42,22 @@
#include <linux/lockdep.h>
#include <linux/completion.h>
-/**
- * struct rcu_head - callback structure for use with RCU
- * @next: next update requests in a list
- * @func: actual update function to call after the grace period.
- */
-struct rcu_head {
- struct rcu_head *next;
- void (*func)(struct rcu_head *head);
-};
+/* Exported common interfaces */
+extern void synchronize_rcu(void);
+extern void synchronize_rcu_bh(void);
+extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
+extern void synchronize_sched_expedited(void);
+extern int sched_expedited_torture_stats(char *page);
-/* Internal to kernel, but needed by rcupreempt.h. */
+/* Internal to kernel */
+extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
+extern int rcu_needs_cpu(int cpu);
extern int rcu_scheduler_active;
-#if defined(CONFIG_CLASSIC_RCU)
-#include <linux/rcuclassic.h>
-#elif defined(CONFIG_TREE_RCU)
+#if defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_PREEMPT_RCU)
#include <linux/rcupreempt.h>
@@ -259,15 +260,4 @@ extern void call_rcu(struct rcu_head *head,
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
-/* Exported common interfaces */
-extern void synchronize_rcu(void);
-extern void rcu_barrier(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
-/* Internal to kernel */
-extern void rcu_init(void);
-extern void rcu_scheduler_starting(void);
-extern int rcu_needs_cpu(int cpu);
-
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index fce522782ffa..f164ac9b7807 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -74,6 +74,16 @@ extern int rcu_needs_cpu(int cpu);
extern void __synchronize_sched(void);
+static inline void synchronize_rcu_expedited(void)
+{
+ synchronize_rcu(); /* Placeholder for new rcupreempt implementation. */
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_rcu_bh(); /* Placeholder for new rcupreempt implementation. */
+}
+
extern void __rcu_init(void);
extern void rcu_init_sched(void);
extern void rcu_check_callbacks(int cpu, int user);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5a5153806c42..d4dfd2489633 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -286,8 +286,14 @@ static inline void __rcu_read_unlock_bh(void)
#define call_rcu_sched(head, func) call_rcu(head, func)
-static inline void rcu_init_sched(void)
+static inline void synchronize_rcu_expedited(void)
+{
+ synchronize_sched_expedited();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
{
+ synchronize_sched_expedited();
}
extern void __rcu_init(void);
@@ -297,6 +303,10 @@ extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
+static inline void rcu_init_sched(void)
+{
+}
+
#ifdef CONFIG_NO_HZ
void rcu_enter_nohz(void);
void rcu_exit_nohz(void);
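
A hedged sketch of where the expedited variants fit: an infrequent update path willing to spend extra CPU/IPI work to shorten the wait for pre-existing readers. The structure and global pointer below are hypothetical.

static void example_replace_table(struct example_table *new_tbl)
{
	struct example_table *old_tbl = global_table;	/* hypothetical global */

	rcu_assign_pointer(global_table, new_tbl);
	synchronize_rcu_expedited();	/* faster than synchronize_rcu(), costs IPIs */
	kfree(old_tbl);
}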
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index bac64fa390f2..9328090eca20 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -126,13 +126,18 @@ struct regulation_constraints {
/**
* struct regulator_consumer_supply - supply -> device mapping
*
- * This maps a supply name to a device.
+ * This maps a supply name to a device. Only one of dev or dev_name
+ * can be specified. Using dev_name allows support for buses which
+ * make struct device available late, such as I2C, and is the
+ * preferred form.
*
* @dev: Device structure for the consumer.
+ * @dev_name: Result of dev_name() for the consumer.
* @supply: Name for the supply.
*/
struct regulator_consumer_supply {
struct device *dev; /* consumer */
+ const char *dev_name; /* dev_name() for consumer */
const char *supply; /* consumer supply - e.g. "vcc" */
};
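
A hedged board-code sketch of the new dev_name form of the mapping; the device name below follows the usual I2C naming convention but is invented for illustration:

static struct regulator_consumer_supply example_ldo1_consumers[] = {
	{
		.dev_name = "1-001a",	/* illustrative I2C consumer device */
		.supply   = "vcc",
	},
};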
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index dd31e7bae35c..a498d9266d8c 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -52,11 +52,63 @@
#define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION
#define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION
-/* Locking primitives */
-/* Right now we are still falling back to (un)lock_kernel, but eventually that
- would evolve into real per-fs locks */
-#define reiserfs_write_lock( sb ) lock_kernel()
-#define reiserfs_write_unlock( sb ) unlock_kernel()
+/*
+ * Locking primitives. The write lock is a per superblock
+ * special mutex that has properties close to the Big Kernel Lock
+ * which was used in the previous locking scheme.
+ */
+void reiserfs_write_lock(struct super_block *s);
+void reiserfs_write_unlock(struct super_block *s);
+int reiserfs_write_lock_once(struct super_block *s);
+void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
+
+/*
+ * Several mutexes depend on the write lock.
+ * However sometimes we want to relax the write lock while we hold
+ * these mutexes, according to the release/reacquire on schedule()
+ * properties of the Bkl that were used.
+ * Reiserfs performance and locking were based on this scheme.
+ * Now that the write lock is a mutex and not the bkl anymore, doing so
+ * may result in a deadlock:
+ *
+ * A acquire write_lock
+ * A acquire j_commit_mutex
+ * A release write_lock and wait for something
+ * B acquire write_lock
+ * B can't acquire j_commit_mutex and sleep
+ * A can't acquire write lock anymore
+ * deadlock
+ *
+ * What we do here is avoid such deadlocks by playing the same game
+ * as the BKL: if we can't acquire a mutex that depends on the write lock,
+ * we release the write lock, wait a bit and then retry.
+ *
+ * The mutexes concerned by this hack are:
+ * - The commit mutex of a journal list
+ * - The flush mutex
+ * - The journal lock
+ * - The inode mutex
+ */
+static inline void reiserfs_mutex_lock_safe(struct mutex *m,
+ struct super_block *s)
+{
+ reiserfs_write_unlock(s);
+ mutex_lock(m);
+ reiserfs_write_lock(s);
+}
+
+/*
+ * When we schedule, we usually want to also release the write lock,
+ * according to the previous bkl based locking scheme of reiserfs.
+ */
+static inline void reiserfs_cond_resched(struct super_block *s)
+{
+ if (need_resched()) {
+ reiserfs_write_unlock(s);
+ schedule();
+ reiserfs_write_lock(s);
+ }
+}
struct fid;
@@ -1329,7 +1381,11 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
#define get_generation(s) atomic_read (&fs_generation(s))
#define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
#define __fs_changed(gen,s) (gen != get_generation (s))
-#define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
+#define fs_changed(gen,s) \
+({ \
+ reiserfs_cond_resched(s); \
+ __fs_changed(gen, s); \
+})
/***************************************************************************/
/* FIXATE NODES */
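
A usage sketch of the recursion-aware lock helpers declared above; the depth-return contract is inferred from the lock_depth bookkeeping added to reiserfs_sb_info later in this patch, so treat it as an assumption rather than a guarantee:

static int example_reiserfs_op(struct super_block *sb)
{
	int depth;

	/* take the per-superblock write lock only if not already held */
	depth = reiserfs_write_lock_once(sb);

	/* ... modify filesystem structures, possibly calling code that
	 * uses reiserfs_cond_resched(sb) to relax the lock ... */

	reiserfs_write_unlock_once(sb, depth);
	return 0;
}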
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index dab68bbed675..52c83b6a758a 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -7,6 +7,8 @@
#ifdef __KERNEL__
#include <linux/workqueue.h>
#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
#endif
typedef enum {
@@ -355,6 +357,13 @@ struct reiserfs_sb_info {
struct reiserfs_journal *s_journal; /* pointer to journal information */
unsigned short s_mount_state; /* reiserfs state (valid, invalid) */
+ /* Serialize writers access, replace the old bkl */
+ struct mutex lock;
+ /* Owner of the lock (can be recursive) */
+ struct task_struct *lock_owner;
+ /* Depth of the lock, start from -1 like the bkl */
+ int lock_depth;
+
/* Comment? -Hans */
void (*end_io_handler) (struct buffer_head *, int);
hashf_t s_hash_function; /* pointer to function which is used
@@ -408,6 +417,17 @@ struct reiserfs_sb_info {
char *s_qf_names[MAXQUOTAS];
int s_jquota_fmt;
#endif
+#ifdef CONFIG_REISERFS_CHECK
+
+ struct tree_balance *cur_tb; /*
+ * Detects whether more than one
+ * copy of tb exists per superblock
+ * as a means of checking whether
+ * do_balance is executing concurrently
+ * against another tree reader/writer
+ * on the same mount point.
+ */
+#endif
};
/* Definitions of reiserfs on-disk properties: */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4d0754269884..f1ca01fc0826 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,8 @@
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
+/* Can be ORed in to make sure the process is reverted to SCHED_NORMAL on fork */
+#define SCHED_RESET_ON_FORK 0x40000000
#ifdef __KERNEL__
@@ -349,8 +351,20 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
struct nsproxy;
struct user_namespace;
-/* Maximum number of active map areas.. This is a random (large) number */
-#define DEFAULT_MAX_MAP_COUNT 65536
+/*
+ * Default maximum number of active map areas; this limits the number of vmas
+ * per mm struct. Users can override this limit via sysctl, but there is a
+ * problem.
+ *
+ * When a program's coredump is generated in ELF format, one section is created
+ * per vma. In ELF, the number of sections is represented as an unsigned short,
+ * so it must stay smaller than 65535 at coredump time.
+ * Because the kernel adds some informative sections to the coredump image,
+ * we need some margin. The number of extra sections is currently 1-3 and
+ * depends on the architecture, so we use "5" as a safe margin here.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN (5)
+#define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
extern int sysctl_max_map_count;
@@ -1208,6 +1222,10 @@ struct task_struct {
unsigned did_exec:1;
unsigned in_execve:1; /* Tell the LSMs that the process is doing an
* execve */
+
+ /* Revert to default priority/policy when forking */
+ unsigned sched_reset_on_fork:1;
+
pid_t pid;
pid_t tgid;
@@ -1713,10 +1731,13 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return 0;
}
#endif
+
+#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
return set_cpus_allowed_ptr(p, &new_mask);
}
+#endif
/*
* Architectures can set this to 1 if they have specified
diff --git a/include/linux/security.h b/include/linux/security.h
index 5eff459b3833..145909165dbf 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -52,7 +52,7 @@ struct audit_krule;
extern int cap_capable(struct task_struct *tsk, const struct cred *cred,
int cap, int audit);
extern int cap_settime(struct timespec *ts, struct timezone *tz);
-extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode);
+extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset(struct cred *new, const struct cred *old,
@@ -1209,7 +1209,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @alter contains the flag indicating whether changes are to be made.
* Return 0 if permission is granted.
*
- * @ptrace_may_access:
+ * @ptrace_access_check:
* Check permission before allowing the current process to trace the
* @child process.
* Security modules may also want to perform a process tracing check
@@ -1224,7 +1224,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Check that the @parent process has sufficient permission to trace the
* current process before allowing the current process to present itself
* to the @parent process for tracing.
- * The parent process will still have to undergo the ptrace_may_access
+ * The parent process will still have to undergo the ptrace_access_check
* checks before it is allowed to trace this one.
* @parent contains the task_struct structure for debugger process.
* Return 0 if permission is granted.
@@ -1336,7 +1336,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
struct security_operations {
char name[SECURITY_NAME_MAX + 1];
- int (*ptrace_may_access) (struct task_struct *child, unsigned int mode);
+ int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
int (*ptrace_traceme) (struct task_struct *parent);
int (*capget) (struct task_struct *target,
kernel_cap_t *effective,
@@ -1617,7 +1617,7 @@ extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
/* Security operations */
-int security_ptrace_may_access(struct task_struct *child, unsigned int mode);
+int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
int security_capget(struct task_struct *target,
kernel_cap_t *effective,
@@ -1798,10 +1798,10 @@ static inline int security_init(void)
return 0;
}
-static inline int security_ptrace_may_access(struct task_struct *child,
+static inline int security_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
- return cap_ptrace_may_access(child, mode);
+ return cap_ptrace_access_check(child, mode);
}
static inline int security_ptrace_traceme(struct task_struct *parent)
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index d4d2a78ad43e..fb46aba11fb5 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -22,6 +22,7 @@ struct plat_serial8250_port {
void __iomem *membase; /* ioremap cookie or NULL */
resource_size_t mapbase; /* resource base */
unsigned int irq; /* interrupt number */
+ unsigned long irqflags; /* request_irq flags */
unsigned int uartclk; /* UART clock rate */
void *private_data;
unsigned char regshift; /* register shift */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 23d2fb051f97..3cd255f0b211 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -265,6 +265,7 @@ struct uart_port {
unsigned int (*serial_in)(struct uart_port *, int);
void (*serial_out)(struct uart_port *, int, int);
unsigned int irq; /* irq number */
+ unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
unsigned int fifosize; /* tx fifo size */
unsigned char x_char; /* xon/xoff char */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2da8372519f5..62d2b16d9ff2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -74,6 +74,10 @@
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
+
+/* The following flag bits are reserved for allocator-specific flags */
+#define SLAB_ALLOC_PRIVATE 0x000000ffUL
+
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
@@ -160,6 +164,8 @@ size_t ksize(const void *);
*/
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
+#elif defined(CONFIG_SLQB)
+#include <linux/slqb_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
@@ -262,7 +268,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || defined(CONFIG_SLQB_DEBUG)
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -280,7 +286,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
* standard allocator where we care about the real place the memory
* allocation request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || defined(CONFIG_SLQB_DEBUG)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
diff --git a/include/linux/slqb_def.h b/include/linux/slqb_def.h
new file mode 100644
index 000000000000..1243dda50473
--- /dev/null
+++ b/include/linux/slqb_def.h
@@ -0,0 +1,301 @@
+#ifndef _LINUX_SLQB_DEF_H
+#define _LINUX_SLQB_DEF_H
+
+/*
+ * SLQB : A slab allocator with object queues.
+ *
+ * (C) 2008 Nick Piggin <npiggin@suse.de>
+ */
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/rcu_types.h>
+#include <linux/mm_types.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+
+#define SLAB_NUMA 0x00000001UL /* shortcut */
+
+enum stat_item {
+ ALLOC, /* Allocation count */
+ ALLOC_SLAB_FILL, /* Fill freelist from page list */
+ ALLOC_SLAB_NEW, /* New slab acquired from page allocator */
+ FREE, /* Free count */
+ FREE_REMOTE, /* NUMA: freeing to remote list */
+ FLUSH_FREE_LIST, /* Freelist flushed */
+ FLUSH_FREE_LIST_OBJECTS, /* Objects flushed from freelist */
+ FLUSH_FREE_LIST_REMOTE, /* Objects flushed from freelist to remote */
+ FLUSH_SLAB_PARTIAL, /* Freeing moves slab to partial list */
+ FLUSH_SLAB_FREE, /* Slab freed to the page allocator */
+ FLUSH_RFREE_LIST, /* Rfree list flushed */
+ FLUSH_RFREE_LIST_OBJECTS, /* Rfree objects flushed */
+ CLAIM_REMOTE_LIST, /* Remote freed list claimed */
+ CLAIM_REMOTE_LIST_OBJECTS, /* Remote freed objects claimed */
+ NR_SLQB_STAT_ITEMS
+};
+
+/*
+ * Singly-linked list with head, tail, and nr
+ */
+struct kmlist {
+ unsigned long nr;
+ void **head;
+ void **tail;
+};
+
+/*
+ * Every kmem_cache_list has a kmem_cache_remote_free structure, by which
+ * objects can be returned to the kmem_cache_list from remote CPUs.
+ */
+struct kmem_cache_remote_free {
+ spinlock_t lock;
+ struct kmlist list;
+} ____cacheline_aligned;
+
+/*
+ * A kmem_cache_list manages all the slabs and objects allocated from a given
+ * source. Per-cpu kmem_cache_lists allow node-local allocations. Per-node
+ * kmem_cache_lists allow off-node allocations (but require locking).
+ */
+struct kmem_cache_list {
+ /* Fastpath LIFO freelist of objects */
+ struct kmlist freelist;
+#ifdef CONFIG_SMP
+ /* remote_free has reached a watermark */
+ int remote_free_check;
+#endif
+ /* kmem_cache corresponding to this list */
+ struct kmem_cache *cache;
+
+ /* Number of partial slabs (pages) */
+ unsigned long nr_partial;
+
+ /* Slabs which have some free objects */
+ struct list_head partial;
+
+ /* Total number of slabs allocated */
+ unsigned long nr_slabs;
+
+ /* Protects nr_partial, nr_slabs, and partial */
+ spinlock_t page_lock;
+
+#ifdef CONFIG_SMP
+ /*
+ * In the case of per-cpu lists, remote_free is for objects freed by a
+ * non-owner CPU back to their home list. For per-node lists, remote_free
+ * is always used to free objects.
+ */
+ struct kmem_cache_remote_free remote_free;
+#endif
+
+#ifdef CONFIG_SLQB_STATS
+ unsigned long stats[NR_SLQB_STAT_ITEMS];
+#endif
+} ____cacheline_aligned;
+
+/*
+ * Primary per-cpu, per-kmem_cache structure.
+ */
+struct kmem_cache_cpu {
+ struct kmem_cache_list list; /* List for node-local slabs */
+ unsigned int colour_next; /* Next colour offset to use */
+
+#ifdef CONFIG_SMP
+ /*
+ * rlist is a list of objects that don't fit on list.freelist (i.e.
+ * wrong node). The objects all correspond to a given kmem_cache_list,
+ * remote_cache_list. To free objects to another list, we must first
+ * flush the existing objects, then switch remote_cache_list.
+ *
+ * An NR_CPUS or MAX_NUMNODES array would be nice here, but then we
+ * would end up in an O(NR_CPUS^2) memory consumption situation.
+ */
+ struct kmlist rlist;
+ struct kmem_cache_list *remote_cache_list;
+#endif
+} ____cacheline_aligned_in_smp;
+
+/*
+ * Per-node, per-kmem_cache structure. Used for node-specific allocations.
+ */
+struct kmem_cache_node {
+ struct kmem_cache_list list;
+ spinlock_t list_lock; /* protects access to list */
+} ____cacheline_aligned;
+
+/*
+ * Management object for a slab cache.
+ */
+struct kmem_cache {
+ unsigned long flags;
+ int hiwater; /* LIFO list high watermark */
+ int freebatch; /* LIFO freelist batch flush size */
+#ifdef CONFIG_SMP
+ struct kmem_cache_cpu **cpu_slab; /* dynamic per-cpu structures */
+#else
+ struct kmem_cache_cpu cpu_slab;
+#endif
+ int objsize; /* Size of object without meta data */
+ int offset; /* Free pointer offset. */
+ int objects; /* Number of objects in slab */
+
+#ifdef CONFIG_NUMA
+ struct kmem_cache_node **node_slab; /* dynamic per-node structures */
+#endif
+
+ int size; /* Size of object including meta data */
+ int order; /* Allocation order */
+ gfp_t allocflags; /* gfp flags to use on allocation */
+ unsigned int colour_range; /* range of colour counter */
+ unsigned int colour_off; /* offset per colour */
+ void (*ctor)(void *);
+
+ const char *name; /* Name (only for display!) */
+ struct list_head list; /* List of slab caches */
+
+ int align; /* Alignment */
+ int inuse; /* Offset to metadata */
+
+#ifdef CONFIG_SLQB_SYSFS
+ struct kobject kobj; /* For sysfs */
+#endif
+} ____cacheline_aligned;
+
+/*
+ * Kmalloc subsystem.
+ */
+#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#else
+#define KMALLOC_MIN_SIZE 8
+#endif
+
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
+#define KMALLOC_SHIFT_SLQB_HIGH (PAGE_SHIFT + \
+ ((9 <= (MAX_ORDER - 1)) ? 9 : (MAX_ORDER - 1)))
+
+extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_SLQB_HIGH + 1];
+extern struct kmem_cache kmalloc_caches_dma[KMALLOC_SHIFT_SLQB_HIGH + 1];
+
+/*
+ * Constant size allocations use this path to find the index into the kmalloc
+ * cache arrays. The get_slab() function is used for non-constant sizes.
+ */
+static __always_inline int kmalloc_index(size_t size)
+{
+ extern int ____kmalloc_too_large(void);
+
+ if (unlikely(size <= KMALLOC_MIN_SIZE))
+ return KMALLOC_SHIFT_LOW;
+
+#if L1_CACHE_BYTES < 64
+ if (size > 64 && size <= 96)
+ return 1;
+#endif
+#if L1_CACHE_BYTES < 128
+ if (size > 128 && size <= 192)
+ return 2;
+#endif
+ if (size <= 8) return 3;
+ if (size <= 16) return 4;
+ if (size <= 32) return 5;
+ if (size <= 64) return 6;
+ if (size <= 128) return 7;
+ if (size <= 256) return 8;
+ if (size <= 512) return 9;
+ if (size <= 1024) return 10;
+ if (size <= 2 * 1024) return 11;
+ if (size <= 4 * 1024) return 12;
+ if (size <= 8 * 1024) return 13;
+ if (size <= 16 * 1024) return 14;
+ if (size <= 32 * 1024) return 15;
+ if (size <= 64 * 1024) return 16;
+ if (size <= 128 * 1024) return 17;
+ if (size <= 256 * 1024) return 18;
+ if (size <= 512 * 1024) return 19;
+ if (size <= 1024 * 1024) return 20;
+ if (size <= 2 * 1024 * 1024) return 21;
+ if (size <= 4 * 1024 * 1024) return 22;
+ if (size <= 8 * 1024 * 1024) return 23;
+ if (size <= 16 * 1024 * 1024) return 24;
+ if (size <= 32 * 1024 * 1024) return 25;
+ return ____kmalloc_too_large();
+}
+
+#ifdef CONFIG_ZONE_DMA
+#define SLQB_DMA __GFP_DMA
+#else
+/* Disable "DMA slabs" */
+#define SLQB_DMA (__force gfp_t)0
+#endif
+
+/*
+ * Find the kmalloc slab cache for a given combination of allocation flags and
+ * size. Should really only be used for constant 'size' arguments, due to
+ * bloat.
+ */
+static __always_inline struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+{
+ int index;
+
+ if (unlikely(size > 1UL << KMALLOC_SHIFT_SLQB_HIGH))
+ return NULL;
+ if (unlikely(!size))
+ return ZERO_SIZE_PTR;
+
+ index = kmalloc_index(size);
+ if (likely(!(flags & SLQB_DMA)))
+ return &kmalloc_caches[index];
+ else
+ return &kmalloc_caches_dma[index];
+}
+
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
+
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#define KMALLOC_HEADER (ARCH_KMALLOC_MINALIGN < sizeof(void *) ? \
+ sizeof(void *) : ARCH_KMALLOC_MINALIGN)
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ struct kmem_cache *s;
+
+ s = kmalloc_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
+ return s;
+
+ return kmem_cache_alloc(s, flags);
+ }
+ return __kmalloc(size, flags);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size)) {
+ struct kmem_cache *s;
+
+ s = kmalloc_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
+ return s;
+
+ return kmem_cache_alloc_node(s, flags, node);
+ }
+ return __kmalloc_node(size, flags, node);
+}
+#endif
+
+#endif /* _LINUX_SLQB_DEF_H */
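
To make the constant-size fast path concrete, a small sketch: a compile-time-constant request is resolved through kmalloc_index()/kmalloc_slab() (here 100 bytes maps to index 7, the <=128-byte cache) and calls kmem_cache_alloc() directly, while a runtime-sized request falls back to __kmalloc().

static void example_slqb_alloc(size_t runtime_len)
{
	void *a, *b;

	/* constant size: kmalloc_index(100) == 7, so this becomes
	 * kmem_cache_alloc(&kmalloc_caches[7], GFP_KERNEL) at compile time */
	a = kmalloc(100, GFP_KERNEL);

	/* non-constant size: handled by __kmalloc() at run time */
	b = kmalloc(runtime_len, GFP_KERNEL);

	kfree(a);
	kfree(b);
}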
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9e3d8af09207..39c64bae776d 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -73,15 +73,6 @@ int smp_call_function(void(*func)(void *info), void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
void (*func)(void *info), void *info, bool wait);
-/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
-static inline int
-smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
- int wait)
-{
- smp_call_function_many(&mask, func, info, wait);
- return 0;
-}
-
void __smp_call_function_single(int cpuid, struct call_single_data *data,
int wait);
@@ -144,8 +135,6 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
-#define smp_call_function_mask(mask, func, info, wait) \
- (up_smp_call_function(func, info))
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
static inline void init_call_single_data(void)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 9c4cd27f4685..c47c4b4da97e 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -80,6 +80,8 @@ struct spi_device {
#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
#define SPI_3WIRE 0x10 /* SI/SO signals shared */
#define SPI_LOOP 0x20 /* loopback mode */
+#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
+#define SPI_READY 0x80 /* slave pulls low to pause */
u8 bits_per_word;
int irq;
void *controller_state;
@@ -248,6 +250,10 @@ struct spi_master {
/* spi_device.mode flags understood by this controller driver */
u16 mode_bits;
+ /* other constraints relevant to this driver */
+ u16 flags;
+#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */
+
/* Setup mode and clock, etc (spi driver may call many times).
*
* IMPORTANT: this may be called when transfers to another
diff --git a/include/linux/spi/spidev.h b/include/linux/spi/spidev.h
index 95251ccd5a07..bf0570a84f7a 100644
--- a/include/linux/spi/spidev.h
+++ b/include/linux/spi/spidev.h
@@ -40,6 +40,8 @@
#define SPI_LSB_FIRST 0x08
#define SPI_3WIRE 0x10
#define SPI_LOOP 0x20
+#define SPI_NO_CS 0x40
+#define SPI_READY 0x80
/*---------------------------------------------------------------------------*/
diff --git a/include/linux/timer.h b/include/linux/timer.h
index ccf882eed8f8..be62ec2ebea5 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -190,6 +190,8 @@ extern unsigned long get_next_timer_interrupt(unsigned long now);
*/
#ifdef CONFIG_TIMER_STATS
+extern int timer_stats_active;
+
#define TIMER_STATS_FLAG_DEFERRABLE 0x1
extern void init_timer_stats(void);
@@ -203,6 +205,8 @@ extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
{
+ if (likely(!timer_stats_active))
+ return;
__timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
}
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 7402c1a27c4f..b8dd079e7deb 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -183,12 +183,6 @@ int arch_update_cpu_topology(void);
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
-#ifndef topology_thread_siblings
-#define topology_thread_siblings(cpu) cpumask_of_cpu(cpu)
-#endif
-#ifndef topology_core_siblings
-#define topology_core_siblings(cpu) cpumask_of_cpu(cpu)
-#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu) cpumask_of(cpu)
#endif
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 5d44059f6d63..310e18a880ff 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -42,7 +42,6 @@ struct usbnet {
/* protocol/interface state */
struct net_device *net;
- struct net_device_stats stats;
int msg_enable;
unsigned long data [5];
u32 xid;
diff --git a/include/linux/usb/video.h b/include/linux/usb/video.h
new file mode 100644
index 000000000000..be436d9ee479
--- /dev/null
+++ b/include/linux/usb/video.h
@@ -0,0 +1,164 @@
+/*
+ * USB Video Class definitions.
+ *
+ * Copyright (C) 2009 Laurent Pinchart <laurent.pinchart@skynet.be>
+ *
+ * This file holds USB constants and structures defined by the USB Device
+ * Class Definition for Video Devices. Unless otherwise stated, comments
+ * below reference relevant sections of the USB Video Class 1.1 specification
+ * available at
+ *
+ * http://www.usb.org/developers/devclass_docs/USB_Video_Class_1_1.zip
+ */
+
+#ifndef __LINUX_USB_VIDEO_H
+#define __LINUX_USB_VIDEO_H
+
+#include <linux/types.h>
+
+/* --------------------------------------------------------------------------
+ * UVC constants
+ */
+
+/* A.2. Video Interface Subclass Codes */
+#define UVC_SC_UNDEFINED 0x00
+#define UVC_SC_VIDEOCONTROL 0x01
+#define UVC_SC_VIDEOSTREAMING 0x02
+#define UVC_SC_VIDEO_INTERFACE_COLLECTION 0x03
+
+/* A.3. Video Interface Protocol Codes */
+#define UVC_PC_PROTOCOL_UNDEFINED 0x00
+
+/* A.5. Video Class-Specific VC Interface Descriptor Subtypes */
+#define UVC_VC_DESCRIPTOR_UNDEFINED 0x00
+#define UVC_VC_HEADER 0x01
+#define UVC_VC_INPUT_TERMINAL 0x02
+#define UVC_VC_OUTPUT_TERMINAL 0x03
+#define UVC_VC_SELECTOR_UNIT 0x04
+#define UVC_VC_PROCESSING_UNIT 0x05
+#define UVC_VC_EXTENSION_UNIT 0x06
+
+/* A.6. Video Class-Specific VS Interface Descriptor Subtypes */
+#define UVC_VS_UNDEFINED 0x00
+#define UVC_VS_INPUT_HEADER 0x01
+#define UVC_VS_OUTPUT_HEADER 0x02
+#define UVC_VS_STILL_IMAGE_FRAME 0x03
+#define UVC_VS_FORMAT_UNCOMPRESSED 0x04
+#define UVC_VS_FRAME_UNCOMPRESSED 0x05
+#define UVC_VS_FORMAT_MJPEG 0x06
+#define UVC_VS_FRAME_MJPEG 0x07
+#define UVC_VS_FORMAT_MPEG2TS 0x0a
+#define UVC_VS_FORMAT_DV 0x0c
+#define UVC_VS_COLORFORMAT 0x0d
+#define UVC_VS_FORMAT_FRAME_BASED 0x10
+#define UVC_VS_FRAME_FRAME_BASED 0x11
+#define UVC_VS_FORMAT_STREAM_BASED 0x12
+
+/* A.7. Video Class-Specific Endpoint Descriptor Subtypes */
+#define UVC_EP_UNDEFINED 0x00
+#define UVC_EP_GENERAL 0x01
+#define UVC_EP_ENDPOINT 0x02
+#define UVC_EP_INTERRUPT 0x03
+
+/* A.8. Video Class-Specific Request Codes */
+#define UVC_RC_UNDEFINED 0x00
+#define UVC_SET_CUR 0x01
+#define UVC_GET_CUR 0x81
+#define UVC_GET_MIN 0x82
+#define UVC_GET_MAX 0x83
+#define UVC_GET_RES 0x84
+#define UVC_GET_LEN 0x85
+#define UVC_GET_INFO 0x86
+#define UVC_GET_DEF 0x87
+
+/* A.9.1. VideoControl Interface Control Selectors */
+#define UVC_VC_CONTROL_UNDEFINED 0x00
+#define UVC_VC_VIDEO_POWER_MODE_CONTROL 0x01
+#define UVC_VC_REQUEST_ERROR_CODE_CONTROL 0x02
+
+/* A.9.2. Terminal Control Selectors */
+#define UVC_TE_CONTROL_UNDEFINED 0x00
+
+/* A.9.3. Selector Unit Control Selectors */
+#define UVC_SU_CONTROL_UNDEFINED 0x00
+#define UVC_SU_INPUT_SELECT_CONTROL 0x01
+
+/* A.9.4. Camera Terminal Control Selectors */
+#define UVC_CT_CONTROL_UNDEFINED 0x00
+#define UVC_CT_SCANNING_MODE_CONTROL 0x01
+#define UVC_CT_AE_MODE_CONTROL 0x02
+#define UVC_CT_AE_PRIORITY_CONTROL 0x03
+#define UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL 0x04
+#define UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL 0x05
+#define UVC_CT_FOCUS_ABSOLUTE_CONTROL 0x06
+#define UVC_CT_FOCUS_RELATIVE_CONTROL 0x07
+#define UVC_CT_FOCUS_AUTO_CONTROL 0x08
+#define UVC_CT_IRIS_ABSOLUTE_CONTROL 0x09
+#define UVC_CT_IRIS_RELATIVE_CONTROL 0x0a
+#define UVC_CT_ZOOM_ABSOLUTE_CONTROL 0x0b
+#define UVC_CT_ZOOM_RELATIVE_CONTROL 0x0c
+#define UVC_CT_PANTILT_ABSOLUTE_CONTROL 0x0d
+#define UVC_CT_PANTILT_RELATIVE_CONTROL 0x0e
+#define UVC_CT_ROLL_ABSOLUTE_CONTROL 0x0f
+#define UVC_CT_ROLL_RELATIVE_CONTROL 0x10
+#define UVC_CT_PRIVACY_CONTROL 0x11
+
+/* A.9.5. Processing Unit Control Selectors */
+#define UVC_PU_CONTROL_UNDEFINED 0x00
+#define UVC_PU_BACKLIGHT_COMPENSATION_CONTROL 0x01
+#define UVC_PU_BRIGHTNESS_CONTROL 0x02
+#define UVC_PU_CONTRAST_CONTROL 0x03
+#define UVC_PU_GAIN_CONTROL 0x04
+#define UVC_PU_POWER_LINE_FREQUENCY_CONTROL 0x05
+#define UVC_PU_HUE_CONTROL 0x06
+#define UVC_PU_SATURATION_CONTROL 0x07
+#define UVC_PU_SHARPNESS_CONTROL 0x08
+#define UVC_PU_GAMMA_CONTROL 0x09
+#define UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL 0x0a
+#define UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL 0x0b
+#define UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL 0x0c
+#define UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL 0x0d
+#define UVC_PU_DIGITAL_MULTIPLIER_CONTROL 0x0e
+#define UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL 0x0f
+#define UVC_PU_HUE_AUTO_CONTROL 0x10
+#define UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL 0x11
+#define UVC_PU_ANALOG_LOCK_STATUS_CONTROL 0x12
+
+/* A.9.7. VideoStreaming Interface Control Selectors */
+#define UVC_VS_CONTROL_UNDEFINED 0x00
+#define UVC_VS_PROBE_CONTROL 0x01
+#define UVC_VS_COMMIT_CONTROL 0x02
+#define UVC_VS_STILL_PROBE_CONTROL 0x03
+#define UVC_VS_STILL_COMMIT_CONTROL 0x04
+#define UVC_VS_STILL_IMAGE_TRIGGER_CONTROL 0x05
+#define UVC_VS_STREAM_ERROR_CODE_CONTROL 0x06
+#define UVC_VS_GENERATE_KEY_FRAME_CONTROL 0x07
+#define UVC_VS_UPDATE_FRAME_SEGMENT_CONTROL 0x08
+#define UVC_VS_SYNC_DELAY_CONTROL 0x09
+
+/* B.1. USB Terminal Types */
+#define UVC_TT_VENDOR_SPECIFIC 0x0100
+#define UVC_TT_STREAMING 0x0101
+
+/* B.2. Input Terminal Types */
+#define UVC_ITT_VENDOR_SPECIFIC 0x0200
+#define UVC_ITT_CAMERA 0x0201
+#define UVC_ITT_MEDIA_TRANSPORT_INPUT 0x0202
+
+/* B.3. Output Terminal Types */
+#define UVC_OTT_VENDOR_SPECIFIC 0x0300
+#define UVC_OTT_DISPLAY 0x0301
+#define UVC_OTT_MEDIA_TRANSPORT_OUTPUT 0x0302
+
+/* B.4. External Terminal Types */
+#define UVC_EXTERNAL_VENDOR_SPECIFIC 0x0400
+#define UVC_COMPOSITE_CONNECTOR 0x0401
+#define UVC_SVIDEO_CONNECTOR 0x0402
+#define UVC_COMPONENT_CONNECTOR 0x0403
+
+/* 2.4.2.2. Status Packet Type */
+#define UVC_STATUS_TYPE_CONTROL 1
+#define UVC_STATUS_TYPE_STREAMING 2
+
+#endif /* __LINUX_USB_VIDEO_H */
+
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 8a025d510904..b7049b33f768 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -275,7 +275,9 @@ struct v4l2_pix_format {
__u32 priv; /* private data, depends on pixelformat */
};
-/* Pixel format FOURCC depth Description */
+/* Pixel format FOURCC depth Description */
+
+/* RGB formats */
#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */
#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
@@ -286,12 +288,20 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */
+
+/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
+
+/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
+
+/* Luminance+Chrominance formats */
#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */
#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */
#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */
+#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */
+#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */
@@ -301,6 +311,10 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
+#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
+#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */
+#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
+#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -308,23 +322,17 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
-/* The following formats are not defined in the V4L2 specification */
-#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
-#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */
-#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */
-#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
-#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
-
-/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
+/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
-/*
- * 10bit raw bayer, expanded to 16 bits
- * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
- */
-#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0')
-/* 10bit raw bayer DPCM compressed to 8 bits */
+#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */
+ /* 10bit raw bayer DPCM compressed to 8 bits */
#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
+ /*
+ * 10bit raw bayer, expanded to 16 bits
+ * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
+ */
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
/* compressed formats */
@@ -347,7 +355,6 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */
#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
-#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 13e1adf55c4c..f3a61fdb6f5d 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -94,7 +94,7 @@ struct execute_work {
/*
* initialize all of a work item in one go
*
- * NOTE! No point in using "atomic_long_set()": useing a direct
+ * NOTE! No point in using "atomic_long_set()": using a direct
* assignment of the work data initializer allows the compiler
* to generate better code.
*/
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 3224820c8514..e070b91905d6 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -14,17 +14,6 @@ extern struct list_head inode_in_use;
extern struct list_head inode_unused;
/*
- * Yes, writeback.h requires sched.h
- * No, sched.h is not included from here.
- */
-static inline int task_is_pdflush(struct task_struct *task)
-{
- return task->flags & PF_FLUSHER;
-}
-
-#define current_is_pdflush() task_is_pdflush(current)
-
-/*
* fs/fs-writeback.c
*/
enum writeback_sync_modes {
@@ -79,6 +68,7 @@ struct writeback_control {
void writeback_inodes(struct writeback_control *wbc);
int inode_wait(void *);
void sync_inodes_sb(struct super_block *, int wait);
+long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
@@ -98,7 +88,7 @@ static inline void inode_sync_wait(struct inode *inode)
/*
* mm/page-writeback.c
*/
-int wakeup_pdflush(long nr_pages);
+void wakeup_flusher_threads(long nr_pages);
void laptop_io_completion(void);
void laptop_sync_completion(void);
void throttle_vm_writeout(gfp_t gfp_mask);
@@ -150,7 +140,6 @@ balance_dirty_pages_ratelimited(struct address_space *mapping)
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
void *data);
-int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
int generic_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int write_cache_pages(struct address_space *mapping,
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
index 4d7e2272c42f..11a4a2d3e364 100644
--- a/include/media/v4l2-chip-ident.h
+++ b/include/media/v4l2-chip-ident.h
@@ -155,6 +155,9 @@ enum {
/* module cafe_ccic, just ident 8801 */
V4L2_IDENT_CAFE = 8801,
+ /* module mt9v011, just ident 8243 */
+ V4L2_IDENT_MT9V011 = 8243,
+
/* module tw9910: just ident 9910 */
V4L2_IDENT_TW9910 = 9910,
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 5dcb36785529..89a39ce17294 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -220,6 +220,9 @@ struct v4l2_subdev_video_ops {
int (*g_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt);
int (*try_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt);
int (*s_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt);
+ int (*cropcap)(struct v4l2_subdev *sd, struct v4l2_cropcap *cc);
+ int (*g_crop)(struct v4l2_subdev *sd, struct v4l2_crop *crop);
+ int (*s_crop)(struct v4l2_subdev *sd, struct v4l2_crop *crop);
int (*g_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index e919fca1072a..06b072fd6d54 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -190,7 +190,7 @@ struct l2cap_conf_rfc {
#define L2CAP_MODE_RETRANS 0x01
#define L2CAP_MODE_FLOWCTL 0x02
#define L2CAP_MODE_ERTM 0x03
-#define L2CAP_MODE_STREAM 0x04
+#define L2CAP_MODE_STREAMING 0x04
struct l2cap_disconn_req {
__le16 dcid;
@@ -271,9 +271,11 @@ struct l2cap_pinfo {
__u16 imtu;
__u16 omtu;
__u16 flush_to;
- __u8 sec_level;
+ __u8 mode;
+ __u8 fcs;
+ __u8 sec_level;
__u8 role_switch;
- __u8 force_reliable;
+ __u8 force_reliable;
__u8 conf_req[64];
__u8 conf_len;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index a632689b61b4..cbdd6284996d 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -258,8 +258,8 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
/* Update TCP window tracking data when NAT mangles the packet */
extern void nf_conntrack_tcp_update(const struct sk_buff *skb,
unsigned int dataoff,
- struct nf_conn *ct,
- int dir);
+ struct nf_conn *ct, int dir,
+ s16 offset);
/* Fake conntrack entry for untracked connections */
extern struct nf_conn nf_conntrack_untracked;
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index 5054dc5ea2c2..29d126736611 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -45,6 +45,7 @@ int phonet_address_add(struct net_device *dev, u8 addr);
int phonet_address_del(struct net_device *dev, u8 addr);
u8 phonet_address_get(struct net_device *dev, u8 addr);
int phonet_address_lookup(struct net *net, u8 addr);
+void phonet_address_notify(int event, struct net_device *dev, u8 addr);
#define PN_NO_ADDR 0xff
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index ec8a45f9a069..35814ced2d22 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -279,6 +279,7 @@ int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
/* dapm events */
int snd_soc_dapm_stream_event(struct snd_soc_codec *codec, char *stream,
int event);
+void snd_soc_dapm_shutdown(struct snd_soc_device *socdev);
/* dapm sys fs - used by the core */
int snd_soc_dapm_sys_add(struct device *dev);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index cf6111d72b17..e6704c0a4404 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -192,6 +192,11 @@ void snd_soc_unregister_platform(struct snd_soc_platform *platform);
int snd_soc_register_codec(struct snd_soc_codec *codec);
void snd_soc_unregister_codec(struct snd_soc_codec *codec);
+#ifdef CONFIG_PM
+int snd_soc_suspend_device(struct device *dev);
+int snd_soc_resume_device(struct device *dev);
+#endif
+
/* pcm <-> DAI connect */
void snd_soc_free_pcms(struct snd_soc_device *socdev);
int snd_soc_new_pcms(struct snd_soc_device *socdev, int idx, const char *xid);
@@ -216,9 +221,9 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
/* codec register bit access */
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
- unsigned short mask, unsigned short value);
+ unsigned int mask, unsigned int value);
int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg,
- unsigned short mask, unsigned short value);
+ unsigned int mask, unsigned int value);
int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
struct snd_ac97_bus_ops *ops, int num);
@@ -369,8 +374,6 @@ struct snd_soc_codec {
enum snd_soc_bias_level bias_level;
enum snd_soc_bias_level suspend_bias_level;
struct delayed_work delayed_work;
- struct list_head up_list;
- struct list_head down_list;
/* codec DAI's */
struct snd_soc_dai *dai;
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index d136ea2181ed..9fd5b19ccf5c 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -35,6 +35,8 @@
#define SNDRV_CTL_TLVT_DB_SCALE 1 /* dB scale */
#define SNDRV_CTL_TLVT_DB_LINEAR 2 /* linear volume */
#define SNDRV_CTL_TLVT_DB_RANGE 3 /* dB range container */
+#define SNDRV_CTL_TLVT_DB_MINMAX 4 /* dB scale with min/max */
+#define SNDRV_CTL_TLVT_DB_MINMAX_MUTE 5 /* dB scale with min/max with mute */
#define TLV_DB_SCALE_ITEM(min, step, mute) \
SNDRV_CTL_TLVT_DB_SCALE, 2 * sizeof(unsigned int), \
@@ -42,6 +44,18 @@
#define DECLARE_TLV_DB_SCALE(name, min, step, mute) \
unsigned int name[] = { TLV_DB_SCALE_ITEM(min, step, mute) }
+/* dB scale specified with min/max values instead of step */
+#define TLV_DB_MINMAX_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVT_DB_MINMAX, 2 * sizeof(unsigned int), \
+ (min_dB), (max_dB)
+#define TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVT_DB_MINMAX_MUTE, 2 * sizeof(unsigned int), \
+ (min_dB), (max_dB)
+#define DECLARE_TLV_DB_MINMAX(name, min_dB, max_dB) \
+ unsigned int name[] = { TLV_DB_MINMAX_ITEM(min_dB, max_dB) }
+#define DECLARE_TLV_DB_MINMAX_MUTE(name, min_dB, max_dB) \
+ unsigned int name[] = { TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) }
+
/* linear volume between min_dB and max_dB (.01dB unit) */
#define TLV_DB_LINEAR_ITEM(min_dB, max_dB) \
SNDRV_CTL_TLVT_DB_LINEAR, 2 * sizeof(unsigned int), \
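An illustrative declaration (not part of the patch): a codec driver would typically use the new macros for a control whose dB range is given by its endpoints rather than a fixed per-step value; values are in 0.01 dB units and the control name is hypothetical.

#include <sound/tlv.h>

/* hypothetical output volume: -63.50 dB .. 0 dB */
static const DECLARE_TLV_DB_MINMAX(out_vol_tlv, -6350, 0);
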
diff --git a/include/sound/uda1380.h b/include/sound/uda1380.h
new file mode 100644
index 000000000000..381319c7000c
--- /dev/null
+++ b/include/sound/uda1380.h
@@ -0,0 +1,22 @@
+/*
+ * UDA1380 ALSA SoC Codec driver
+ *
+ * Copyright 2009 Philipp Zabel
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UDA1380_H
+#define __UDA1380_H
+
+struct uda1380_platform_data {
+ int gpio_power;
+ int gpio_reset;
+ int dac_clk;
+#define UDA1380_DAC_CLK_SYSCLK 0
+#define UDA1380_DAC_CLK_WSPLL 1
+};
+
+#endif /* __UDA1380_H */
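An illustrative board-support snippet (not part of the patch): machine code would hand this platform data to the codec device roughly as below; the GPIO numbers are hypothetical.

#include <sound/uda1380.h>

static struct uda1380_platform_data board_uda1380_info = {
	.gpio_power = 40,	/* hypothetical GPIOs */
	.gpio_reset = 41,
	.dac_clk    = UDA1380_DAC_CLK_WSPLL,
};
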
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
new file mode 100644
index 000000000000..2c2923507909
--- /dev/null
+++ b/include/trace/events/kvm.h
@@ -0,0 +1,95 @@
+#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_MAIN_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_FILE kvm
+
+#if defined(__KVM_HAVE_IOAPIC) && defined(__KVM_HAVE_PIT)
+TRACE_EVENT(kvm_set_irq,
+ TP_PROTO(unsigned int gsi, int level, int irq_source_id),
+ TP_ARGS(gsi, level, irq_source_id),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, gsi )
+ __field( int, level )
+ __field( int, irq_source_id )
+ ),
+
+ TP_fast_assign(
+ __entry->gsi = gsi;
+ __entry->level = level;
+ __entry->irq_source_id = irq_source_id;
+ ),
+
+ TP_printk("gsi %u level %d source %d",
+ __entry->gsi, __entry->level, __entry->irq_source_id)
+);
+
+
+#define kvm_irqchips \
+ {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
+ {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
+ {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
+
+TRACE_EVENT(kvm_ack_irq,
+ TP_PROTO(unsigned int irqchip, unsigned int pin),
+ TP_ARGS(irqchip, pin),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irqchip )
+ __field( unsigned int, pin )
+ ),
+
+ TP_fast_assign(
+ __entry->irqchip = irqchip;
+ __entry->pin = pin;
+ ),
+
+ TP_printk("irqchip %s pin %u",
+ __print_symbolic(__entry->irqchip, kvm_irqchips),
+ __entry->pin)
+);
+
+
+
+#endif /* defined(__KVM_HAVE_IOAPIC) && defined(__KVM_HAVE_PIT) */
+
+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
+#define KVM_TRACE_MMIO_READ 1
+#define KVM_TRACE_MMIO_WRITE 2
+
+#define kvm_trace_symbol_mmio \
+ { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
+ { KVM_TRACE_MMIO_READ, "read" }, \
+ { KVM_TRACE_MMIO_WRITE, "write" }
+
+TRACE_EVENT(kvm_mmio,
+ TP_PROTO(int type, int len, u64 gpa, u64 val),
+ TP_ARGS(type, len, gpa, val),
+
+ TP_STRUCT__entry(
+ __field( u32, type )
+ __field( u32, len )
+ __field( u64, gpa )
+ __field( u64, val )
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->len = len;
+ __entry->gpa = gpa;
+ __entry->val = val;
+ ),
+
+ TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
+ __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
+ __entry->len, __entry->gpa, __entry->val)
+);
+
+#endif /* _TRACE_KVM_MAIN_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
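For context (not part of the patch): each TRACE_EVENT() above generates a trace_<name>() inline, so the MMIO emulation path can record an event roughly as sketched below; the wrapper function is hypothetical.

#include <linux/types.h>
#include <trace/events/kvm.h>

static void note_mmio_read(int len, u64 gpa, u64 val)
{
	/* emits "mmio read len ... gpa ... val ..." when the tracepoint is enabled */
	trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, gpa, val);
}
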
diff --git a/init/Kconfig b/init/Kconfig
index 1ce05a4cb5f6..a5e6419e24ce 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -302,13 +302,17 @@ config AUDITSYSCALL
help
Enable low-overhead system-call auditing infrastructure that
can be used independently or with another kernel subsystem,
- such as SELinux. To use audit's filesystem watch feature, please
- ensure that INOTIFY is configured.
+ such as SELinux.
+
+config AUDIT_WATCH
+ def_bool y
+ depends on AUDITSYSCALL
+ select FSNOTIFY
config AUDIT_TREE
def_bool y
depends on AUDITSYSCALL
- select INOTIFY
+ select FSNOTIFY
menu "RCU Subsystem"
@@ -316,21 +320,13 @@ choice
prompt "RCU Implementation"
default TREE_RCU
-config CLASSIC_RCU
- bool "Classic RCU"
- help
- This option selects the classic RCU implementation that is
- designed for best read-side performance on non-realtime
- systems.
-
- Select this option if you are unsure.
-
config TREE_RCU
bool "Tree-based hierarchical RCU"
help
This option selects the RCU implementation that is
designed for very large SMP system with hundreds or
- thousands of CPUs.
+ thousands of CPUs. It also scales down nicely to
+ smaller systems.
config PREEMPT_RCU
bool "Preemptible RCU"
@@ -1017,18 +1013,23 @@ config COMPAT_BRK
choice
prompt "Choose SLAB allocator"
- default SLUB
+ default SLQB_ALLOCATOR
help
This option allows to select a slab allocator.
-config SLAB
+config SLAB_ALLOCATOR
bool "SLAB"
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
per cpu and per node queues.
-config SLUB
+config SLAB
+ bool
+ default y
+ depends on SLAB_ALLOCATOR
+
+config SLUB_ALLOCATOR
bool "SLUB (Unqueued Allocator)"
help
SLUB is a slab allocator that minimizes cache line usage
@@ -1038,6 +1039,21 @@ config SLUB
and has enhanced diagnostics. SLUB is the default choice for
a slab allocator.
+config SLUB
+ bool
+ default y
+ depends on SLUB_ALLOCATOR
+
+config SLQB_ALLOCATOR
+ bool "SLQB (Queued allocator)"
+ help
+ SLQB is a proposed new slab allocator.
+
+config SLQB
+ bool
+ default y
+ depends on SLQB_ALLOCATOR
+
config SLOB
depends on EMBEDDED
bool "SLOB (Simple Allocator)"
@@ -1093,7 +1109,7 @@ config HAVE_GENERIC_DMA_COHERENT
config SLABINFO
bool
depends on PROC_FS
- depends on SLAB || SLUB_DEBUG
+ depends on SLAB || SLUB_DEBUG || SLQB
default y
config RT_MUTEXES
diff --git a/init/main.c b/init/main.c
index a1ff15084e59..8591d3c223e5 100644
--- a/init/main.c
+++ b/init/main.c
@@ -354,17 +354,11 @@ static void __init smp_init(void)
#define smp_init() do { } while (0)
#endif
-static inline void setup_per_cpu_areas(void) { }
static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#else
-#if NR_CPUS > BITS_PER_LONG
-cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_mask_all);
-#endif
-
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
@@ -375,29 +369,6 @@ static void __init setup_nr_cpu_ids(void)
nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
}
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static void __init setup_per_cpu_areas(void)
-{
- unsigned long size, i;
- char *ptr;
- unsigned long nr_possible_cpus = num_possible_cpus();
-
- /* Copy section for each CPU (we discard the original) */
- size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
- ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
- for_each_possible_cpu(i) {
- __per_cpu_offset[i] = ptr - __per_cpu_start;
- memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
- ptr += size;
- }
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
-
/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
{
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index e35ba2c3a8d7..c5e68adc6732 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -32,6 +32,7 @@
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
+#include <linux/ima.h>
#include <net/sock.h>
#include "util.h"
@@ -733,6 +734,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
error = PTR_ERR(filp);
goto out_putfd;
}
+ ima_counts_get(filp);
fd_install(fd, filp);
goto out_upsem;
diff --git a/kernel/Makefile b/kernel/Makefile
index 780c8dcf4516..a00eb1c3a8af 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -69,10 +69,11 @@ obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
-obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
+obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
-obj-$(CONFIG_GCOV_KERNEL) += gcov/
+obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
+obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
@@ -80,7 +81,6 @@ obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
-obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
obj-$(CONFIG_TREE_RCU) += rcutree.o
obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
@@ -96,6 +96,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_X86_DS) += trace/
+obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 7afa31564162..9f3391090b3e 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -215,6 +215,7 @@ static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file,
static int acct_on(char *name)
{
struct file *file;
+ struct vfsmount *mnt;
int error;
struct pid_namespace *ns;
struct bsd_acct_struct *acct = NULL;
@@ -256,11 +257,12 @@ static int acct_on(char *name)
acct = NULL;
}
- mnt_pin(file->f_path.mnt);
+ mnt = file->f_path.mnt;
+ mnt_pin(mnt);
acct_file_reopen(ns->bacct, file, ns);
spin_unlock(&acct_lock);
- mntput(file->f_path.mnt); /* it's pinned, now give up active reference */
+ mntput(mnt); /* it's pinned, now give up active reference */
kfree(acct);
return 0;
diff --git a/kernel/audit.c b/kernel/audit.c
index defc2e6f1e3b..601e3f280ee4 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -55,7 +55,6 @@
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
-#include <linux/inotify.h>
#include <linux/freezer.h>
#include <linux/tty.h>
diff --git a/kernel/audit.h b/kernel/audit.h
index 208687be4f30..f7206db4e13d 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -103,21 +103,27 @@ extern struct mutex audit_filter_mutex;
extern void audit_free_rule_rcu(struct rcu_head *);
extern struct list_head audit_filter_list[];
+extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
+
/* audit watch functions */
-extern unsigned long audit_watch_inode(struct audit_watch *watch);
-extern dev_t audit_watch_dev(struct audit_watch *watch);
+#ifdef CONFIG_AUDIT_WATCH
extern void audit_put_watch(struct audit_watch *watch);
extern void audit_get_watch(struct audit_watch *watch);
extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op);
-extern int audit_add_watch(struct audit_krule *krule);
-extern void audit_remove_watch(struct audit_watch *watch);
-extern void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list);
-extern void audit_inotify_unregister(struct list_head *in_list);
+extern int audit_add_watch(struct audit_krule *krule, struct list_head **list);
+extern void audit_remove_watch_rule(struct audit_krule *krule);
extern char *audit_watch_path(struct audit_watch *watch);
-extern struct list_head *audit_watch_rules(struct audit_watch *watch);
-
-extern struct audit_entry *audit_dupe_rule(struct audit_krule *old,
- struct audit_watch *watch);
+extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev);
+#else
+#define audit_put_watch(w) {}
+#define audit_get_watch(w) {}
+#define audit_to_watch(k, p, l, o) (-EINVAL)
+#define audit_add_watch(k, l) (-EINVAL)
+#define audit_remove_watch_rule(k) BUG()
+#define audit_watch_path(w) ""
+#define audit_watch_compare(w, i, d) 0
+
+#endif /* CONFIG_AUDIT_WATCH */
#ifdef CONFIG_AUDIT_TREE
extern struct audit_chunk *audit_tree_lookup(const struct inode *);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 2451dc6f3282..184ca3f35b86 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1,5 +1,5 @@
#include "audit.h"
-#include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
@@ -21,7 +21,7 @@ struct audit_tree {
struct audit_chunk {
struct list_head hash;
- struct inotify_watch watch;
+ struct fsnotify_mark_entry mark;
struct list_head trees; /* with root here */
int dead;
int count;
@@ -67,7 +67,7 @@ static LIST_HEAD(prune_list);
* that makes a difference. Some.
*/
-static struct inotify_handle *rtree_ih;
+static struct fsnotify_group *audit_tree_group;
static struct audit_tree *alloc_tree(const char *s)
{
@@ -110,29 +110,6 @@ const char *audit_tree_path(struct audit_tree *tree)
return tree->pathname;
}
-static struct audit_chunk *alloc_chunk(int count)
-{
- struct audit_chunk *chunk;
- size_t size;
- int i;
-
- size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
- chunk = kzalloc(size, GFP_KERNEL);
- if (!chunk)
- return NULL;
-
- INIT_LIST_HEAD(&chunk->hash);
- INIT_LIST_HEAD(&chunk->trees);
- chunk->count = count;
- atomic_long_set(&chunk->refs, 1);
- for (i = 0; i < count; i++) {
- INIT_LIST_HEAD(&chunk->owners[i].list);
- chunk->owners[i].index = i;
- }
- inotify_init_watch(&chunk->watch);
- return chunk;
-}
-
static void free_chunk(struct audit_chunk *chunk)
{
int i;
@@ -156,6 +133,35 @@ static void __put_chunk(struct rcu_head *rcu)
audit_put_chunk(chunk);
}
+static void audit_tree_destroy_watch(struct fsnotify_mark_entry *entry)
+{
+ struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+ call_rcu(&chunk->head, __put_chunk);
+}
+
+static struct audit_chunk *alloc_chunk(int count)
+{
+ struct audit_chunk *chunk;
+ size_t size;
+ int i;
+
+ size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
+ chunk = kzalloc(size, GFP_KERNEL);
+ if (!chunk)
+ return NULL;
+
+ INIT_LIST_HEAD(&chunk->hash);
+ INIT_LIST_HEAD(&chunk->trees);
+ chunk->count = count;
+ atomic_long_set(&chunk->refs, 1);
+ for (i = 0; i < count; i++) {
+ INIT_LIST_HEAD(&chunk->owners[i].list);
+ chunk->owners[i].index = i;
+ }
+ fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+ return chunk;
+}
+
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
@@ -166,10 +172,15 @@ static inline struct list_head *chunk_hash(const struct inode *inode)
return chunk_hash_heads + n % HASH_SIZE;
}
-/* hash_lock is held by caller */
+/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
- struct list_head *list = chunk_hash(chunk->watch.inode);
+ struct fsnotify_mark_entry *entry = &chunk->mark;
+ struct list_head *list;
+
+ if (!entry->inode)
+ return;
+ list = chunk_hash(entry->inode);
list_add_rcu(&chunk->hash, list);
}
@@ -180,7 +191,8 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
struct audit_chunk *p;
list_for_each_entry_rcu(p, list, hash) {
- if (p->watch.inode == inode) {
+ /* mark.inode may have gone NULL, but who cares? */
+ if (p->mark.inode == inode) {
atomic_long_inc(&p->refs);
return p;
}
@@ -209,38 +221,19 @@ static struct audit_chunk *find_chunk(struct node *p)
static void untag_chunk(struct node *p)
{
struct audit_chunk *chunk = find_chunk(p);
+ struct fsnotify_mark_entry *entry = &chunk->mark;
struct audit_chunk *new;
struct audit_tree *owner;
int size = chunk->count - 1;
int i, j;
- if (!pin_inotify_watch(&chunk->watch)) {
- /*
- * Filesystem is shutting down; all watches are getting
- * evicted, just take it off the node list for this
- * tree and let the eviction logics take care of the
- * rest.
- */
- owner = p->owner;
- if (owner->root == chunk) {
- list_del_init(&owner->same_root);
- owner->root = NULL;
- }
- list_del_init(&p->list);
- p->owner = NULL;
- put_tree(owner);
- return;
- }
+ fsnotify_get_mark(entry);
spin_unlock(&hash_lock);
- /*
- * pin_inotify_watch() succeeded, so the watch won't go away
- * from under us.
- */
- mutex_lock(&chunk->watch.inode->inotify_mutex);
- if (chunk->dead) {
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
+ spin_lock(&entry->lock);
+ if (chunk->dead || !entry->inode) {
+ spin_unlock(&entry->lock);
goto out;
}
@@ -255,16 +248,17 @@ static void untag_chunk(struct node *p)
list_del_init(&p->list);
list_del_rcu(&chunk->hash);
spin_unlock(&hash_lock);
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark_by_entry(entry);
+ fsnotify_put_mark(entry);
goto out;
}
new = alloc_chunk(size);
if (!new)
goto Fallback;
- if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
+ fsnotify_duplicate_mark(&new->mark, entry);
+ if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1)) {
free_chunk(new);
goto Fallback;
}
@@ -297,9 +291,9 @@ static void untag_chunk(struct node *p)
list_for_each_entry(owner, &new->trees, same_root)
owner->root = new;
spin_unlock(&hash_lock);
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark_by_entry(entry);
+ fsnotify_put_mark(entry);
goto out;
Fallback:
@@ -313,31 +307,33 @@ Fallback:
p->owner = NULL;
put_tree(owner);
spin_unlock(&hash_lock);
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
+ spin_unlock(&entry->lock);
out:
- unpin_inotify_watch(&chunk->watch);
+ fsnotify_put_mark(entry);
spin_lock(&hash_lock);
}
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
+ struct fsnotify_mark_entry *entry;
struct audit_chunk *chunk = alloc_chunk(1);
if (!chunk)
return -ENOMEM;
- if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
+ entry = &chunk->mark;
+ if (fsnotify_add_mark(entry, audit_tree_group, inode, 0)) {
free_chunk(chunk);
return -ENOSPC;
}
- mutex_lock(&inode->inotify_mutex);
+ spin_lock(&entry->lock);
spin_lock(&hash_lock);
if (tree->goner) {
spin_unlock(&hash_lock);
chunk->dead = 1;
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark_by_entry(entry);
+ fsnotify_put_mark(entry);
return 0;
}
chunk->owners[0].index = (1U << 31);
@@ -350,30 +346,33 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
}
insert_hash(chunk);
spin_unlock(&hash_lock);
- mutex_unlock(&inode->inotify_mutex);
+ spin_unlock(&entry->lock);
return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
- struct inotify_watch *watch;
+ struct fsnotify_mark_entry *old_entry, *chunk_entry;
struct audit_tree *owner;
struct audit_chunk *chunk, *old;
struct node *p;
int n;
- if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
+ spin_lock(&inode->i_lock);
+ old_entry = fsnotify_find_mark_entry(audit_tree_group, inode);
+ spin_unlock(&inode->i_lock);
+ if (!old_entry)
return create_chunk(inode, tree);
- old = container_of(watch, struct audit_chunk, watch);
+ old = container_of(old_entry, struct audit_chunk, mark);
/* are we already there? */
spin_lock(&hash_lock);
for (n = 0; n < old->count; n++) {
if (old->owners[n].owner == tree) {
spin_unlock(&hash_lock);
- put_inotify_watch(watch);
+ fsnotify_put_mark(old_entry);
return 0;
}
}
@@ -382,22 +381,40 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk = alloc_chunk(old->count + 1);
if (!chunk)
return -ENOMEM;
+ chunk_entry = &chunk->mark;
- mutex_lock(&inode->inotify_mutex);
- if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch);
+ spin_lock(&old_entry->lock);
+ if (!old_entry->inode) {
+ /* old_entry is being shot, let's just lie */
+ spin_unlock(&old_entry->lock);
+ fsnotify_put_mark(old_entry);
free_chunk(chunk);
+ return -ENOENT;
+ }
+
+ fsnotify_duplicate_mark(chunk_entry, old_entry);
+ if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, 1)) {
+ spin_unlock(&old_entry->lock);
+ free_chunk(chunk);
+ fsnotify_put_mark(old_entry);
return -ENOSPC;
}
+
+ /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
+ spin_lock(&chunk_entry->lock);
spin_lock(&hash_lock);
+
+ /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
if (tree->goner) {
spin_unlock(&hash_lock);
chunk->dead = 1;
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&chunk_entry->lock);
+ spin_unlock(&old_entry->lock);
+
+ fsnotify_destroy_mark_by_entry(chunk_entry);
+
+ fsnotify_put_mark(chunk_entry);
+ fsnotify_put_mark(old_entry);
return 0;
}
list_replace_init(&old->trees, &chunk->trees);
@@ -423,9 +440,10 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
list_add(&tree->same_root, &chunk->trees);
}
spin_unlock(&hash_lock);
- inotify_evict_watch(&old->watch);
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch);
+ spin_unlock(&chunk_entry->lock);
+ spin_unlock(&old_entry->lock);
+ fsnotify_destroy_mark_by_entry(old_entry);
+ fsnotify_put_mark(old_entry);
return 0;
}
@@ -578,7 +596,8 @@ void audit_trim_trees(void)
spin_lock(&hash_lock);
list_for_each_entry(node, &tree->chunks, list) {
struct audit_chunk *chunk = find_chunk(node);
- struct inode *inode = chunk->watch.inode;
+ /* this could be NULL if the watch is dying elsewhere... */
+ struct inode *inode = chunk->mark.inode;
struct vfsmount *mnt;
node->index |= 1U<<31;
list_for_each_entry(mnt, &list, mnt_list) {
@@ -925,34 +944,40 @@ static void evict_chunk(struct audit_chunk *chunk)
mutex_unlock(&audit_filter_mutex);
}
-static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
- u32 cookie, const char *dname, struct inode *inode)
+static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
- struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+ BUG();
+ return -EOPNOTSUPP;
+}
- if (mask & IN_IGNORED) {
- evict_chunk(chunk);
- put_inotify_watch(watch);
- }
+static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
+{
+ struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+
+ evict_chunk(chunk);
+ fsnotify_put_mark(entry);
}
-static void destroy_watch(struct inotify_watch *watch)
+static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
{
- struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
- call_rcu(&chunk->head, __put_chunk);
+ return 0;
}
-static const struct inotify_operations rtree_inotify_ops = {
- .handle_event = handle_event,
- .destroy_watch = destroy_watch,
+static const struct fsnotify_ops audit_tree_ops = {
+ .handle_event = audit_tree_handle_event,
+ .should_send_event = audit_tree_send_event,
+ .free_group_priv = NULL,
+ .free_event_priv = NULL,
+ .freeing_mark = audit_tree_freeing_mark,
};
static int __init audit_tree_init(void)
{
int i;
- rtree_ih = inotify_init(&rtree_inotify_ops);
- if (IS_ERR(rtree_ih))
+ audit_tree_group = fsnotify_obtain_group(AUDIT_TREE_GROUP_NUM,
+ 0, &audit_tree_ops);
+ if (IS_ERR(audit_tree_group))
audit_panic("cannot initialize inotify handle for rectree watches");
for (i = 0; i < HASH_SIZE; i++)
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 0e96dbc60ea9..cfaa248b21f7 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -24,17 +24,17 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/fs.h>
+#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/netlink.h>
#include <linux/sched.h>
-#include <linux/inotify.h>
#include <linux/security.h>
#include "audit.h"
/*
* Reference counting:
*
- * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED
+ * audit_parent: lifetime is from audit_init_parent() to receipt of an FS_IGNORED
* event. Each audit_watch holds a reference to its associated parent.
*
* audit_watch: if added to lists, lifetime is from audit_init_watch() to
@@ -50,40 +50,64 @@ struct audit_watch {
unsigned long ino; /* associated inode number */
struct audit_parent *parent; /* associated parent */
struct list_head wlist; /* entry in parent->watches list */
- struct list_head rules; /* associated rules */
+ struct list_head rules; /* anchor for krule->rlist */
};
struct audit_parent {
- struct list_head ilist; /* entry in inotify registration list */
- struct list_head watches; /* associated watches */
- struct inotify_watch wdata; /* inotify watch data */
- unsigned flags; /* status flags */
+ struct list_head watches; /* anchor for audit_watch->wlist */
+ struct fsnotify_mark_entry mark; /* fsnotify mark on the inode */
};
-/* Inotify handle. */
-struct inotify_handle *audit_ih;
+/* fsnotify handle. */
+struct fsnotify_group *audit_watch_group;
-/*
- * audit_parent status flags:
- *
- * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to
- * a filesystem event to ensure we're adding audit watches to a valid parent.
- * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot
- * receive them while we have nameidata, but must be used for IN_MOVE_SELF which
- * we can receive while holding nameidata.
- */
-#define AUDIT_PARENT_INVALID 0x001
+/* fsnotify events we care about. */
+#define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\
+ FS_MOVE_SELF | FS_EVENT_ON_CHILD)
-/* Inotify events we care about. */
-#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF
+static void audit_free_parent(struct audit_parent *parent)
+{
+ WARN_ON(!list_empty(&parent->watches));
+ kfree(parent);
+}
-static void audit_free_parent(struct inotify_watch *i_watch)
+static void audit_watch_free_mark(struct fsnotify_mark_entry *entry)
{
struct audit_parent *parent;
- parent = container_of(i_watch, struct audit_parent, wdata);
- WARN_ON(!list_empty(&parent->watches));
- kfree(parent);
+ parent = container_of(entry, struct audit_parent, mark);
+ audit_free_parent(parent);
+}
+
+static void audit_get_parent(struct audit_parent *parent)
+{
+ if (likely(parent))
+ fsnotify_get_mark(&parent->mark);
+}
+
+static void audit_put_parent(struct audit_parent *parent)
+{
+ if (likely(parent))
+ fsnotify_put_mark(&parent->mark);
+}
+
+/*
+ * Find and return the audit_parent on the given inode. If found a reference
+ * is taken on this parent.
+ */
+static inline struct audit_parent *audit_find_parent(struct inode *inode)
+{
+ struct audit_parent *parent = NULL;
+ struct fsnotify_mark_entry *entry;
+
+ spin_lock(&inode->i_lock);
+ entry = fsnotify_find_mark_entry(audit_watch_group, inode);
+ spin_unlock(&inode->i_lock);
+
+ if (entry)
+ parent = container_of(entry, struct audit_parent, mark);
+
+ return parent;
}
void audit_get_watch(struct audit_watch *watch)
@@ -104,7 +128,7 @@ void audit_put_watch(struct audit_watch *watch)
void audit_remove_watch(struct audit_watch *watch)
{
list_del(&watch->wlist);
- put_inotify_watch(&watch->parent->wdata);
+ audit_put_parent(watch->parent);
watch->parent = NULL;
audit_put_watch(watch); /* match initial get */
}
@@ -114,42 +138,32 @@ char *audit_watch_path(struct audit_watch *watch)
return watch->path;
}
-struct list_head *audit_watch_rules(struct audit_watch *watch)
-{
- return &watch->rules;
-}
-
-unsigned long audit_watch_inode(struct audit_watch *watch)
-{
- return watch->ino;
-}
-
-dev_t audit_watch_dev(struct audit_watch *watch)
+int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev)
{
- return watch->dev;
+ return (watch->ino != (unsigned long)-1) &&
+ (watch->ino == ino) &&
+ (watch->dev == dev);
}
/* Initialize a parent watch entry. */
static struct audit_parent *audit_init_parent(struct nameidata *ndp)
{
+ struct inode *inode = ndp->path.dentry->d_inode;
struct audit_parent *parent;
- s32 wd;
+ int ret;
parent = kzalloc(sizeof(*parent), GFP_KERNEL);
if (unlikely(!parent))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&parent->watches);
- parent->flags = 0;
-
- inotify_init_watch(&parent->wdata);
- /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */
- get_inotify_watch(&parent->wdata);
- wd = inotify_add_watch(audit_ih, &parent->wdata,
- ndp->path.dentry->d_inode, AUDIT_IN_WATCH);
- if (wd < 0) {
- audit_free_parent(&parent->wdata);
- return ERR_PTR(wd);
+
+ fsnotify_init_mark(&parent->mark, audit_watch_free_mark);
+ parent->mark.mask = AUDIT_FS_WATCH;
+ ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode, 0);
+ if (ret < 0) {
+ audit_free_parent(parent);
+ return ERR_PTR(ret);
}
return parent;
@@ -178,7 +192,7 @@ int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op)
{
struct audit_watch *watch;
- if (!audit_ih)
+ if (!audit_watch_group)
return -EOPNOTSUPP;
if (path[0] != '/' || path[len-1] == '/' ||
@@ -216,7 +230,7 @@ static struct audit_watch *audit_dupe_watch(struct audit_watch *old)
new->dev = old->dev;
new->ino = old->ino;
- get_inotify_watch(&old->parent->wdata);
+ audit_get_parent(old->parent);
new->parent = old->parent;
out:
@@ -250,15 +264,19 @@ static void audit_update_watch(struct audit_parent *parent,
struct audit_entry *oentry, *nentry;
mutex_lock(&audit_filter_mutex);
+ /* Run all of the watches on this parent looking for the one that
+ * matches the given dname */
list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) {
if (audit_compare_dname_path(dname, owatch->path, NULL))
continue;
/* If the update involves invalidating rules, do the inode-based
* filtering now, so we don't omit records. */
- if (invalidating && current->audit_context)
+ if (invalidating && !audit_dummy_context())
audit_filter_inodes(current, current->audit_context);
+ /* updating ino will likely change which audit_hash_list we
+ * are on so we need a new watch for the new list */
nwatch = audit_dupe_watch(owatch);
if (IS_ERR(nwatch)) {
mutex_unlock(&audit_filter_mutex);
@@ -274,12 +292,21 @@ static void audit_update_watch(struct audit_parent *parent,
list_del(&oentry->rule.rlist);
list_del_rcu(&oentry->list);
- nentry = audit_dupe_rule(&oentry->rule, nwatch);
+ nentry = audit_dupe_rule(&oentry->rule);
if (IS_ERR(nentry)) {
list_del(&oentry->rule.list);
audit_panic("error updating watch, removing");
} else {
int h = audit_hash_ino((u32)ino);
+
+ /*
+ * nentry->rule.watch == oentry->rule.watch so
+ * we must drop that reference and set it to our
+ * new watch.
+ */
+ audit_put_watch(nentry->rule.watch);
+ audit_get_watch(nwatch);
+ nentry->rule.watch = nwatch;
list_add(&nentry->rule.rlist, &nwatch->rules);
list_add_rcu(&nentry->list, &audit_inode_hash[h]);
list_replace(&oentry->rule.list,
@@ -311,7 +338,6 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
struct audit_entry *e;
mutex_lock(&audit_filter_mutex);
- parent->flags |= AUDIT_PARENT_INVALID;
list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {
list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
e = container_of(r, struct audit_entry, rule);
@@ -324,20 +350,8 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
audit_remove_watch(w);
}
mutex_unlock(&audit_filter_mutex);
-}
-
-/* Unregister inotify watches for parents on in_list.
- * Generates an IN_IGNORED event. */
-void audit_inotify_unregister(struct list_head *in_list)
-{
- struct audit_parent *p, *n;
- list_for_each_entry_safe(p, n, in_list, ilist) {
- list_del(&p->ilist);
- inotify_rm_watch(audit_ih, &p->wdata);
- /* the unpin matching the pin in audit_do_del_rule() */
- unpin_inotify_watch(&p->wdata);
- }
+ fsnotify_destroy_mark_by_entry(&parent->mark);
}
/* Get path information necessary for adding watches. */
@@ -388,7 +402,7 @@ static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw)
}
}
-/* Associate the given rule with an existing parent inotify_watch.
+/* Associate the given rule with an existing parent.
* Caller must hold audit_filter_mutex. */
static void audit_add_to_parent(struct audit_krule *krule,
struct audit_parent *parent)
@@ -396,6 +410,8 @@ static void audit_add_to_parent(struct audit_krule *krule,
struct audit_watch *w, *watch = krule->watch;
int watch_found = 0;
+ BUG_ON(!mutex_is_locked(&audit_filter_mutex));
+
list_for_each_entry(w, &parent->watches, wlist) {
if (strcmp(watch->path, w->path))
continue;
@@ -412,7 +428,7 @@ static void audit_add_to_parent(struct audit_krule *krule,
}
if (!watch_found) {
- get_inotify_watch(&parent->wdata);
+ audit_get_parent(parent);
watch->parent = parent;
list_add(&watch->wlist, &parent->watches);
@@ -422,13 +438,12 @@ static void audit_add_to_parent(struct audit_krule *krule,
/* Find a matching watch entry, or add this one.
* Caller must hold audit_filter_mutex. */
-int audit_add_watch(struct audit_krule *krule)
+int audit_add_watch(struct audit_krule *krule, struct list_head **list)
{
struct audit_watch *watch = krule->watch;
- struct inotify_watch *i_watch;
struct audit_parent *parent;
struct nameidata *ndp = NULL, *ndw = NULL;
- int ret = 0;
+ int h, ret = 0;
mutex_unlock(&audit_filter_mutex);
@@ -440,47 +455,38 @@ int audit_add_watch(struct audit_krule *krule)
goto error;
}
+ mutex_lock(&audit_filter_mutex);
+
/* update watch filter fields */
if (ndw) {
watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev;
watch->ino = ndw->path.dentry->d_inode->i_ino;
}
- /* The audit_filter_mutex must not be held during inotify calls because
- * we hold it during inotify event callback processing. If an existing
- * inotify watch is found, inotify_find_watch() grabs a reference before
- * returning.
- */
- if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode,
- &i_watch) < 0) {
+ /* either find an old parent or attach a new one */
+ parent = audit_find_parent(ndp->path.dentry->d_inode);
+ if (!parent) {
parent = audit_init_parent(ndp);
if (IS_ERR(parent)) {
- /* caller expects mutex locked */
- mutex_lock(&audit_filter_mutex);
ret = PTR_ERR(parent);
goto error;
}
- } else
- parent = container_of(i_watch, struct audit_parent, wdata);
-
- mutex_lock(&audit_filter_mutex);
+ }
- /* parent was moved before we took audit_filter_mutex */
- if (parent->flags & AUDIT_PARENT_INVALID)
- ret = -ENOENT;
- else
- audit_add_to_parent(krule, parent);
+ audit_add_to_parent(krule, parent);
- /* match get in audit_init_parent or inotify_find_watch */
- put_inotify_watch(&parent->wdata);
+ /* match get in audit_find_parent or audit_init_parent */
+ audit_put_parent(parent);
+ h = audit_hash_ino((u32)watch->ino);
+ *list = &audit_inode_hash[h];
error:
audit_put_nd(ndp, ndw); /* NULL args OK */
return ret;
}
-void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
+void audit_remove_watch_rule(struct audit_krule *krule)
{
struct audit_watch *watch = krule->watch;
struct audit_parent *parent = watch->parent;
@@ -491,53 +497,90 @@ void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
audit_remove_watch(watch);
if (list_empty(&parent->watches)) {
- /* Put parent on the inotify un-registration
- * list. Grab a reference before releasing
- * audit_filter_mutex, to be released in
- * audit_inotify_unregister().
- * If filesystem is going away, just leave
- * the sucker alone, eviction will take
- * care of it. */
- if (pin_inotify_watch(&parent->wdata))
- list_add(&parent->ilist, list);
+ audit_get_parent(parent);
+ fsnotify_destroy_mark_by_entry(&parent->mark);
+ audit_put_parent(parent);
}
}
}
-/* Update watch data in audit rules based on inotify events. */
-static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask,
- u32 cookie, const char *dname, struct inode *inode)
+static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
+{
+ struct fsnotify_mark_entry *entry;
+ bool send;
+
+ spin_lock(&inode->i_lock);
+ entry = fsnotify_find_mark_entry(group, inode);
+ spin_unlock(&inode->i_lock);
+ if (!entry)
+ return false;
+
+ mask = (mask & ~FS_EVENT_ON_CHILD);
+ send = (entry->mask & mask);
+
+ /* find took a reference */
+ fsnotify_put_mark(entry);
+
+ return send;
+}
+
+/* Update watch data in audit rules based on fsnotify events. */
+static int audit_watch_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
+ struct inode *inode;
+ __u32 mask = event->mask;
+ const char *dname = event->file_name;
struct audit_parent *parent;
- parent = container_of(i_watch, struct audit_parent, wdata);
+ BUG_ON(group != audit_watch_group);
- if (mask & (IN_CREATE|IN_MOVED_TO) && inode)
- audit_update_watch(parent, dname, inode->i_sb->s_dev,
- inode->i_ino, 0);
- else if (mask & (IN_DELETE|IN_MOVED_FROM))
+ parent = audit_find_parent(event->to_tell);
+ if (unlikely(!parent))
+ return 0;
+
+ switch (event->data_type) {
+ case (FSNOTIFY_EVENT_PATH):
+ inode = event->path.dentry->d_inode;
+ break;
+ case (FSNOTIFY_EVENT_INODE):
+ inode = event->inode;
+ break;
+ default:
+ BUG();
+ inode = NULL;
+ break;
+ };
+
+ if (mask & (FS_CREATE|FS_MOVED_TO) && inode)
+ audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0);
+ else if (mask & (FS_DELETE|FS_MOVED_FROM))
audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1);
- /* inotify automatically removes the watch and sends IN_IGNORED */
- else if (mask & (IN_DELETE_SELF|IN_UNMOUNT))
+ else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF))
audit_remove_parent_watches(parent);
- /* inotify does not remove the watch, so remove it manually */
- else if(mask & IN_MOVE_SELF) {
- audit_remove_parent_watches(parent);
- inotify_remove_watch_locked(audit_ih, i_watch);
- } else if (mask & IN_IGNORED)
- put_inotify_watch(i_watch);
+ /* moved put_inotify_watch to freeing mark */
+
+ /* matched the ref taken by audit_find_parent */
+ audit_put_parent(parent);
+
+ return 0;
}
-static const struct inotify_operations audit_inotify_ops = {
- .handle_event = audit_handle_ievent,
- .destroy_watch = audit_free_parent,
+static const struct fsnotify_ops audit_watch_fsnotify_ops = {
+ .should_send_event = audit_watch_should_send_event,
+ .handle_event = audit_watch_handle_event,
+ .free_group_priv = NULL,
+ .freeing_mark = NULL,
+ .free_event_priv = NULL,
};
static int __init audit_watch_init(void)
{
- audit_ih = inotify_init(&audit_inotify_ops);
- if (IS_ERR(audit_ih))
- audit_panic("cannot initialize inotify handle");
+ audit_watch_group = fsnotify_obtain_group(AUDIT_WATCH_GROUP_NUM, AUDIT_FS_WATCH,
+ &audit_watch_fsnotify_ops);
+ if (IS_ERR(audit_watch_group)) {
+ audit_watch_group = NULL;
+ audit_panic("cannot create audit fsnotify group");
+ }
return 0;
}
-subsys_initcall(audit_watch_init);
+device_initcall(audit_watch_init);
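
A minimal user-space sketch (not part of the patch) of the mask test that audit_watch_should_send_event() performs above: the child-event marker bit is stripped and the remaining bits are intersected with the mark's interest mask. The bit values below are local placeholders, not the kernel's FS_* definitions.

#include <stdio.h>

#define EX_CREATE          0x001
#define EX_DELETE          0x002
#define EX_MOVED_TO        0x004
#define EX_EVENT_ON_CHILD  0x800

static int should_send(unsigned int mark_mask, unsigned int event_mask)
{
	event_mask &= ~EX_EVENT_ON_CHILD;	/* parent watches ignore this marker */
	return (mark_mask & event_mask) != 0;
}

int main(void)
{
	unsigned int mark = EX_CREATE | EX_DELETE;	/* what the watch cares about */

	printf("create on child: %d\n",
	       should_send(mark, EX_CREATE | EX_EVENT_ON_CHILD));	/* 1 */
	printf("moved_to:        %d\n", should_send(mark, EX_MOVED_TO));	/* 0 */
	return 0;
}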
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index a70604047f3c..f5e4cae5ad82 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -70,6 +70,7 @@ static inline void audit_free_rule(struct audit_entry *e)
{
int i;
struct audit_krule *erule = &e->rule;
+
/* some rules don't have associated watches */
if (erule->watch)
audit_put_watch(erule->watch);
@@ -745,8 +746,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
* rule with the new rule in the filterlist, then free the old rule.
* The rlist element is undefined; list manipulations are handled apart from
* the initial copy. */
-struct audit_entry *audit_dupe_rule(struct audit_krule *old,
- struct audit_watch *watch)
+struct audit_entry *audit_dupe_rule(struct audit_krule *old)
{
u32 fcount = old->field_count;
struct audit_entry *entry;
@@ -768,8 +768,8 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
new->prio = old->prio;
new->buflen = old->buflen;
new->inode_f = old->inode_f;
- new->watch = NULL;
new->field_count = old->field_count;
+
/*
* note that we are OK with not refcounting here; audit_match_tree()
* never dereferences tree and we can't get false positives there
@@ -810,9 +810,9 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
}
}
- if (watch) {
- audit_get_watch(watch);
- new->watch = watch;
+ if (old->watch) {
+ audit_get_watch(old->watch);
+ new->watch = old->watch;
}
return entry;
@@ -865,7 +865,7 @@ static inline int audit_add_rule(struct audit_entry *entry)
struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
struct list_head *list;
- int h, err;
+ int err;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
@@ -888,15 +888,11 @@ static inline int audit_add_rule(struct audit_entry *entry)
if (watch) {
/* audit_filter_mutex is dropped and re-taken during this call */
- err = audit_add_watch(&entry->rule);
+ err = audit_add_watch(&entry->rule, &list);
if (err) {
mutex_unlock(&audit_filter_mutex);
goto error;
}
- /* entry->rule.watch may have changed during audit_add_watch() */
- watch = entry->rule.watch;
- h = audit_hash_ino((u32)audit_watch_inode(watch));
- list = &audit_inode_hash[h];
}
if (tree) {
err = audit_add_tree_rule(&entry->rule);
@@ -948,7 +944,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
struct list_head *list;
- LIST_HEAD(inotify_list);
int ret = 0;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
@@ -968,7 +963,7 @@ static inline int audit_del_rule(struct audit_entry *entry)
}
if (e->rule.watch)
- audit_remove_watch_rule(&e->rule, &inotify_list);
+ audit_remove_watch_rule(&e->rule);
if (e->rule.tree)
audit_remove_tree_rule(&e->rule);
@@ -986,9 +981,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
#endif
mutex_unlock(&audit_filter_mutex);
- if (!list_empty(&inotify_list))
- audit_inotify_unregister(&inotify_list);
-
out:
if (watch)
audit_put_watch(watch); /* match initial get */
@@ -1322,30 +1314,23 @@ static int update_lsm_rule(struct audit_krule *r)
{
struct audit_entry *entry = container_of(r, struct audit_entry, rule);
struct audit_entry *nentry;
- struct audit_watch *watch;
- struct audit_tree *tree;
int err = 0;
if (!security_audit_rule_known(r))
return 0;
- watch = r->watch;
- tree = r->tree;
- nentry = audit_dupe_rule(r, watch);
+ nentry = audit_dupe_rule(r);
if (IS_ERR(nentry)) {
/* save the first error encountered for the
* return value */
err = PTR_ERR(nentry);
audit_panic("error updating LSM filters");
- if (watch)
+ if (r->watch)
list_del(&r->rlist);
list_del_rcu(&entry->list);
list_del(&r->list);
} else {
- if (watch) {
- list_add(&nentry->rule.rlist, audit_watch_rules(watch));
- list_del(&r->rlist);
- } else if (tree)
+ if (r->watch || r->tree)
list_replace_init(&r->rlist, &nentry->rule.rlist);
list_replace_rcu(&entry->list, &nentry->list);
list_replace(&r->list, &nentry->rule.list);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 68d3c6a0ecd6..2a5dbfefa8de 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -549,9 +549,8 @@ static int audit_filter_rules(struct task_struct *tsk,
}
break;
case AUDIT_WATCH:
- if (name && audit_watch_inode(rule->watch) != (unsigned long)-1)
- result = (name->dev == audit_watch_dev(rule->watch) &&
- name->ino == audit_watch_inode(rule->watch));
+ if (name)
+ result = audit_watch_compare(rule->watch, name->ino, name->dev);
break;
case AUDIT_DIR:
if (ctx)
@@ -1726,7 +1725,7 @@ static inline void handle_one(const struct inode *inode)
struct audit_tree_refs *p;
struct audit_chunk *chunk;
int count;
- if (likely(list_empty(&inode->inotify_watches)))
+ if (likely(hlist_empty(&inode->i_fsnotify_mark_entries)))
return;
context = current->audit_context;
p = context->trees;
@@ -1769,7 +1768,7 @@ retry:
seq = read_seqbegin(&rename_lock);
for(;;) {
struct inode *inode = d->d_inode;
- if (inode && unlikely(!list_empty(&inode->inotify_watches))) {
+ if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_mark_entries))) {
struct audit_chunk *chunk;
chunk = audit_tree_lookup(inode);
if (chunk) {
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3737a682cdf5..ea255fe6c344 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -599,6 +599,7 @@ static struct inode_operations cgroup_dir_inode_operations;
static struct file_operations proc_cgroupstats_operations;
static struct backing_dev_info cgroup_backing_dev_info = {
+ .name = "cgroup",
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
diff --git a/kernel/futex.c b/kernel/futex.c
index 1c337112335c..794c862125fe 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -299,7 +299,7 @@ void put_futex_key(int fshared, union futex_key *key)
static int fault_in_user_writeable(u32 __user *uaddr)
{
int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
- sizeof(*uaddr), 1, 0, NULL, NULL);
+ 1, 1, 0, NULL, NULL);
return ret < 0 ? ret : 0;
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 50da67672901..aeb4ee0f6883 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -935,7 +935,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
kfree(action);
#ifdef CONFIG_DEBUG_SHIRQ
- if (irqflags & IRQF_SHARED) {
+ if (!retval && (irqflags & IRQF_SHARED)) {
/*
* It's a shared IRQ -- the driver ought to be prepared for it
* to happen immediately, so let's make sure....
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 638d8bedec14..a0bb09e79867 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -15,10 +15,10 @@
/**
* suspend_device_irqs - disable all currently enabled interrupt lines
*
- * During system-wide suspend or hibernation device interrupts need to be
- * disabled at the chip level and this function is provided for this purpose.
- * It disables all interrupt lines that are enabled at the moment and sets the
- * IRQ_SUSPENDED flag for them.
+ * During system-wide suspend or hibernation device drivers need to be prevented
+ * from receiving interrupts and this function is provided for this purpose.
+ * It marks all interrupt lines in use, except for the timer ones, as disabled
+ * and sets the IRQ_SUSPENDED flag for each of them.
*/
void suspend_device_irqs(void)
{
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 26539e3228e5..3765ff3c1bbe 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(kfifo_free);
* writer, you don't need extra locking to use these functions.
*/
unsigned int __kfifo_put(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len)
+ const unsigned char *buffer, unsigned int len)
{
unsigned int l;
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 9147a3190c9d..6a90eb6c2b67 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -363,7 +363,7 @@ static void put_packet(char *buffer)
* Convert the memory pointed to by mem into hex, placing result in buf.
* Return a pointer to the last char put in buf (null). May return an error.
*/
-int kgdb_mem2hex(char *mem, char *buf, int count)
+int __weak kgdb_mem2hex(char *mem, char *buf, int count)
{
char *tmp;
int err;
@@ -393,7 +393,7 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
* 0x7d escaped with 0x7d. Return a pointer to the character after
* the last byte written.
*/
-static int kgdb_ebin2mem(char *buf, char *mem, int count)
+int __weak kgdb_ebin2mem(char *buf, char *mem, int count)
{
int err = 0;
char c;
@@ -418,7 +418,7 @@ static int kgdb_ebin2mem(char *buf, char *mem, int count)
* Return a pointer to the character AFTER the last byte written.
* May return an error.
*/
-int kgdb_hex2mem(char *buf, char *mem, int count)
+int __weak kgdb_hex2mem(char *buf, char *mem, int count)
{
char *tmp_raw;
char *tmp_hex;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c0fa54b276d9..6fe9dc6d1a81 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -103,7 +103,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
struct kprobe_insn_page {
- struct hlist_node hlist;
+ struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
char slot_used[INSNS_PER_PAGE];
int nused;
@@ -117,7 +117,7 @@ enum kprobe_slot_state {
};
static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
-static struct hlist_head kprobe_insn_pages;
+static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
@@ -152,10 +152,9 @@ loop_end:
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
retry:
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->nused < INSNS_PER_PAGE) {
int i;
for (i = 0; i < INSNS_PER_PAGE; i++) {
@@ -189,8 +188,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
kfree(kip);
return NULL;
}
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist, &kprobe_insn_pages);
+ INIT_LIST_HEAD(&kip->list);
+ list_add(&kip->list, &kprobe_insn_pages);
memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
@@ -219,12 +218,8 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
* so as not to have to set it up again the
* next time somebody inserts a probe.
*/
- hlist_del(&kip->hlist);
- if (hlist_empty(&kprobe_insn_pages)) {
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist,
- &kprobe_insn_pages);
- } else {
+ if (!list_is_singular(&kprobe_insn_pages)) {
+ list_del(&kip->list);
module_free(NULL, kip->insns);
kfree(kip);
}
@@ -235,18 +230,13 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
static int __kprobes collect_garbage_slots(void)
{
- struct kprobe_insn_page *kip;
- struct hlist_node *pos, *next;
- int safety;
+ struct kprobe_insn_page *kip, *next;
/* Ensure no-one is preempted on the garbages */
- mutex_unlock(&kprobe_insn_mutex);
- safety = check_safety();
- mutex_lock(&kprobe_insn_mutex);
- if (safety != 0)
+ if (check_safety())
return -EAGAIN;
- hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
+ list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
int i;
if (kip->ngarbage == 0)
continue;
@@ -264,19 +254,17 @@ static int __kprobes collect_garbage_slots(void)
void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
mutex_lock(&kprobe_insn_mutex);
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->insns <= slot &&
slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
int i = (slot - kip->insns) / MAX_INSN_SIZE;
if (dirty) {
kip->slot_used[i] = SLOT_DIRTY;
kip->ngarbage++;
- } else {
+ } else
collect_one_slot(kip, i);
- }
break;
}
}
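
A standalone sketch (local re-implementation, not the kernel list headers) of the list_is_singular() test the reworked collect_one_slot() relies on: an empty instruction page is only freed when it is not the last page on kprobe_insn_pages, so one page stays cached for the next probe insertion.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *item, struct list_head *head)
{
	item->next = head->next;
	item->prev = head;
	head->next->prev = item;
	head->next = item;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static int list_is_singular(const struct list_head *head)
{
	return head->next != head && head->next == head->prev;
}

int main(void)
{
	struct list_head pages, a, b;

	list_init(&pages);
	list_add(&a, &pages);
	list_add(&b, &pages);

	printf("singular with two pages: %d\n", list_is_singular(&pages)); /* 0 */
	list_del(&b);
	printf("singular with one page:  %d\n", list_is_singular(&pages)); /* 1 */
	return 0;
}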
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9b1a7de26979..eb8751aa0418 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -180,10 +180,12 @@ EXPORT_SYMBOL(kthread_bind);
* @k: thread created by kthread_create().
*
* Sets kthread_should_stop() for @k to return true, wakes it, and
- * waits for it to exit. Your threadfn() must not call do_exit()
- * itself if you use this function! This can also be called after
- * kthread_create() instead of calling wake_up_process(): the thread
- * will exit without calling threadfn().
+ * waits for it to exit. This can also be called after kthread_create()
+ * instead of calling wake_up_process(): the thread will exit without
+ * calling threadfn().
+ *
+ * If threadfn() may call do_exit() itself, the caller must ensure
+ * task_struct can't go away.
*
* Returns the result of threadfn(), or %-EINTR if wake_up_process()
* was never called.
diff --git a/kernel/module.c b/kernel/module.c
index 38928fcaff2b..c2ed903ebcd2 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -364,7 +364,7 @@ EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
static void *percpu_modalloc(unsigned long size, unsigned long align,
const char *name)
@@ -389,7 +389,7 @@ static void percpu_modfree(void *freeme)
free_percpu(freeme);
}
-#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
/* Number of blocks used and allocated. */
static unsigned int pcpu_num_used, pcpu_num_allocated;
@@ -535,7 +535,7 @@ static int percpu_modinit(void)
}
__initcall(percpu_modinit);
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
static unsigned int find_pcpusec(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
@@ -1068,7 +1068,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
{
const unsigned long *crc;
- if (!find_symbol("module_layout", NULL, &crc, true, false))
+ if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
+ &crc, true, false))
BUG();
return check_version(sechdrs, versindex, "module_layout", mod, crc);
}
diff --git a/kernel/panic.c b/kernel/panic.c
index 984b3ecbd72c..b855012e1ee0 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -177,7 +177,7 @@ static const struct tnt tnts[] = {
* 'W' - Taint on warning.
* 'C' - modules from drivers/staging are loaded.
*
- * The string is overwritten by the next call to print_taint().
+ * The string is overwritten by the next call to print_tainted().
*/
const char *print_tainted(void)
{
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 1a933a221ea4..fc3b97410bbf 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -98,16 +98,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
void __weak perf_counter_print_debug(void) { }
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
void __perf_disable(void)
{
- __get_cpu_var(disable_count)++;
+ __get_cpu_var(perf_disable_count)++;
}
bool __perf_enable(void)
{
- return !--__get_cpu_var(disable_count);
+ return !--__get_cpu_var(perf_disable_count);
}
void perf_disable(void)
@@ -236,6 +236,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
list_add_rcu(&counter->event_entry, &ctx->event_list);
ctx->nr_counters++;
+ if (counter->attr.inherit_stat)
+ ctx->nr_stat++;
}
/*
@@ -250,6 +252,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
if (list_empty(&counter->list_entry))
return;
ctx->nr_counters--;
+ if (counter->attr.inherit_stat)
+ ctx->nr_stat--;
list_del_init(&counter->list_entry);
list_del_rcu(&counter->event_entry);
@@ -1006,6 +1010,81 @@ static int context_equiv(struct perf_counter_context *ctx1,
&& !ctx1->pin_count && !ctx2->pin_count;
}
+static void __perf_counter_read(void *counter);
+
+static void __perf_counter_sync_stat(struct perf_counter *counter,
+ struct perf_counter *next_counter)
+{
+ u64 value;
+
+ if (!counter->attr.inherit_stat)
+ return;
+
+ /*
+ * Update the counter value, we cannot use perf_counter_read()
+ * because we're in the middle of a context switch and have IRQs
+ * disabled, which upsets smp_call_function_single(), however
+ * we know the counter must be on the current CPU, therefore we
+ * don't need to use it.
+ */
+ switch (counter->state) {
+ case PERF_COUNTER_STATE_ACTIVE:
+ __perf_counter_read(counter);
+ break;
+
+ case PERF_COUNTER_STATE_INACTIVE:
+ update_counter_times(counter);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * In order to keep per-task stats reliable we need to flip the counter
+ * values when we flip the contexts.
+ */
+ value = atomic64_read(&next_counter->count);
+ value = atomic64_xchg(&counter->count, value);
+ atomic64_set(&next_counter->count, value);
+
+ swap(counter->total_time_enabled, next_counter->total_time_enabled);
+ swap(counter->total_time_running, next_counter->total_time_running);
+
+ /*
+ * Since we swizzled the values, update the user visible data too.
+ */
+ perf_counter_update_userpage(counter);
+ perf_counter_update_userpage(next_counter);
+}
+
+#define list_next_entry(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+
+static void perf_counter_sync_stat(struct perf_counter_context *ctx,
+ struct perf_counter_context *next_ctx)
+{
+ struct perf_counter *counter, *next_counter;
+
+ if (!ctx->nr_stat)
+ return;
+
+ counter = list_first_entry(&ctx->event_list,
+ struct perf_counter, event_entry);
+
+ next_counter = list_first_entry(&next_ctx->event_list,
+ struct perf_counter, event_entry);
+
+ while (&counter->event_entry != &ctx->event_list &&
+ &next_counter->event_entry != &next_ctx->event_list) {
+
+ __perf_counter_sync_stat(counter, next_counter);
+
+ counter = list_next_entry(counter, event_entry);
+ next_counter = list_next_entry(next_counter, event_entry);
+ }
+}
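
A user-space sketch (plain 64-bit variables instead of the atomic64 API) of the value flip done by __perf_counter_sync_stat(): when two equivalent contexts are exchanged between tasks, the counter values are exchanged as well so per-task totals keep following their task.

#include <stdio.h>
#include <stdint.h>

static void sync_stat(uint64_t *count, uint64_t *next_count)
{
	uint64_t value = *next_count;	/* flip the two totals */

	*next_count = *count;
	*count = value;
}

int main(void)
{
	uint64_t task_a = 1000, task_b = 250;	/* counts accumulated so far */

	sync_stat(&task_a, &task_b);		/* contexts were swapped */
	printf("a=%llu b=%llu\n",
	       (unsigned long long)task_a, (unsigned long long)task_b);
	return 0;
}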
+
/*
* Called from scheduler to remove the counters of the current task,
* with interrupts disabled.
@@ -1061,6 +1140,8 @@ void perf_counter_task_sched_out(struct task_struct *task,
ctx->task = next;
next_ctx->task = task;
do_switch = 0;
+
+ perf_counter_sync_stat(ctx, next_ctx);
}
spin_unlock(&next_ctx->lock);
spin_unlock(&ctx->lock);
@@ -1348,9 +1429,56 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
}
/*
+ * Enable all of a task's counters that have been marked enable-on-exec.
+ * This expects task == current.
+ */
+static void perf_counter_enable_on_exec(struct task_struct *task)
+{
+ struct perf_counter_context *ctx;
+ struct perf_counter *counter;
+ unsigned long flags;
+ int enabled = 0;
+
+ local_irq_save(flags);
+ ctx = task->perf_counter_ctxp;
+ if (!ctx || !ctx->nr_counters)
+ goto out;
+
+ __perf_counter_task_sched_out(ctx);
+
+ spin_lock(&ctx->lock);
+
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ if (!counter->attr.enable_on_exec)
+ continue;
+ counter->attr.enable_on_exec = 0;
+ if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ continue;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->tstamp_enabled =
+ ctx->time - counter->total_time_enabled;
+ enabled = 1;
+ }
+
+ /*
+ * Unclone this context if we enabled any counter.
+ */
+ if (enabled && ctx->parent_ctx) {
+ put_ctx(ctx->parent_ctx);
+ ctx->parent_ctx = NULL;
+ }
+
+ spin_unlock(&ctx->lock);
+
+ perf_counter_task_sched_in(task, smp_processor_id());
+ out:
+ local_irq_restore(flags);
+}
+
+/*
* Cross CPU call to read the hardware counter
*/
-static void __read(void *info)
+static void __perf_counter_read(void *info)
{
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;
@@ -1372,7 +1500,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
*/
if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
smp_call_function_single(counter->oncpu,
- __read, counter, 1);
+ __perf_counter_read, counter, 1);
} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
update_counter_times(counter);
}
@@ -1508,11 +1636,13 @@ static void free_counter(struct perf_counter *counter)
{
perf_pending_sync(counter);
- atomic_dec(&nr_counters);
- if (counter->attr.mmap)
- atomic_dec(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_dec(&nr_comm_counters);
+ if (!counter->parent) {
+ atomic_dec(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_dec(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_dec(&nr_comm_counters);
+ }
if (counter->destroy)
counter->destroy(counter);
@@ -1751,6 +1881,14 @@ int perf_counter_task_disable(void)
return 0;
}
+static int perf_counter_index(struct perf_counter *counter)
+{
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ return 0;
+
+ return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
+}
+
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We can not serialize this because the arch
@@ -1775,11 +1913,17 @@ void perf_counter_update_userpage(struct perf_counter *counter)
preempt_disable();
++userpg->lock;
barrier();
- userpg->index = counter->hw.idx;
+ userpg->index = perf_counter_index(counter);
userpg->offset = atomic64_read(&counter->count);
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
userpg->offset -= atomic64_read(&counter->hw.prev_count);
+ userpg->time_enabled = counter->total_time_enabled +
+ atomic64_read(&counter->child_total_time_enabled);
+
+ userpg->time_running = counter->total_time_running +
+ atomic64_read(&counter->child_total_time_running);
+
barrier();
++userpg->lock;
preempt_enable();
@@ -2483,15 +2627,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
u32 cpu, reserved;
} cpu_entry;
- header.type = 0;
+ header.type = PERF_EVENT_SAMPLE;
header.size = sizeof(header);
- header.misc = PERF_EVENT_MISC_OVERFLOW;
+ header.misc = 0;
header.misc |= perf_misc_flags(data->regs);
if (sample_type & PERF_SAMPLE_IP) {
ip = perf_instruction_pointer(data->regs);
- header.type |= PERF_SAMPLE_IP;
header.size += sizeof(ip);
}
@@ -2500,7 +2643,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
tid_entry.pid = perf_counter_pid(counter, current);
tid_entry.tid = perf_counter_tid(counter, current);
- header.type |= PERF_SAMPLE_TID;
header.size += sizeof(tid_entry);
}
@@ -2510,34 +2652,25 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
*/
time = sched_clock();
- header.type |= PERF_SAMPLE_TIME;
header.size += sizeof(u64);
}
- if (sample_type & PERF_SAMPLE_ADDR) {
- header.type |= PERF_SAMPLE_ADDR;
+ if (sample_type & PERF_SAMPLE_ADDR)
header.size += sizeof(u64);
- }
- if (sample_type & PERF_SAMPLE_ID) {
- header.type |= PERF_SAMPLE_ID;
+ if (sample_type & PERF_SAMPLE_ID)
header.size += sizeof(u64);
- }
if (sample_type & PERF_SAMPLE_CPU) {
- header.type |= PERF_SAMPLE_CPU;
header.size += sizeof(cpu_entry);
cpu_entry.cpu = raw_smp_processor_id();
}
- if (sample_type & PERF_SAMPLE_PERIOD) {
- header.type |= PERF_SAMPLE_PERIOD;
+ if (sample_type & PERF_SAMPLE_PERIOD)
header.size += sizeof(u64);
- }
if (sample_type & PERF_SAMPLE_GROUP) {
- header.type |= PERF_SAMPLE_GROUP;
header.size += sizeof(u64) +
counter->nr_siblings * sizeof(group_entry);
}
@@ -2547,10 +2680,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
if (callchain) {
callchain_size = (1 + callchain->nr) * sizeof(u64);
-
- header.type |= PERF_SAMPLE_CALLCHAIN;
header.size += callchain_size;
- }
+ } else
+ header.size += sizeof(u64);
}
ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
@@ -2601,13 +2733,79 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
}
}
- if (callchain)
- perf_output_copy(&handle, callchain, callchain_size);
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+ if (callchain)
+ perf_output_copy(&handle, callchain, callchain_size);
+ else {
+ u64 nr = 0;
+ perf_output_put(&handle, nr);
+ }
+ }
perf_output_end(&handle);
}
/*
+ * read event
+ */
+
+struct perf_read_event {
+ struct perf_event_header header;
+
+ u32 pid;
+ u32 tid;
+ u64 value;
+ u64 format[3];
+};
+
+static void
+perf_counter_read_event(struct perf_counter *counter,
+ struct task_struct *task)
+{
+ struct perf_output_handle handle;
+ struct perf_read_event event = {
+ .header = {
+ .type = PERF_EVENT_READ,
+ .misc = 0,
+ .size = sizeof(event) - sizeof(event.format),
+ },
+ .pid = perf_counter_pid(counter, task),
+ .tid = perf_counter_tid(counter, task),
+ .value = atomic64_read(&counter->count),
+ };
+ int ret, i = 0;
+
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ event.header.size += sizeof(u64);
+ event.format[i++] = counter->total_time_enabled;
+ }
+
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ event.header.size += sizeof(u64);
+ event.format[i++] = counter->total_time_running;
+ }
+
+ if (counter->attr.read_format & PERF_FORMAT_ID) {
+ u64 id;
+
+ event.header.size += sizeof(u64);
+ if (counter->parent)
+ id = counter->parent->id;
+ else
+ id = counter->id;
+
+ event.format[i++] = id;
+ }
+
+ ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+ if (ret)
+ return;
+
+ perf_output_copy(&handle, &event, event.header.size);
+ perf_output_end(&handle);
+}
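
A sketch (local constants, not the uapi definitions) of how the size of the new PERF_EVENT_READ record grows with the counter's read_format: one extra u64 per requested field on top of the fixed header/pid/tid/value part, mirroring the size bookkeeping in perf_counter_read_event().

#include <stdio.h>
#include <stdint.h>

#define FMT_TOTAL_TIME_ENABLED	(1U << 0)
#define FMT_TOTAL_TIME_RUNNING	(1U << 1)
#define FMT_ID			(1U << 2)

static size_t read_event_size(unsigned int read_format)
{
	size_t size = 8 /* header */ + 4 + 4 + 8;	/* pid, tid, value */

	if (read_format & FMT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & FMT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & FMT_ID)
		size += sizeof(uint64_t);
	return size;
}

int main(void)
{
	printf("%zu bytes\n", read_event_size(FMT_TOTAL_TIME_ENABLED | FMT_ID));
	return 0;
}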
+
+/*
* fork tracking
*/
@@ -2798,6 +2996,9 @@ void perf_counter_comm(struct task_struct *task)
{
struct perf_comm_event comm_event;
+ if (task->perf_counter_ctxp)
+ perf_counter_enable_on_exec(task);
+
if (!atomic_read(&nr_comm_counters))
return;
@@ -3317,8 +3518,8 @@ out:
put_cpu_var(perf_cpu_context);
}
-void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+void __perf_swcounter_event(u32 event, u64 nr, int nmi,
+ struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data = {
.regs = regs,
@@ -3509,9 +3710,21 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
}
#endif
+atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+
+static void sw_perf_counter_destroy(struct perf_counter *counter)
+{
+ u64 event = counter->attr.config;
+
+ WARN_ON(counter->parent);
+
+ atomic_dec(&perf_swcounter_enabled[event]);
+}
+
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
const struct pmu *pmu = NULL;
+ u64 event = counter->attr.config;
/*
* Software counters (currently) can't in general distinguish
@@ -3520,7 +3733,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
* to be kernel events, and page faults are never hypervisor
* events.
*/
- switch (counter->attr.config) {
+ switch (event) {
case PERF_COUNT_SW_CPU_CLOCK:
pmu = &perf_ops_cpu_clock;
@@ -3541,6 +3754,10 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
case PERF_COUNT_SW_CONTEXT_SWITCHES:
case PERF_COUNT_SW_CPU_MIGRATIONS:
+ if (!counter->parent) {
+ atomic_inc(&perf_swcounter_enabled[event]);
+ counter->destroy = sw_perf_counter_destroy;
+ }
pmu = &perf_ops_generic;
break;
}
@@ -3556,6 +3773,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
int cpu,
struct perf_counter_context *ctx,
struct perf_counter *group_leader,
+ struct perf_counter *parent_counter,
gfp_t gfpflags)
{
const struct pmu *pmu;
@@ -3591,6 +3809,8 @@ perf_counter_alloc(struct perf_counter_attr *attr,
counter->ctx = ctx;
counter->oncpu = -1;
+ counter->parent = parent_counter;
+
counter->ns = get_pid_ns(current->nsproxy->pid_ns);
counter->id = atomic64_inc_return(&perf_counter_id);
@@ -3648,11 +3868,13 @@ done:
counter->pmu = pmu;
- atomic_inc(&nr_counters);
- if (counter->attr.mmap)
- atomic_inc(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_inc(&nr_comm_counters);
+ if (!counter->parent) {
+ atomic_inc(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_inc(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_inc(&nr_comm_counters);
+ }
return counter;
}
@@ -3815,7 +4037,7 @@ SYSCALL_DEFINE5(perf_counter_open,
}
counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
- GFP_KERNEL);
+ NULL, GFP_KERNEL);
ret = PTR_ERR(counter);
if (IS_ERR(counter))
goto err_put_context;
@@ -3881,7 +4103,8 @@ inherit_counter(struct perf_counter *parent_counter,
child_counter = perf_counter_alloc(&parent_counter->attr,
parent_counter->cpu, child_ctx,
- group_leader, GFP_KERNEL);
+ group_leader, parent_counter,
+ GFP_KERNEL);
if (IS_ERR(child_counter))
return child_counter;
get_ctx(child_ctx);
@@ -3904,12 +4127,6 @@ inherit_counter(struct perf_counter *parent_counter,
*/
add_counter_to_ctx(child_counter, child_ctx);
- child_counter->parent = parent_counter;
- /*
- * inherit into child's child as well:
- */
- child_counter->attr.inherit = 1;
-
/*
* Get a reference to the parent filp - we will fput it
* when the child counter exits. This is safe to do because
@@ -3953,10 +4170,14 @@ static int inherit_group(struct perf_counter *parent_counter,
}
static void sync_child_counter(struct perf_counter *child_counter,
- struct perf_counter *parent_counter)
+ struct task_struct *child)
{
+ struct perf_counter *parent_counter = child_counter->parent;
u64 child_val;
+ if (child_counter->attr.inherit_stat)
+ perf_counter_read_event(child_counter, child);
+
child_val = atomic64_read(&child_counter->count);
/*
@@ -3985,7 +4206,8 @@ static void sync_child_counter(struct perf_counter *child_counter,
static void
__perf_counter_exit_task(struct perf_counter *child_counter,
- struct perf_counter_context *child_ctx)
+ struct perf_counter_context *child_ctx,
+ struct task_struct *child)
{
struct perf_counter *parent_counter;
@@ -3999,7 +4221,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter,
* counters need to be zapped - but otherwise linger.
*/
if (parent_counter) {
- sync_child_counter(child_counter, parent_counter);
+ sync_child_counter(child_counter, child);
free_counter(child_counter);
}
}
@@ -4061,7 +4283,7 @@ void perf_counter_exit_task(struct task_struct *child)
again:
list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
list_entry)
- __perf_counter_exit_task(child_counter, child_ctx);
+ __perf_counter_exit_task(child_counter, child_ctx, child);
/*
* If the last counter was a group counter, it will have appended all
diff --git a/kernel/pid.c b/kernel/pid.c
index 31310b5d3f50..5fa1db48d8b7 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -36,6 +36,7 @@
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
+#include <linux/kmemleak.h>
#define pid_hashfn(nr, ns) \
hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -512,6 +513,12 @@ void __init pidhash_init(void)
pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
if (!pid_hash)
panic("Could not alloc pidhash!\n");
+ /*
+ * pid_hash contains references to allocated struct pid objects and it
+ * must be scanned by kmemleak to avoid false positives.
+ */
+ kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
+ GFP_KERNEL);
for (i = 0; i < pidhash_size; i++)
INIT_HLIST_HEAD(&pid_hash[i]);
}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 81d2e7464893..04b3a83d686f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -298,8 +298,8 @@ int hibernation_snapshot(int platform_mode)
if (error)
return error;
- /* Free memory before shutting down devices. */
- error = swsusp_shrink_memory();
+ /* Preallocate image memory before shutting down devices. */
+ error = hibernate_preallocate_memory();
if (error)
goto Close;
@@ -315,6 +315,10 @@ int hibernation_snapshot(int platform_mode)
/* Control returns here after successful restore */
Resume_devices:
+ /* We may need to release the preallocated image pages here. */
+ if (error || !in_suspend)
+ swsusp_free();
+
dpm_resume_end(in_suspend ?
(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
resume_console();
@@ -460,11 +464,11 @@ int hibernation_platform_enter(void)
error = hibernation_ops->prepare();
if (error)
- goto Platofrm_finish;
+ goto Platform_finish;
error = disable_nonboot_cpus();
if (error)
- goto Platofrm_finish;
+ goto Platform_finish;
local_irq_disable();
sysdev_suspend(PMSG_HIBERNATE);
@@ -476,7 +480,7 @@ int hibernation_platform_enter(void)
* We don't need to reenable the nonboot CPUs or resume consoles, since
* the system is going to be halted anyway.
*/
- Platofrm_finish:
+ Platform_finish:
hibernation_ops->finish();
dpm_suspend_noirq(PMSG_RESTORE);
@@ -578,7 +582,10 @@ int hibernate(void)
goto Thaw;
error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
- if (in_suspend && !error) {
+ if (error)
+ goto Thaw;
+
+ if (in_suspend) {
unsigned int flags = 0;
if (hibernation_mode == HIBERNATION_PLATFORM)
@@ -590,8 +597,8 @@ int hibernate(void)
power_down();
} else {
pr_debug("PM: Image restored successfully.\n");
- swsusp_free();
}
+
Thaw:
thaw_processes();
Finish:
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 26d5a26f82e3..46c5a26630a3 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -74,7 +74,7 @@ extern asmlinkage int swsusp_arch_resume(void);
extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
-extern int swsusp_shrink_memory(void);
+extern int hibernate_preallocate_memory(void);
/**
* Auxiliary structure used for reading the snapshot image data and
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 523a451b45d3..e27acf5fe085 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1033,6 +1033,25 @@ copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
+/*
+ * Numbers of normal and highmem page frames allocated for hibernation image
+ * before suspending devices.
+ */
+unsigned int alloc_normal, alloc_highmem;
+/*
+ * Memory bitmap used for marking saveable pages (during hibernation) or
+ * hibernation image pages (during restore)
+ */
+static struct memory_bitmap orig_bm;
+/*
+ * Memory bitmap used during hibernation for marking allocated page frames that
+ * will contain copies of saveable pages. During restore it is initially used
+ * for marking hibernation image pages, but then the set bits from it are
+ * duplicated in @orig_bm and it is released. On highmem systems it is next
+ * used for marking "safe" highmem pages, but it has to be reinitialized for
+ * this purpose.
+ */
+static struct memory_bitmap copy_bm;
/**
* swsusp_free - free pages allocated for the suspend.
@@ -1064,74 +1083,286 @@ void swsusp_free(void)
nr_meta_pages = 0;
restore_pblist = NULL;
buffer = NULL;
+ alloc_normal = 0;
+ alloc_highmem = 0;
}
+/* Helper functions used for the shrinking of memory. */
+
+#define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
+
/**
- * swsusp_shrink_memory - Try to free as much memory as needed
+ * preallocate_image_pages - Allocate a number of pages for hibernation image
+ * @nr_pages: Number of page frames to allocate.
+ * @mask: GFP flags to use for the allocation.
*
- * ... but do not OOM-kill anyone
- *
- * Notice: all userland should be stopped before it is called, or
- * livelock is possible.
+ * Return value: Number of page frames actually allocated
*/
+static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
+{
+ unsigned long nr_alloc = 0;
+
+ while (nr_pages > 0) {
+ struct page *page;
+
+ page = alloc_image_page(mask);
+ if (!page)
+ break;
+ memory_bm_set_bit(&copy_bm, page_to_pfn(page));
+ if (PageHighMem(page))
+ alloc_highmem++;
+ else
+ alloc_normal++;
+ nr_pages--;
+ nr_alloc++;
+ }
+
+ return nr_alloc;
+}
+
+static unsigned long preallocate_image_memory(unsigned long nr_pages)
+{
+ return preallocate_image_pages(nr_pages, GFP_IMAGE);
+}
-#define SHRINK_BITE 10000
-static inline unsigned long __shrink_memory(long tmp)
+#ifdef CONFIG_HIGHMEM
+static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
- if (tmp > SHRINK_BITE)
- tmp = SHRINK_BITE;
- return shrink_all_memory(tmp);
+ return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
+}
+
+/**
+ * __fraction - Compute (an approximation of) x * (multiplier / base)
+ */
+static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
+{
+ x *= multiplier;
+ do_div(x, base);
+ return (unsigned long)x;
+}
+
+static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
+ unsigned long highmem,
+ unsigned long total)
+{
+ unsigned long alloc = __fraction(nr_pages, highmem, total);
+
+ return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
+}
+#else /* CONFIG_HIGHMEM */
+static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
+{
+ return 0;
+}
+
+static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
+ unsigned long highmem,
+ unsigned long total)
+{
+ return 0;
+}
+#endif /* CONFIG_HIGHMEM */
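
A user-space sketch of the __fraction() helper: x * (multiplier / base) computed in 64 bits to split the image allocation between highmem and lowmem. Plain division is used here; the kernel version goes through do_div() for the 64-bit divide.

#include <stdio.h>
#include <stdint.h>

static unsigned long fraction(uint64_t x, uint64_t multiplier, uint64_t base)
{
	x *= multiplier;
	return (unsigned long)(x / base);
}

int main(void)
{
	/* Spread 1000 image pages across zones: 300 of 4000 frames are highmem. */
	printf("highmem share: %lu pages\n", fraction(1000, 300, 4000));	/* 75 */
	return 0;
}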
+
+/**
+ * free_unnecessary_pages - Release preallocated pages not needed for the image
+ */
+static void free_unnecessary_pages(void)
+{
+ unsigned long save_highmem, to_free_normal, to_free_highmem;
+
+ to_free_normal = alloc_normal - count_data_pages();
+ save_highmem = count_highmem_pages();
+ if (alloc_highmem > save_highmem) {
+ to_free_highmem = alloc_highmem - save_highmem;
+ } else {
+ to_free_highmem = 0;
+ to_free_normal -= save_highmem - alloc_highmem;
+ }
+
+ memory_bm_position_reset(&copy_bm);
+
+ while (to_free_normal > 0 && to_free_highmem > 0) {
+ unsigned long pfn = memory_bm_next_pfn(&copy_bm);
+ struct page *page = pfn_to_page(pfn);
+
+ if (PageHighMem(page)) {
+ if (!to_free_highmem)
+ continue;
+ to_free_highmem--;
+ alloc_highmem--;
+ } else {
+ if (!to_free_normal)
+ continue;
+ to_free_normal--;
+ alloc_normal--;
+ }
+ memory_bm_clear_bit(&copy_bm, pfn);
+ swsusp_unset_page_forbidden(page);
+ swsusp_unset_page_free(page);
+ __free_page(page);
+ }
+}
+
+/**
+ * minimum_image_size - Estimate the minimum acceptable size of an image
+ * @saveable: Number of saveable pages in the system.
+ *
+ * We want to avoid attempting to free too much memory too hard, so estimate the
+ * minimum acceptable size of a hibernation image to use as the lower limit for
+ * preallocating memory.
+ *
+ * We assume that the minimum image size should be proportional to
+ *
+ * [number of saveable pages] - [number of pages that can be freed in theory]
+ *
+ * where the second term is the sum of (1) reclaimable slab pages, (2) active
+ * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
+ * minus mapped file pages.
+ */
+static unsigned long minimum_image_size(unsigned long saveable)
+{
+ unsigned long size;
+
+ size = global_page_state(NR_SLAB_RECLAIMABLE)
+ + global_page_state(NR_ACTIVE_ANON)
+ + global_page_state(NR_INACTIVE_ANON)
+ + global_page_state(NR_ACTIVE_FILE)
+ + global_page_state(NR_INACTIVE_FILE)
+ - global_page_state(NR_FILE_MAPPED);
+
+ return saveable <= size ? 0 : saveable - size;
}
-int swsusp_shrink_memory(void)
+/**
+ * hibernate_preallocate_memory - Preallocate memory for hibernation image
+ *
+ * To create a hibernation image it is necessary to make a copy of every page
+ * frame in use. We also need a number of page frames to be free during
+ * hibernation for allocations made while saving the image and for device
+ * drivers, in case they need to allocate memory from their hibernation
+ * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES,
+ * respectively, both of which are rough estimates). To make this happen, we
+ * compute the total number of available page frames and allocate at least
+ *
+ * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES
+ *
+ * of them, which corresponds to the maximum size of a hibernation image.
+ *
+ * If image_size is set below the number following from the above formula,
+ * the preallocation of memory is continued until the total number of saveable
+ * pages in the system is below the requested image size or the minimum
+ * acceptable image size returned by minimum_image_size(), whichever is greater.
+ */
+int hibernate_preallocate_memory(void)
{
- long tmp;
struct zone *zone;
- unsigned long pages = 0;
- unsigned int i = 0;
- char *p = "-\\|/";
+ unsigned long saveable, size, max_size, count, highmem, pages = 0;
+ unsigned long alloc, save_highmem, pages_highmem;
struct timeval start, stop;
+ int error;
- printk(KERN_INFO "PM: Shrinking memory... ");
+ printk(KERN_INFO "PM: Preallocating image memory... ");
do_gettimeofday(&start);
- do {
- long size, highmem_size;
-
- highmem_size = count_highmem_pages();
- size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
- tmp = size;
- size += highmem_size;
- for_each_populated_zone(zone) {
- tmp += snapshot_additional_pages(zone);
- if (is_highmem(zone)) {
- highmem_size -=
- zone_page_state(zone, NR_FREE_PAGES);
- } else {
- tmp -= zone_page_state(zone, NR_FREE_PAGES);
- tmp += zone->lowmem_reserve[ZONE_NORMAL];
- }
- }
- if (highmem_size < 0)
- highmem_size = 0;
+ error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
+ if (error)
+ goto err_out;
- tmp += highmem_size;
- if (tmp > 0) {
- tmp = __shrink_memory(tmp);
- if (!tmp)
- return -ENOMEM;
- pages += tmp;
- } else if (size > image_size / PAGE_SIZE) {
- tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
- pages += tmp;
- }
- printk("\b%c", p[i++%4]);
- } while (tmp > 0);
+ error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
+ if (error)
+ goto err_out;
+
+ alloc_normal = 0;
+ alloc_highmem = 0;
+
+ /* Count the number of saveable data pages. */
+ save_highmem = count_highmem_pages();
+ saveable = count_data_pages();
+
+ /*
+ * Compute the total number of page frames we can use (count) and the
+ * number of pages needed for image metadata (size).
+ */
+ count = saveable;
+ saveable += save_highmem;
+ highmem = save_highmem;
+ size = 0;
+ for_each_populated_zone(zone) {
+ size += snapshot_additional_pages(zone);
+ if (is_highmem(zone))
+ highmem += zone_page_state(zone, NR_FREE_PAGES);
+ else
+ count += zone_page_state(zone, NR_FREE_PAGES);
+ }
+ count += highmem;
+ count -= totalreserve_pages;
+
+ /* Compute the maximum number of saveable pages to leave in memory. */
+ max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES;
+ size = DIV_ROUND_UP(image_size, PAGE_SIZE);
+ if (size > max_size)
+ size = max_size;
+ /*
+ * If the maximum is not less than the current number of saveable pages
+ * in memory, allocate page frames for the image and we're done.
+ */
+ if (size >= saveable) {
+ pages = preallocate_image_highmem(save_highmem);
+ pages += preallocate_image_memory(saveable - pages);
+ goto out;
+ }
+
+ /* Estimate the minimum size of the image. */
+ pages = minimum_image_size(saveable);
+ if (size < pages)
+ size = min_t(unsigned long, pages, max_size);
+
+ /*
+ * Let the memory management subsystem know that we're going to need a
+ * large number of page frames to allocate and make it free some memory.
+ * NOTE: If this is not done, performance will be hurt badly in some
+ * test cases.
+ */
+ shrink_all_memory((saveable - size) / 2);
+
+ /*
+ * The number of saveable pages in memory was too high, so apply some
+ * pressure to decrease it. First, make room for the largest possible
+ * image and fail if that doesn't work. Next, try to decrease the size
+ * of the image as much as indicated by 'size' using allocations from
+ * highmem and non-highmem zones separately.
+ */
+ pages_highmem = preallocate_image_highmem(highmem / 2);
+ alloc = (count - max_size) - pages_highmem;
+ pages = preallocate_image_memory(alloc);
+ if (pages < alloc)
+ goto err_out;
+ size = max_size - size;
+ alloc = size;
+ size = preallocate_highmem_fraction(size, highmem, count);
+ pages_highmem += size;
+ alloc -= size;
+ pages += preallocate_image_memory(alloc);
+ pages += pages_highmem;
+
+ /*
+ * We only need as many page frames for the image as there are saveable
+ * pages in memory, but we have allocated more. Release the excessive
+ * ones now.
+ */
+ free_unnecessary_pages();
+
+ out:
do_gettimeofday(&stop);
- printk("\bdone (%lu pages freed)\n", pages);
- swsusp_show_speed(&start, &stop, pages, "Freed");
+ printk(KERN_CONT "done (allocated %lu pages)\n", pages);
+ swsusp_show_speed(&start, &stop, pages, "Allocated");
return 0;
+
+ err_out:
+ printk(KERN_CONT "\n");
+ swsusp_free();
+ return -ENOMEM;
}
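
A worked example (user space, illustrative numbers only) of the sizing logic in hibernate_preallocate_memory(). The PAGES_FOR_IO and SPARE_PAGES values below are placeholders, not the definitions from kernel/power/power.h.

#include <stdio.h>

#define PAGES_FOR_IO	1024	/* placeholder */
#define SPARE_PAGES	256	/* placeholder */

int main(void)
{
	unsigned long count = 250000;	/* usable page frames */
	unsigned long meta  = 2000;	/* image metadata pages */
	unsigned long max_size;

	/* Largest number of saveable pages that may stay in memory. */
	max_size = (count - (meta + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES;

	/* Everything else is preallocated for the image and for I/O. */
	printf("max_size = %lu, preallocated >= %lu pages\n",
	       max_size, count - max_size);
	return 0;
}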
#ifdef CONFIG_HIGHMEM
@@ -1142,7 +1373,7 @@ int swsusp_shrink_memory(void)
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
- unsigned int free_highmem = count_free_highmem_pages();
+ unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
if (free_highmem >= nr_highmem)
nr_highmem = 0;
@@ -1164,19 +1395,17 @@ count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
struct zone *zone;
- unsigned int free = 0, meta = 0;
+ unsigned int free = alloc_normal;
- for_each_zone(zone) {
- meta += snapshot_additional_pages(zone);
+ for_each_zone(zone)
if (!is_highmem(zone))
free += zone_page_state(zone, NR_FREE_PAGES);
- }
nr_pages += count_pages_for_highmem(nr_highmem);
- pr_debug("PM: Normal pages needed: %u + %u + %u, available pages: %u\n",
- nr_pages, PAGES_FOR_IO, meta, free);
+ pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
+ nr_pages, PAGES_FOR_IO, free);
- return free > nr_pages + PAGES_FOR_IO + meta;
+ return free > nr_pages + PAGES_FOR_IO;
}
#ifdef CONFIG_HIGHMEM
@@ -1198,7 +1427,7 @@ static inline int get_highmem_buffer(int safe_needed)
*/
static inline unsigned int
-alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
+alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
unsigned int to_alloc = count_free_highmem_pages();
@@ -1218,7 +1447,7 @@ alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
static inline int get_highmem_buffer(int safe_needed) { return 0; }
static inline unsigned int
-alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
+alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
@@ -1237,51 +1466,36 @@ static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
unsigned int nr_pages, unsigned int nr_highmem)
{
- int error;
-
- error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
- if (error)
- goto Free;
-
- error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
- if (error)
- goto Free;
+ int error = 0;
if (nr_highmem > 0) {
error = get_highmem_buffer(PG_ANY);
if (error)
- goto Free;
-
- nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
+ goto err_out;
+ if (nr_highmem > alloc_highmem) {
+ nr_highmem -= alloc_highmem;
+ nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
+ }
}
- while (nr_pages-- > 0) {
- struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
-
- if (!page)
- goto Free;
+ if (nr_pages > alloc_normal) {
+ nr_pages -= alloc_normal;
+ while (nr_pages-- > 0) {
+ struct page *page;
- memory_bm_set_bit(copy_bm, page_to_pfn(page));
+ page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
+ if (!page)
+ goto err_out;
+ memory_bm_set_bit(copy_bm, page_to_pfn(page));
+ }
}
+
return 0;
- Free:
+ err_out:
swsusp_free();
- return -ENOMEM;
+ return error;
}
-/* Memory bitmap used for marking saveable pages (during suspend) or the
- * suspend image pages (during resume)
- */
-static struct memory_bitmap orig_bm;
-/* Memory bitmap used on suspend for marking allocated pages that will contain
- * the copies of saveable pages. During resume it is initially used for
- * marking the suspend image pages, but then its set bits are duplicated in
- * @orig_bm and it is released. Next, on systems with high memory, it may be
- * used for marking "safe" highmem pages, but it has to be reinitialized for
- * this purpose.
- */
-static struct memory_bitmap copy_bm;
-
asmlinkage int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
diff --git a/kernel/printk.c b/kernel/printk.c
index b4d97b54c1ec..41fe60995530 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -37,6 +37,12 @@
#include <asm/uaccess.h>
/*
+ * for_each_console() allows you to iterate on each console
+ */
+#define for_each_console(con) \
+ for (con = console_drivers; con != NULL; con = con->next)
+
+/*
* Architectures can override it:
*/
void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
@@ -412,7 +418,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
{
struct console *con;
- for (con = console_drivers; con; con = con->next) {
+ for_each_console(con) {
if ((con->flags & CON_ENABLED) && con->write &&
(cpu_online(smp_processor_id()) ||
(con->flags & CON_ANYTIME)))
@@ -544,7 +550,7 @@ static int have_callable_console(void)
{
struct console *con;
- for (con = console_drivers; con; con = con->next)
+ for_each_console(con)
if (con->flags & CON_ANYTIME)
return 1;
@@ -1082,7 +1088,7 @@ void console_unblank(void)
console_locked = 1;
console_may_schedule = 0;
- for (c = console_drivers; c != NULL; c = c->next)
+ for_each_console(c)
if ((c->flags & CON_ENABLED) && c->unblank)
c->unblank();
release_console_sem();
@@ -1097,7 +1103,7 @@ struct tty_driver *console_device(int *index)
struct tty_driver *driver = NULL;
acquire_console_sem();
- for (c = console_drivers; c != NULL; c = c->next) {
+ for_each_console(c) {
if (!c->device)
continue;
driver = c->device(c, index);
@@ -1134,25 +1140,49 @@ EXPORT_SYMBOL(console_start);
* to register the console printing procedure with printk() and to
* print any messages that were printed by the kernel before the
* console driver was initialized.
+ *
+ * This can happen pretty early during the boot process (because of
+ * early_printk) - sometimes before setup_arch() completes - be careful
+ * of what kernel features are used - they may not be initialised yet.
+ *
+ * There are two types of consoles - bootconsoles (early_printk) and
+ * "real" consoles (everything which is not a bootconsole) which are
+ * handled differently.
+ * - Any number of bootconsoles can be registered at any time.
+ * - As soon as a "real" console is registered, all bootconsoles
+ * will be unregistered automatically.
+ * - Once a "real" console is registered, any attempt to register a
+ * bootconsole will be rejected.
*/
-void register_console(struct console *console)
+void register_console(struct console *newcon)
{
int i;
unsigned long flags;
- struct console *bootconsole = NULL;
+ struct console *bcon = NULL;
- if (console_drivers) {
- if (console->flags & CON_BOOT)
- return;
- if (console_drivers->flags & CON_BOOT)
- bootconsole = console_drivers;
+ /*
+ * before we register a new CON_BOOT console, make sure we don't
+ * already have a valid console
+ */
+ if (console_drivers && newcon->flags & CON_BOOT) {
+ /* find the last or real console */
+ for_each_console(bcon) {
+ if (!(bcon->flags & CON_BOOT)) {
+ printk(KERN_INFO "Too late to register bootconsole %s%d\n",
+ newcon->name, newcon->index);
+ return;
+ }
+ }
}
- if (preferred_console < 0 || bootconsole || !console_drivers)
+ if (console_drivers && console_drivers->flags & CON_BOOT)
+ bcon = console_drivers;
+
+ if (preferred_console < 0 || bcon || !console_drivers)
preferred_console = selected_console;
- if (console->early_setup)
- console->early_setup();
+ if (newcon->early_setup)
+ newcon->early_setup();
/*
* See if we want to use this console driver. If we
@@ -1160,13 +1190,13 @@ void register_console(struct console *console)
* that registers here.
*/
if (preferred_console < 0) {
- if (console->index < 0)
- console->index = 0;
- if (console->setup == NULL ||
- console->setup(console, NULL) == 0) {
- console->flags |= CON_ENABLED;
- if (console->device) {
- console->flags |= CON_CONSDEV;
+ if (newcon->index < 0)
+ newcon->index = 0;
+ if (newcon->setup == NULL ||
+ newcon->setup(newcon, NULL) == 0) {
+ newcon->flags |= CON_ENABLED;
+ if (newcon->device) {
+ newcon->flags |= CON_CONSDEV;
preferred_console = 0;
}
}
@@ -1178,47 +1208,53 @@ void register_console(struct console *console)
*/
for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
i++) {
- if (strcmp(console_cmdline[i].name, console->name) != 0)
+ if (strcmp(console_cmdline[i].name, newcon->name) != 0)
continue;
- if (console->index >= 0 &&
- console->index != console_cmdline[i].index)
+ if (newcon->index >= 0 &&
+ newcon->index != console_cmdline[i].index)
continue;
- if (console->index < 0)
- console->index = console_cmdline[i].index;
+ if (newcon->index < 0)
+ newcon->index = console_cmdline[i].index;
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
if (console_cmdline[i].brl_options) {
- console->flags |= CON_BRL;
- braille_register_console(console,
+ newcon->flags |= CON_BRL;
+ braille_register_console(newcon,
console_cmdline[i].index,
console_cmdline[i].options,
console_cmdline[i].brl_options);
return;
}
#endif
- if (console->setup &&
- console->setup(console, console_cmdline[i].options) != 0)
+ if (newcon->setup &&
+ newcon->setup(newcon, console_cmdline[i].options) != 0)
break;
- console->flags |= CON_ENABLED;
- console->index = console_cmdline[i].index;
+ newcon->flags |= CON_ENABLED;
+ newcon->index = console_cmdline[i].index;
if (i == selected_console) {
- console->flags |= CON_CONSDEV;
+ newcon->flags |= CON_CONSDEV;
preferred_console = selected_console;
}
break;
}
- if (!(console->flags & CON_ENABLED))
+ if (!(newcon->flags & CON_ENABLED))
return;
- if (bootconsole && (console->flags & CON_CONSDEV)) {
- printk(KERN_INFO "console handover: boot [%s%d] -> real [%s%d]\n",
- bootconsole->name, bootconsole->index,
- console->name, console->index);
- unregister_console(bootconsole);
- console->flags &= ~CON_PRINTBUFFER;
+ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
+ /* we need to iterate through twice, to make sure we print
+ * everything out, before we unregister the console(s)
+ */
+ printk(KERN_INFO "console handover:");
+ for_each_console(bcon)
+ printk("boot [%s%d] ", bcon->name, bcon->index);
+ printk(" -> real [%s%d]\n", newcon->name, newcon->index);
+ for_each_console(bcon)
+ unregister_console(bcon);
+ newcon->flags &= ~CON_PRINTBUFFER;
} else {
- printk(KERN_INFO "console [%s%d] enabled\n",
- console->name, console->index);
+ printk(KERN_INFO "%sconsole [%s%d] enabled\n",
+ (newcon->flags & CON_BOOT) ? "boot" : "" ,
+ newcon->name, newcon->index);
}
/*
@@ -1226,16 +1262,16 @@ void register_console(struct console *console)
* preferred driver at the head of the list.
*/
acquire_console_sem();
- if ((console->flags & CON_CONSDEV) || console_drivers == NULL) {
- console->next = console_drivers;
- console_drivers = console;
- if (console->next)
- console->next->flags &= ~CON_CONSDEV;
+ if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
+ newcon->next = console_drivers;
+ console_drivers = newcon;
+ if (newcon->next)
+ newcon->next->flags &= ~CON_CONSDEV;
} else {
- console->next = console_drivers->next;
- console_drivers->next = console;
+ newcon->next = console_drivers->next;
+ console_drivers->next = newcon;
}
- if (console->flags & CON_PRINTBUFFER) {
+ if (newcon->flags & CON_PRINTBUFFER) {
/*
* release_console_sem() will print out the buffered messages
* for us.
@@ -1287,11 +1323,13 @@ EXPORT_SYMBOL(unregister_console);
static int __init disable_boot_consoles(void)
{
- if (console_drivers != NULL) {
- if (console_drivers->flags & CON_BOOT) {
+ struct console *con;
+
+ for_each_console(con) {
+ if (con->flags & CON_BOOT) {
printk(KERN_INFO "turn off boot console %s%d\n",
- console_drivers->name, console_drivers->index);
- return unregister_console(console_drivers);
+ con->name, con->index);
+ return unregister_console(con);
}
}
return 0;
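
Note on the handover path above: it now walks every registered boot console instead of a single bootconsole pointer, and it deliberately prints the handover message in one pass and unregisters in a second pass. A minimal sketch of the iteration helper this code relies on (the real macro is assumed to live in include/linux/console.h; treat the exact form as an assumption, not part of this hunk):

/*
 * Hedged sketch: for_each_console() is assumed to be a plain walk of the
 * singly linked console_drivers list, which is why the message loop and
 * the unregister loop must be separate passes -- unregistering while
 * printing would modify the list being walked.
 */
#define for_each_console(con) \
	for (con = console_drivers; con != NULL; con = con->next)
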
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 61c78b2c07ba..9a4184e04f29 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -152,7 +152,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
if (!dumpable && !capable(CAP_SYS_PTRACE))
return -EPERM;
- return security_ptrace_may_access(task, mode);
+ return security_ptrace_access_check(task, mode);
}
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
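
This hunk is only a rename to the new LSM hook name; the permission logic is unchanged. Assuming the hook keeps the same shape as the old one, the declaration the caller depends on would be:

/* Hedged sketch of the assumed LSM hook declaration (include/linux/security.h). */
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
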
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
deleted file mode 100644
index 0f2b0b311304..000000000000
--- a/kernel/rcuclassic.c
+++ /dev/null
@@ -1,807 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2001
- *
- * Authors: Dipankar Sarma <dipankar@in.ibm.com>
- * Manfred Spraul <manfred@colorfullife.com>
- *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/rcupdate.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/completion.h>
-#include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-#include <linux/time.h>
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key rcu_lock_key;
-struct lockdep_map rcu_lock_map =
- STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
-EXPORT_SYMBOL_GPL(rcu_lock_map);
-#endif
-
-
-/* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_ctrlblk = {
- .cur = -300,
- .completed = -300,
- .pending = -300,
- .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
- .cpumask = CPU_BITS_NONE,
-};
-
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
- .cur = -300,
- .completed = -300,
- .pending = -300,
- .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
- .cpumask = CPU_BITS_NONE,
-};
-
-static DEFINE_PER_CPU(struct rcu_data, rcu_data);
-static DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-void rcu_qsctr_inc(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- rdp->passed_quiesc = 1;
-}
-
-void rcu_bh_qsctr_inc(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
- rdp->passed_quiesc = 1;
-}
-
-static int blimit = 10;
-static int qhimark = 10000;
-static int qlowmark = 100;
-
-#ifdef CONFIG_SMP
-static void force_quiescent_state(struct rcu_data *rdp,
- struct rcu_ctrlblk *rcp)
-{
- int cpu;
- unsigned long flags;
-
- set_need_resched();
- spin_lock_irqsave(&rcp->lock, flags);
- if (unlikely(!rcp->signaled)) {
- rcp->signaled = 1;
- /*
- * Don't send IPI to itself. With irqs disabled,
- * rdp->cpu is the current cpu.
- *
- * cpu_online_mask is updated by the _cpu_down()
- * using __stop_machine(). Since we're in an irqs-disabled
- * section, __stop_machine() is not executing, hence
- * the cpu_online_mask is stable.
- *
- * However, a cpu might have been offlined _just_ before
- * we disabled irqs while entering here.
- * And rcu subsystem might not yet have handled the CPU_DEAD
- * notification, leading to the offlined cpu's bit
- * being set in the rcp->cpumask.
- *
- * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
- * sending smp_send_reschedule() to an offlined CPU.
- */
- for_each_cpu_and(cpu,
- to_cpumask(rcp->cpumask), cpu_online_mask) {
- if (cpu != rdp->cpu)
- smp_send_reschedule(cpu);
- }
- }
- spin_unlock_irqrestore(&rcp->lock, flags);
-}
-#else
-static inline void force_quiescent_state(struct rcu_data *rdp,
- struct rcu_ctrlblk *rcp)
-{
- set_need_resched();
-}
-#endif
-
-static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- long batch;
-
- head->next = NULL;
- smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
-
- /*
- * Determine the batch number of this callback.
- *
- * Using ACCESS_ONCE to avoid the following error when gcc eliminates
- * the local variable "batch" and emits code like this:
- * 1) rdp->batch = rcp->cur + 1 # gets old value
- * ......
- * 2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
- * then [*nxttail[0], *nxttail[1]) may contain callbacks
- * whose batch# == rdp->batch; see the comment of struct rcu_data.
- */
- batch = ACCESS_ONCE(rcp->cur) + 1;
-
- if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
- /* process callbacks */
- rdp->nxttail[0] = rdp->nxttail[1];
- rdp->nxttail[1] = rdp->nxttail[2];
- if (rcu_batch_after(batch - 1, rdp->batch))
- rdp->nxttail[0] = rdp->nxttail[2];
- }
-
- rdp->batch = batch;
- *rdp->nxttail[2] = head;
- rdp->nxttail[2] = &head->next;
-
- if (unlikely(++rdp->qlen > qhimark)) {
- rdp->blimit = INT_MAX;
- force_quiescent_state(rdp, &rcu_ctrlblk);
- }
-}
-
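For reference, the comment in __call_rcu() above leans on ACCESS_ONCE() to keep gcc from re-reading rcp->cur after the batch number has been computed. The usual definition (from include/linux/compiler.h; shown here as background, not part of this patch) forces a single volatile access:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* So the batch computation reads rcp->cur exactly once: */
batch = ACCESS_ONCE(rcp->cur) + 1;
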
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-
-static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
-{
- rcp->gp_start = jiffies;
- rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
-}
-
-static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
-{
- int cpu;
- long delta;
- unsigned long flags;
-
- /* Only let one CPU complain about others per time interval. */
-
- spin_lock_irqsave(&rcp->lock, flags);
- delta = jiffies - rcp->jiffies_stall;
- if (delta < 2 || rcp->cur != rcp->completed) {
- spin_unlock_irqrestore(&rcp->lock, flags);
- return;
- }
- rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
- spin_unlock_irqrestore(&rcp->lock, flags);
-
- /* OK, time to rat on our buddy... */
-
- printk(KERN_ERR "INFO: RCU detected CPU stalls:");
- for_each_possible_cpu(cpu) {
- if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
- printk(" %d", cpu);
- }
- printk(" (detected by %d, t=%ld jiffies)\n",
- smp_processor_id(), (long)(jiffies - rcp->gp_start));
-}
-
-static void print_cpu_stall(struct rcu_ctrlblk *rcp)
-{
- unsigned long flags;
-
- printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
- smp_processor_id(), jiffies,
- jiffies - rcp->gp_start);
- dump_stack();
- spin_lock_irqsave(&rcp->lock, flags);
- if ((long)(jiffies - rcp->jiffies_stall) >= 0)
- rcp->jiffies_stall =
- jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
- spin_unlock_irqrestore(&rcp->lock, flags);
- set_need_resched(); /* kick ourselves to get things going. */
-}
-
-static void check_cpu_stall(struct rcu_ctrlblk *rcp)
-{
- long delta;
-
- delta = jiffies - rcp->jiffies_stall;
- if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
- delta >= 0) {
-
- /* We haven't checked in, so go dump stack. */
- print_cpu_stall(rcp);
-
- } else if (rcp->cur != rcp->completed && delta >= 2) {
-
- /* They had two seconds to dump stack, so complain. */
- print_other_cpu_stall(rcp);
- }
-}
-
-#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
-{
-}
-
-static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/**
- * call_rcu - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu))
-{
- unsigned long flags;
-
- head->func = func;
- local_irq_save(flags);
- __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(call_rcu);
-
-/**
- * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by rcu_read_lock() and
- * rcu_read_unlock() if in interrupt context, or by rcu_read_lock_bh()
- * and rcu_read_unlock_bh() if in process context. These may be nested.
- */
-void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu))
-{
- unsigned long flags;
-
- head->func = func;
- local_irq_save(flags);
- __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(call_rcu_bh);
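
The kernel-doc above describes the callback-based API this file implemented. As a usage sketch of that documented pattern (the names here are illustrative, not taken from the patch): embed an rcu_head in the protected object, publish the replacement, and let the callback free the old copy once a grace period has elapsed.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;	/* storage for the deferred free */
};

static void foo_reclaim(struct rcu_head *rp)
{
	/* Runs only after all pre-existing RCU readers have finished. */
	kfree(container_of(rp, struct foo, rcu));
}

static void foo_update(struct foo **slot, struct foo *newp, struct foo *oldp)
{
	rcu_assign_pointer(*slot, newp);	/* publish the new version */
	call_rcu(&oldp->rcu, foo_reclaim);	/* reclaim the old one later */
}
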
-
-/*
- * Return the number of RCU batches processed thus far. Useful
- * for debug and statistics.
- */
-long rcu_batches_completed(void)
-{
- return rcu_ctrlblk.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
- * Return the number of RCU batches processed thus far. Useful
- * for debug and statistics.
- */
-long rcu_batches_completed_bh(void)
-{
- return rcu_bh_ctrlblk.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
-/* Raises the softirq for processing rcu_callbacks. */
-static inline void raise_rcu_softirq(void)
-{
- raise_softirq(RCU_SOFTIRQ);
-}
-
-/*
- * Invoke the completed RCU callbacks. They are expected to be in
- * a per-cpu list.
- */
-static void rcu_do_batch(struct rcu_data *rdp)
-{
- unsigned long flags;
- struct rcu_head *next, *list;
- int count = 0;
-
- list = rdp->donelist;
- while (list) {
- next = list->next;
- prefetch(next);
- list->func(list);
- list = next;
- if (++count >= rdp->blimit)
- break;
- }
- rdp->donelist = list;
-
- local_irq_save(flags);
- rdp->qlen -= count;
- local_irq_restore(flags);
- if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
- rdp->blimit = blimit;
-
- if (!rdp->donelist)
- rdp->donetail = &rdp->donelist;
- else
- raise_rcu_softirq();
-}
-
-/*
- * Grace period handling:
- * The grace period handling consists out of two steps:
- * - A new grace period is started.
- * This is done by rcu_start_batch. The start is not broadcasted to
- * all cpus, they must pick this up by comparing rcp->cur with
- * rdp->quiescbatch. All cpus are recorded in the
- * rcu_ctrlblk.cpumask bitmap.
- * - All cpus must go through a quiescent state.
- * Since the start of the grace period is not broadcasted, at least two
- * calls to rcu_check_quiescent_state are required:
- * The first call just notices that a new grace period is running. The
- * following calls check if there was a quiescent state since the beginning
- * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
- * the bitmap is empty, then the grace period is completed.
- * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
- * period (if necessary).
- */
-
-/*
- * Register a new batch of callbacks, and start it up if there is currently no
- * active batch and the batch to be registered has not already occurred.
- * Caller must hold rcu_ctrlblk.lock.
- */
-static void rcu_start_batch(struct rcu_ctrlblk *rcp)
-{
- if (rcp->cur != rcp->pending &&
- rcp->completed == rcp->cur) {
- rcp->cur++;
- record_gp_stall_check_time(rcp);
-
- /*
- * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
- * barrier. Otherwise it can cause tickless idle CPUs to be
- * included in rcp->cpumask, which will extend grace periods
- * unnecessarily.
- */
- smp_mb();
- cpumask_andnot(to_cpumask(rcp->cpumask),
- cpu_online_mask, nohz_cpu_mask);
-
- rcp->signaled = 0;
- }
-}
-
-/*
- * cpu went through a quiescent state since the beginning of the grace period.
- * Clear it from the cpu mask and complete the grace period if it was the last
- * cpu. Start another grace period if someone has further entries pending
- */
-static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
-{
- cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
- if (cpumask_empty(to_cpumask(rcp->cpumask))) {
- /* batch completed ! */
- rcp->completed = rcp->cur;
- rcu_start_batch(rcp);
- }
-}
-
-/*
- * Check if the cpu has gone through a quiescent state (say context
- * switch). If so, and if it hasn't already done so in this RCU
- * quiescent cycle, then indicate that it has done so.
- */
-static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- unsigned long flags;
-
- if (rdp->quiescbatch != rcp->cur) {
- /* start new grace period: */
- rdp->qs_pending = 1;
- rdp->passed_quiesc = 0;
- rdp->quiescbatch = rcp->cur;
- return;
- }
-
- /* Grace period already completed for this cpu?
- * qs_pending is checked instead of the actual bitmap to avoid
- * cacheline thrashing.
- */
- if (!rdp->qs_pending)
- return;
-
- /*
- * Was there a quiescent state since the beginning of the grace
- * period? If no, then exit and wait for the next call.
- */
- if (!rdp->passed_quiesc)
- return;
- rdp->qs_pending = 0;
-
- spin_lock_irqsave(&rcp->lock, flags);
- /*
- * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
- * during cpu startup. Ignore the quiescent state.
- */
- if (likely(rdp->quiescbatch == rcp->cur))
- cpu_quiet(rdp->cpu, rcp);
-
- spin_unlock_irqrestore(&rcp->lock, flags);
-}
-
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
- * locking requirements; the list it's pulling from has to belong to a cpu
- * which is dead and hence not processing interrupts.
- */
-static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
- struct rcu_head **tail, long batch)
-{
- unsigned long flags;
-
- if (list) {
- local_irq_save(flags);
- this_rdp->batch = batch;
- *this_rdp->nxttail[2] = list;
- this_rdp->nxttail[2] = tail;
- local_irq_restore(flags);
- }
-}
-
-static void __rcu_offline_cpu(struct rcu_data *this_rdp,
- struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
-{
- unsigned long flags;
-
- /*
- * if the cpu going offline owns the grace period
- * we can block indefinitely waiting for it, so flush
- * it here
- */
- spin_lock_irqsave(&rcp->lock, flags);
- if (rcp->cur != rcp->completed)
- cpu_quiet(rdp->cpu, rcp);
- rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
- rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
- spin_unlock(&rcp->lock);
-
- this_rdp->qlen += rdp->qlen;
- local_irq_restore(flags);
-}
-
-static void rcu_offline_cpu(int cpu)
-{
- struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
- struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
-
- __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
- &per_cpu(rcu_data, cpu));
- __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
- &per_cpu(rcu_bh_data, cpu));
- put_cpu_var(rcu_data);
- put_cpu_var(rcu_bh_data);
-}
-
-#else
-
-static void rcu_offline_cpu(int cpu)
-{
-}
-
-#endif
-
-/*
- * This does the RCU processing work from softirq context.
- */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- unsigned long flags;
- long completed_snap;
-
- if (rdp->nxtlist) {
- local_irq_save(flags);
- completed_snap = ACCESS_ONCE(rcp->completed);
-
- /*
- * move the other grace-period-completed entries to
- * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
- */
- if (!rcu_batch_before(completed_snap, rdp->batch))
- rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
- else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
- rdp->nxttail[0] = rdp->nxttail[1];
-
- /*
- * the grace period for entries in
- * [rdp->nxtlist, *rdp->nxttail[0]) has completed and
- * move these entries to donelist
- */
- if (rdp->nxttail[0] != &rdp->nxtlist) {
- *rdp->donetail = rdp->nxtlist;
- rdp->donetail = rdp->nxttail[0];
- rdp->nxtlist = *rdp->nxttail[0];
- *rdp->donetail = NULL;
-
- if (rdp->nxttail[1] == rdp->nxttail[0])
- rdp->nxttail[1] = &rdp->nxtlist;
- if (rdp->nxttail[2] == rdp->nxttail[0])
- rdp->nxttail[2] = &rdp->nxtlist;
- rdp->nxttail[0] = &rdp->nxtlist;
- }
-
- local_irq_restore(flags);
-
- if (rcu_batch_after(rdp->batch, rcp->pending)) {
- unsigned long flags2;
-
- /* and start it/schedule start if it's a new batch */
- spin_lock_irqsave(&rcp->lock, flags2);
- if (rcu_batch_after(rdp->batch, rcp->pending)) {
- rcp->pending = rdp->batch;
- rcu_start_batch(rcp);
- }
- spin_unlock_irqrestore(&rcp->lock, flags2);
- }
- }
-
- rcu_check_quiescent_state(rcp, rdp);
- if (rdp->donelist)
- rcu_do_batch(rdp);
-}
-
-static void rcu_process_callbacks(struct softirq_action *unused)
-{
- /*
- * Memory references from any prior RCU read-side critical sections
- * executed by the interrupted code must be seen before any RCU
- * grace-period manipulations below.
- */
-
- smp_mb(); /* See above block comment. */
-
- __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
- __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
-
- /*
- * Memory references from any later RCU read-side critical sections
- * executed by the interrupted code must be seen after any RCU
- * grace-period manipulations above.
- */
-
- smp_mb(); /* See above block comment. */
-}
-
-static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
-{
- /* Check for CPU stalls, if enabled. */
- check_cpu_stall(rcp);
-
- if (rdp->nxtlist) {
- long completed_snap = ACCESS_ONCE(rcp->completed);
-
- /*
- * This cpu has pending rcu entries and the grace period
- * for them has completed.
- */
- if (!rcu_batch_before(completed_snap, rdp->batch))
- return 1;
- if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
- rdp->nxttail[0] != rdp->nxttail[1])
- return 1;
- if (rdp->nxttail[0] != &rdp->nxtlist)
- return 1;
-
- /*
- * This cpu has pending rcu entries and the new batch
- * for them hasn't been started, nor has its start been scheduled.
- */
- if (rcu_batch_after(rdp->batch, rcp->pending))
- return 1;
- }
-
- /* This cpu has finished callbacks to invoke */
- if (rdp->donelist)
- return 1;
-
- /* The rcu core waits for a quiescent state from the cpu */
- if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
- return 1;
-
- /* nothing to do */
- return 0;
-}
-
-/*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, returning 1 if so. This function is part of the
- * RCU implementation; it is -not- an exported member of the RCU API.
- */
-int rcu_pending(int cpu)
-{
- return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
- __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
-}
-
-/*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so. This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
- */
-int rcu_needs_cpu(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
-
- return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
-}
-
-/*
- * Top-level function driving RCU grace-period detection, normally
- * invoked from the scheduler-clock interrupt. This function simply
- * increments counters that are read only from softirq by this same
- * CPU, so there are no memory barriers required.
- */
-void rcu_check_callbacks(int cpu, int user)
-{
- if (user ||
- (idle_cpu(cpu) && rcu_scheduler_active &&
- !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
-
- /*
- * Get here if this CPU took its interrupt from user
- * mode or from the idle loop, and if this is not a
- * nested interrupt. In this case, the CPU is in
- * a quiescent state, so count it.
- *
- * Also do a memory barrier. This is needed to handle
- * the case where writes from a preempt-disable section
- * of code get reordered into schedule() by this CPU's
- * write buffer. The memory barrier makes sure that
- * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
- * by other CPUs to happen after any such write.
- */
-
- smp_mb(); /* See above block comment. */
- rcu_qsctr_inc(cpu);
- rcu_bh_qsctr_inc(cpu);
-
- } else if (!in_softirq()) {
-
- /*
- * Get here if this CPU did not take its interrupt from
- * softirq, in other words, if it is not interrupting
- * a rcu_bh read-side critical section. This is an _bh
- * critical section, so count it. The memory barrier
- * is needed for the same reason as is the above one.
- */
-
- smp_mb(); /* See above block comment. */
- rcu_bh_qsctr_inc(cpu);
- }
- raise_rcu_softirq();
-}
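
For context, rcu_check_callbacks() is not called by users directly; it is assumed to be driven from the per-tick accounting path, roughly as below, so the "user" argument simply records whether the tick interrupted user mode (a simplified sketch of the assumed caller in kernel/timer.c, not part of this patch):

void update_process_times(int user_tick)
{
	int cpu = smp_processor_id();

	/* ... per-tick accounting and local timers ... */
	rcu_check_callbacks(cpu, user_tick);
	/* ... scheduler tick, posix CPU timers ... */
}
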
-
-static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rcp->lock, flags);
- memset(rdp, 0, sizeof(*rdp));
- rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
- rdp->donetail = &rdp->donelist;
- rdp->quiescbatch = rcp->completed;
- rdp->qs_pending = 0;
- rdp->cpu = cpu;
- rdp->blimit = blimit;
- spin_unlock_irqrestore(&rcp->lock, flags);
-}
-
-static void __cpuinit rcu_online_cpu(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
-
- rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
- rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
-}
-
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- long cpu = (long)hcpu;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rcu_online_cpu(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- rcu_offline_cpu(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata rcu_nb = {
- .notifier_call = rcu_cpu_notify,
-};
-
-/*
- * Initializes rcu mechanism. Assumed to be called early.
- * That is, before the local timer (SMP) or jiffies timer (uniprocessor) is set up.
- * Note that rcu_qsctr and friends are implicitly
- * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
- */
-void __init __rcu_init(void)
-{
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
- rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
- (void *)(long)smp_processor_id());
- /* Register notifier for non-boot CPUs */
- register_cpu_notifier(&rcu_nb);
-}
-
-module_param(blimit, int, 0);
-module_param(qhimark, int, 0);
-module_param(qlowmark, int, 0);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a967c9feb90a..eae29c25fb14 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -98,6 +98,30 @@ void synchronize_rcu(void)
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
+/**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
+ */
+void synchronize_rcu_bh(void)
+{
+ struct rcu_synchronize rcu;
+
+ if (rcu_blocking_is_gp())
+ return;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu_bh(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
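synchronize_rcu_bh() is the blocking counterpart of call_rcu_bh(): the caller sleeps until every rcu_read_lock_bh() section that was already running has finished. A hedged usage sketch, with illustrative names only (entry, table_lock, and the hlist membership are assumptions for the example):

spin_lock(&table_lock);
hlist_del_rcu(&entry->node);		/* unpublish the entry */
spin_unlock(&table_lock);

synchronize_rcu_bh();			/* wait out rcu_read_lock_bh() readers */
kfree(entry);				/* safe: no reader can still see it */
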
static void rcu_barrier_callback(struct rcu_head *notused)
{
if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -129,6 +153,7 @@ static void rcu_barrier_func(void *type)
static inline void wait_migrated_callbacks(void)
{
wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+ smp_mb(); /* In case we didn't sleep. */
}
/*
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 9b4a975a4b4a..b33db539a8ad 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -257,14 +257,14 @@ struct rcu_torture_ops {
void (*init)(void);
void (*cleanup)(void);
int (*readlock)(void);
- void (*readdelay)(struct rcu_random_state *rrsp);
+ void (*read_delay)(struct rcu_random_state *rrsp);
void (*readunlock)(int idx);
int (*completed)(void);
- void (*deferredfree)(struct rcu_torture *p);
+ void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
void (*cb_barrier)(void);
int (*stats)(char *page);
- int irqcapable;
+ int irq_capable;
char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;
@@ -320,7 +320,7 @@ rcu_torture_cb(struct rcu_head *p)
rp->rtort_mbtest = 0;
rcu_torture_free(rp);
} else
- cur_ops->deferredfree(rp);
+ cur_ops->deferred_free(rp);
}
static void rcu_torture_deferred_free(struct rcu_torture *p)
@@ -329,18 +329,18 @@ static void rcu_torture_deferred_free(struct rcu_torture *p)
}
static struct rcu_torture_ops rcu_ops = {
- .init = NULL,
- .cleanup = NULL,
- .readlock = rcu_torture_read_lock,
- .readdelay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
- .deferredfree = rcu_torture_deferred_free,
- .sync = synchronize_rcu,
- .cb_barrier = rcu_barrier,
- .stats = NULL,
- .irqcapable = 1,
- .name = "rcu"
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_torture_read_lock,
+ .read_delay = rcu_read_delay,
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_torture_completed,
+ .deferred_free = rcu_torture_deferred_free,
+ .sync = synchronize_rcu,
+ .cb_barrier = rcu_barrier,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu"
};
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
@@ -370,18 +370,18 @@ static void rcu_sync_torture_init(void)
}
static struct rcu_torture_ops rcu_sync_ops = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = rcu_torture_read_lock,
- .readdelay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = synchronize_rcu,
- .cb_barrier = NULL,
- .stats = NULL,
- .irqcapable = 1,
- .name = "rcu_sync"
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = rcu_torture_read_lock,
+ .read_delay = rcu_read_delay,
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = synchronize_rcu,
+ .cb_barrier = NULL,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_sync"
};
/*
@@ -432,33 +432,33 @@ static void rcu_bh_torture_synchronize(void)
}
static struct rcu_torture_ops rcu_bh_ops = {
- .init = NULL,
- .cleanup = NULL,
- .readlock = rcu_bh_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferredfree = rcu_bh_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
- .cb_barrier = rcu_barrier_bh,
- .stats = NULL,
- .irqcapable = 1,
- .name = "rcu_bh"
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferred_free = rcu_bh_torture_deferred_free,
+ .sync = rcu_bh_torture_synchronize,
+ .cb_barrier = rcu_barrier_bh,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_bh"
};
static struct rcu_torture_ops rcu_bh_sync_ops = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = rcu_bh_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
- .cb_barrier = NULL,
- .stats = NULL,
- .irqcapable = 1,
- .name = "rcu_bh_sync"
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = rcu_bh_torture_synchronize,
+ .cb_barrier = NULL,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_bh_sync"
};
/*
@@ -530,17 +530,17 @@ static int srcu_torture_stats(char *page)
}
static struct rcu_torture_ops srcu_ops = {
- .init = srcu_torture_init,
- .cleanup = srcu_torture_cleanup,
- .readlock = srcu_torture_read_lock,
- .readdelay = srcu_read_delay,
- .readunlock = srcu_torture_read_unlock,
- .completed = srcu_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = srcu_torture_synchronize,
- .cb_barrier = NULL,
- .stats = srcu_torture_stats,
- .name = "srcu"
+ .init = srcu_torture_init,
+ .cleanup = srcu_torture_cleanup,
+ .readlock = srcu_torture_read_lock,
+ .read_delay = srcu_read_delay,
+ .readunlock = srcu_torture_read_unlock,
+ .completed = srcu_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = srcu_torture_synchronize,
+ .cb_barrier = NULL,
+ .stats = srcu_torture_stats,
+ .name = "srcu"
};
/*
@@ -574,32 +574,49 @@ static void sched_torture_synchronize(void)
}
static struct rcu_torture_ops sched_ops = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = sched_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = sched_torture_completed,
- .deferredfree = rcu_sched_torture_deferred_free,
- .sync = sched_torture_synchronize,
- .cb_barrier = rcu_barrier_sched,
- .stats = NULL,
- .irqcapable = 1,
- .name = "sched"
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = sched_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = sched_torture_read_unlock,
+ .completed = sched_torture_completed,
+ .deferred_free = rcu_sched_torture_deferred_free,
+ .sync = sched_torture_synchronize,
+ .cb_barrier = rcu_barrier_sched,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "sched"
};
static struct rcu_torture_ops sched_ops_sync = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = sched_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = sched_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = sched_torture_synchronize,
- .cb_barrier = NULL,
- .stats = NULL,
- .name = "sched_sync"
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = sched_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = sched_torture_read_unlock,
+ .completed = sched_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = sched_torture_synchronize,
+ .cb_barrier = NULL,
+ .stats = NULL,
+ .name = "sched_sync"
+};
+
+extern int rcu_expedited_torture_stats(char *page);
+
+static struct rcu_torture_ops sched_expedited_ops = {
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = sched_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = sched_torture_read_unlock,
+ .completed = sched_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = synchronize_sched_expedited,
+ .cb_barrier = NULL,
+ .stats = rcu_expedited_torture_stats,
+ .irq_capable = 1,
+ .name = "sched_expedited"
};
/*
@@ -635,7 +652,7 @@ rcu_torture_writer(void *arg)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
old_rp->rtort_pipe_count++;
- cur_ops->deferredfree(old_rp);
+ cur_ops->deferred_free(old_rp);
}
rcu_torture_current_version++;
oldbatch = cur_ops->completed();
@@ -700,7 +717,7 @@ static void rcu_torture_timer(unsigned long unused)
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
spin_lock(&rand_lock);
- cur_ops->readdelay(&rand);
+ cur_ops->read_delay(&rand);
n_rcu_torture_timers++;
spin_unlock(&rand_lock);
preempt_disable();
@@ -738,11 +755,11 @@ rcu_torture_reader(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
set_user_nice(current, 19);
- if (irqreader && cur_ops->irqcapable)
+ if (irqreader && cur_ops->irq_capable)
setup_timer_on_stack(&t, rcu_torture_timer, 0);
do {
- if (irqreader && cur_ops->irqcapable) {
+ if (irqreader && cur_ops->irq_capable) {
if (!timer_pending(&t))
mod_timer(&t, 1);
}
@@ -757,7 +774,7 @@ rcu_torture_reader(void *arg)
}
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
- cur_ops->readdelay(&rand);
+ cur_ops->read_delay(&rand);
preempt_disable();
pipe_count = p->rtort_pipe_count;
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
@@ -778,7 +795,7 @@ rcu_torture_reader(void *arg)
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
rcutorture_shutdown_absorb("rcu_torture_reader");
- if (irqreader && cur_ops->irqcapable)
+ if (irqreader && cur_ops->irq_capable)
del_timer_sync(&t);
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
@@ -1078,6 +1095,7 @@ rcu_torture_init(void)
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
+ &sched_expedited_ops,
&srcu_ops, &sched_ops, &sched_ops_sync, };
mutex_lock(&fullstop_mutex);
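
The new sched_expedited_ops table is selected like any other: rcu_torture_init() matches the torture_type module parameter against the ->name fields in torture_ops[], so loading the module with torture_type=sched_expedited exercises the new synchronize_sched_expedited() path. A rough sketch of that selection loop (simplified from the init code, shown as an assumption):

/* Hedged sketch: how torture_type is assumed to pick an ops table. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
	cur_ops = torture_ops[i];
	if (strcmp(torture_type, cur_ops->name) == 0)
		break;
}
if (i == ARRAY_SIZE(torture_ops))
	cur_ops = NULL;		/* unknown torture_type: fail the init */
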
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0dccfbba6d26..7717b95c2027 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1533,7 +1533,7 @@ void __init __rcu_init(void)
int j;
struct rcu_node *rnp;
- printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n");
+ printk(KERN_INFO "Hierarchical RCU implementation.\n");
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
@@ -1546,7 +1546,6 @@ void __init __rcu_init(void)
rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
/* Register notifier for non-boot CPUs */
register_cpu_notifier(&rcu_nb);
- printk(KERN_WARNING "Experimental hierarchical RCU init done.\n");
}
module_param(blimit, int, 0);
diff --git a/kernel/resource.c b/kernel/resource.c
index ac5f3a36923f..78b087221c15 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -787,7 +787,7 @@ static int __init reserve_setup(char *str)
static struct resource reserve[MAXRESERVE];
for (;;) {
- int io_start, io_num;
+ unsigned int io_start, io_num;
int x = reserved;
if (get_option (&str, &io_start) != 2)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7c9098d186e6..f0c95bf719da 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -318,12 +318,12 @@ struct task_group root_task_group;
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_cfs_rq);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
@@ -692,6 +692,7 @@ static inline int cpu_of(struct rq *rq)
#define this_rq() (&__get_cpu_var(runqueues))
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+#define raw_rq() (&__raw_get_cpu_var(runqueues))
inline void update_rq_clock(struct rq *rq)
{
@@ -2614,9 +2615,32 @@ void sched_fork(struct task_struct *p, int clone_flags)
set_task_cpu(p, cpu);
/*
- * Make sure we do not leak PI boosting priority to the child:
+ * Make sure we do not leak PI boosting priority to the child.
*/
p->prio = current->normal_prio;
+
+ /*
+ * Revert to default priority/policy on fork if requested.
+ */
+ if (unlikely(p->sched_reset_on_fork)) {
+ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+ p->policy = SCHED_NORMAL;
+
+ if (p->normal_prio < DEFAULT_PRIO)
+ p->prio = DEFAULT_PRIO;
+
+ if (PRIO_TO_NICE(p->static_prio) < 0) {
+ p->static_prio = NICE_TO_PRIO(0);
+ set_load_weight(p);
+ }
+
+ /*
+ * We don't need the reset flag anymore after the fork. It has
+ * fulfilled its duty:
+ */
+ p->sched_reset_on_fork = 0;
+ }
+
if (!rt_prio(p->prio))
p->sched_class = &fair_sched_class;
@@ -6100,17 +6124,25 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
unsigned long flags;
const struct sched_class *prev_class = p->sched_class;
struct rq *rq;
+ int reset_on_fork;
/* may grab non-irq protected spin_locks */
BUG_ON(in_interrupt());
recheck:
/* double check policy once rq lock held */
- if (policy < 0)
+ if (policy < 0) {
+ reset_on_fork = p->sched_reset_on_fork;
policy = oldpolicy = p->policy;
- else if (policy != SCHED_FIFO && policy != SCHED_RR &&
- policy != SCHED_NORMAL && policy != SCHED_BATCH &&
- policy != SCHED_IDLE)
- return -EINVAL;
+ } else {
+ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
+ policy &= ~SCHED_RESET_ON_FORK;
+
+ if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ policy != SCHED_NORMAL && policy != SCHED_BATCH &&
+ policy != SCHED_IDLE)
+ return -EINVAL;
+ }
+
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
* 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
@@ -6154,6 +6186,10 @@ recheck:
/* can't change other user's priorities */
if (!check_same_owner(p))
return -EPERM;
+
+ /* Normal users shall not reset the sched_reset_on_fork flag */
+ if (p->sched_reset_on_fork && !reset_on_fork)
+ return -EPERM;
}
if (user) {
@@ -6197,6 +6233,8 @@ recheck:
if (running)
p->sched_class->put_prev_task(rq, p);
+ p->sched_reset_on_fork = reset_on_fork;
+
oldprio = p->prio;
__setscheduler(rq, p, policy, param->sched_priority);
@@ -6313,14 +6351,15 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
if (p) {
retval = security_task_getscheduler(p);
if (!retval)
- retval = p->policy;
+ retval = p->policy
+ | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
}
read_unlock(&tasklist_lock);
return retval;
}
/**
- * sys_sched_getscheduler - get the RT priority of a thread
+ * sys_sched_getparam - get the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the RT priority.
*/
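
Taken together, these hunks let userspace OR a reset-on-fork flag into the policy argument: __sched_setscheduler() strips it off, remembers it in p->sched_reset_on_fork, and sched_getscheduler() reports it back. A hedged user-side sketch (the flag value and header availability are assumptions for illustration):

#include <sched.h>
#include <string.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK	0x40000000	/* assumed value, see linux/sched.h */
#endif

static int make_fifo_but_not_inherited(void)
{
	struct sched_param sp;

	memset(&sp, 0, sizeof(sp));
	sp.sched_priority = 10;
	/* RT for this task; children fall back to SCHED_NORMAL at fork(). */
	return sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
}
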
@@ -6631,7 +6670,7 @@ EXPORT_SYMBOL(yield);
*/
void __sched io_schedule(void)
{
- struct rq *rq = &__raw_get_cpu_var(runqueues);
+ struct rq *rq = raw_rq();
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
@@ -6643,7 +6682,7 @@ EXPORT_SYMBOL(io_schedule);
long __sched io_schedule_timeout(long timeout)
{
- struct rq *rq = &__raw_get_cpu_var(runqueues);
+ struct rq *rq = raw_rq();
long ret;
delayacct_blkio_start();
@@ -7024,6 +7063,11 @@ fail:
return ret;
}
+#define RCU_MIGRATION_IDLE 0
+#define RCU_MIGRATION_NEED_QS 1
+#define RCU_MIGRATION_GOT_QS 2
+#define RCU_MIGRATION_MUST_SYNC 3
+
/*
* migration_thread - this is a highprio system thread that performs
* thread migration by bumping thread off CPU then 'pushing' onto
@@ -7031,6 +7075,7 @@ fail:
*/
static int migration_thread(void *data)
{
+ int badcpu;
int cpu = (long)data;
struct rq *rq;
@@ -7065,8 +7110,17 @@ static int migration_thread(void *data)
req = list_entry(head->next, struct migration_req, list);
list_del_init(head->next);
- spin_unlock(&rq->lock);
- __migrate_task(req->task, cpu, req->dest_cpu);
+ if (req->task != NULL) {
+ spin_unlock(&rq->lock);
+ __migrate_task(req->task, cpu, req->dest_cpu);
+ } else if (likely(cpu == (badcpu = smp_processor_id()))) {
+ req->dest_cpu = RCU_MIGRATION_GOT_QS;
+ spin_unlock(&rq->lock);
+ } else {
+ req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
+ spin_unlock(&rq->lock);
+ WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
+ }
local_irq_enable();
complete(&req->done);
@@ -10554,3 +10608,113 @@ struct cgroup_subsys cpuacct_subsys = {
.subsys_id = cpuacct_subsys_id,
};
#endif /* CONFIG_CGROUP_CPUACCT */
+
+#ifndef CONFIG_SMP
+
+int rcu_expedited_torture_stats(char *page)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+void synchronize_sched_expedited(void)
+{
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
+static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+
+#define RCU_EXPEDITED_STATE_POST -2
+#define RCU_EXPEDITED_STATE_IDLE -1
+
+static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+
+int rcu_expedited_torture_stats(char *page)
+{
+ int cnt = 0;
+ int cpu;
+
+ cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
+ for_each_online_cpu(cpu) {
+ cnt += sprintf(&page[cnt], " %d:%d",
+ cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
+ }
+ cnt += sprintf(&page[cnt], "\n");
+ return cnt;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+static long synchronize_sched_expedited_count;
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly. This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier. Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+ int cpu;
+ unsigned long flags;
+ bool need_full_sync = 0;
+ struct rq *rq;
+ struct migration_req *req;
+ long snap;
+ int trycount = 0;
+
+ smp_mb(); /* ensure prior mod happens before capturing snap. */
+ snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+ get_online_cpus();
+ while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+ put_online_cpus();
+ if (trycount++ < 10)
+ udelay(trycount * num_online_cpus());
+ else {
+ synchronize_sched();
+ return;
+ }
+ if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+ smp_mb(); /* ensure test happens before caller kfree */
+ return;
+ }
+ get_online_cpus();
+ }
+ rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
+ for_each_online_cpu(cpu) {
+ rq = cpu_rq(cpu);
+ req = &per_cpu(rcu_migration_req, cpu);
+ init_completion(&req->done);
+ req->task = NULL;
+ req->dest_cpu = RCU_MIGRATION_NEED_QS;
+ spin_lock_irqsave(&rq->lock, flags);
+ list_add(&req->list, &rq->migration_queue);
+ spin_unlock_irqrestore(&rq->lock, flags);
+ wake_up_process(rq->migration_thread);
+ }
+ for_each_online_cpu(cpu) {
+ rcu_expedited_state = cpu;
+ req = &per_cpu(rcu_migration_req, cpu);
+ rq = cpu_rq(cpu);
+ wait_for_completion(&req->done);
+ spin_lock_irqsave(&rq->lock, flags);
+ if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
+ need_full_sync = 1;
+ req->dest_cpu = RCU_MIGRATION_IDLE;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ }
+ rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+ mutex_unlock(&rcu_sched_expedited_mutex);
+ put_online_cpus();
+ if (need_full_sync)
+ synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
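
The new primitive has the same semantics as synchronize_sched(), but forces the grace period by queueing a dummy migration request on every online CPU's migration thread, trading CPU time for latency. A hedged usage sketch mirroring the classic update-then-free pattern (global_cfg, new_cfg and old_cfg are illustrative names, not from the patch):

rcu_assign_pointer(global_cfg, new_cfg);	/* publish the replacement */
synchronize_sched_expedited();			/* every CPU passes through a
						 * quiescent state, quickly */
kfree(old_cfg);					/* no preempt-disabled reader can
						 * still hold the old pointer */
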
diff --git a/kernel/smp.c b/kernel/smp.c
index ad63d8501207..415ae238753b 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -335,13 +335,6 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
generic_exec_single(cpu, data, wait);
}
-/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
-
-#ifndef arch_send_call_function_ipi_mask
-# define arch_send_call_function_ipi_mask(maskp) \
- arch_send_call_function_ipi(*(maskp))
-#endif
-
/**
* smp_call_function_many(): Run a function on a set of other CPUs.
* @mask: The set of cpus to run on (only runs on online subset).
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 62e4ff9968b5..98e02328c67d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -335,7 +335,10 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_timer_migration,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
},
#endif
{
@@ -744,6 +747,14 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "panic_on_io_nmi",
+ .data = &panic_on_io_nmi,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = KERN_BOOTLOADER_TYPE,
.procname = "bootloader_type",
.data = &bootloader_type,
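
Switching timer_migration to proc_dointvec_minmax with .extra1/.extra2 clamps the sysctl to the 0..1 range. The two bounds are assumed to be the usual static helpers already present in kernel/sysctl.c:

/* Hedged sketch: the min/max bounds referenced by .extra1/.extra2 above. */
static int zero;
static int one = 1;
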
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index c994530d166d..4cde8b9c716f 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -96,7 +96,7 @@ static DEFINE_MUTEX(show_mutex);
/*
* Collection status, active/inactive:
*/
-static int __read_mostly active;
+int __read_mostly timer_stats_active;
/*
* Beginning/end timestamps of measurement:
@@ -242,7 +242,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
struct entry *entry, input;
unsigned long flags;
- if (likely(!active))
+ if (likely(!timer_stats_active))
return;
lock = &per_cpu(lookup_lock, raw_smp_processor_id());
@@ -254,7 +254,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
input.timer_flag = timer_flag;
spin_lock_irqsave(lock, flags);
- if (!active)
+ if (!timer_stats_active)
goto out_unlock;
entry = tstat_lookup(&input, comm);
@@ -290,7 +290,7 @@ static int tstats_show(struct seq_file *m, void *v)
/*
* If still active then calculate up to now:
*/
- if (active)
+ if (timer_stats_active)
time_stop = ktime_get();
time = ktime_sub(time_stop, time_start);
@@ -368,18 +368,18 @@ static ssize_t tstats_write(struct file *file, const char __user *buf,
mutex_lock(&show_mutex);
switch (ctl[0]) {
case '0':
- if (active) {
- active = 0;
+ if (timer_stats_active) {
+ timer_stats_active = 0;
time_stop = ktime_get();
sync_access();
}
break;
case '1':
- if (!active) {
+ if (!timer_stats_active) {
reset_entries();
time_start = ktime_get();
smp_mb();
- active = 1;
+ timer_stats_active = 1;
}
break;
default:
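
Renaming the flag and dropping its static qualifier suggests that hot paths outside this file can now test it before calling into the collection code at all; together with the timer.c hunk below, timers that never recorded a start_site are skipped early. A hedged sketch of the kind of inline check this export is assumed to enable (the exact header form is an assumption):

/* Hedged sketch: callers can now cheaply bail out when collection is off. */
extern int timer_stats_active;

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
	if (likely(!timer_stats_active))
		return;
	/* ...only then pay for the hash lookup in timer_stats_update_stats()... */
}
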
diff --git a/kernel/timer.c b/kernel/timer.c
index 54d3912f8cad..0b36b9e5cc8b 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -380,6 +380,8 @@ static void timer_stats_account_timer(struct timer_list *timer)
{
unsigned int flag = 0;
+ if (likely(!timer->start_site))
+ return;
if (unlikely(tbase_get_deferrable(timer->base)))
flag |= TIMER_STATS_FLAG_DEFERRABLE;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1551f47e7669..019f380fd764 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -226,13 +226,13 @@ config BOOT_TRACER
the timings of the initcalls and traces key events and the identity
of tasks that can cause boot delays, such as context-switches.
- Its aim is to be parsed by the /scripts/bootgraph.pl tool to
+ Its aim is to be parsed by the scripts/bootgraph.pl tool to
produce pretty graphics about boot inefficiencies, giving a visual
representation of the delays during initcalls - but the raw
/debug/tracing/trace text output is readable too.
- You must pass in ftrace=initcall to the kernel command line
- to enable this on bootup.
+ You must pass in initcall_debug and ftrace=initcall to the kernel
+ command line to enable this on bootup.
config TRACE_BRANCH_PROFILING
bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3718d55fb4c3..bce9e01a29c8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -291,7 +291,9 @@ function_stat_next(void *v, int idx)
pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
again:
- rec++;
+ if (idx != 0)
+ rec++;
+
if ((void *)rec >= (void *)&pg->records[pg->index]) {
pg = pg->next;
if (!pg)
@@ -1417,10 +1419,20 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
+
+ if (!(iter->flags & FTRACE_ITER_HASH))
+ *pos = 0;
iter->flags |= FTRACE_ITER_HASH;
- return t_hash_next(m, p, pos);
+ iter->hidx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_hash_next(m, p, &l);
+ if (!p)
+ break;
+ }
+ return p;
}
static int t_hash_show(struct seq_file *m, void *v)
@@ -1467,8 +1479,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
iter->pg = iter->pg->next;
iter->idx = 0;
goto retry;
- } else {
- iter->idx = -1;
}
} else {
rec = &iter->pg->records[iter->idx++];
@@ -1497,6 +1507,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
mutex_lock(&ftrace_lock);
/*
@@ -1508,23 +1519,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
- (*pos)++;
return iter;
}
if (iter->flags & FTRACE_ITER_HASH)
return t_hash_start(m, pos);
- if (*pos > 0) {
- if (iter->idx < 0)
- return p;
- (*pos)--;
- iter->idx--;
+ iter->pg = ftrace_pages_start;
+ iter->idx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_next(m, p, &l);
+ if (!p)
+ break;
}
- p = t_next(m, p, pos);
-
- if (!p)
+ if (!p && iter->flags & FTRACE_ITER_FILTER)
return t_hash_start(m, pos);
return p;
@@ -2500,32 +2509,31 @@ int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
{
unsigned long *array = m->private;
- int index = *pos;
- (*pos)++;
-
- if (index >= ftrace_graph_count)
+ if (*pos >= ftrace_graph_count)
return NULL;
+ return &array[*pos];
+}
- return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return __g_next(m, pos);
}
static void *g_start(struct seq_file *m, loff_t *pos)
{
- void *p = NULL;
-
mutex_lock(&graph_lock);
/* Nothing, tell g_show to print all functions are enabled */
if (!ftrace_graph_count && !*pos)
return (void *)1;
- p = g_next(m, p, pos);
-
- return p;
+ return __g_next(m, pos);
}
static void g_stop(struct seq_file *m, void *p)
@@ -3152,10 +3160,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
- if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+ if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
goto out;
- last_ftrace_enabled = ftrace_enabled;
+ last_ftrace_enabled = !!ftrace_enabled;
if (ftrace_enabled) {
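
All three iterator fixes above (t_hash_start, t_start, and g_start/__g_next) converge on the same seq_file idiom: ->start() must be able to reposition from scratch for any *pos, because the core may call it again after a buffer overflow rather than continuing from ->next(). A generic sketch of that shape (first_record() and next_record() are hypothetical helpers, not ftrace functions):

static void *example_start(struct seq_file *m, loff_t *pos)
{
	void *p = first_record(m->private);	/* rewind to the beginning */
	loff_t l = 0;

	/* Re-walk forward so position *pos is honoured even on re-entry. */
	while (p && l < *pos) {
		p = next_record(m->private, p);
		l++;
	}
	return p;
}

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;				/* ->next() owns the increment */
	return next_record(m->private, v);
}
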
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 1edaa9516e81..74903b62bcb6 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -239,12 +239,52 @@ struct kmemtrace_user_event_alloc {
};
static enum print_line_t
-kmemtrace_print_alloc_user(struct trace_iterator *iter,
- struct kmemtrace_alloc_entry *entry)
+kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
+{
+ struct trace_seq *s = &iter->seq;
+ struct kmemtrace_alloc_entry *entry;
+ int ret;
+
+ trace_assign_type(entry, iter->ent);
+
+ ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
+ "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
+ entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr,
+ (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc,
+ (unsigned long)entry->gfp_flags, entry->node);
+
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free_user(struct trace_iterator *iter, int flags)
{
- struct kmemtrace_user_event_alloc *ev_alloc;
struct trace_seq *s = &iter->seq;
+ struct kmemtrace_free_entry *entry;
+ int ret;
+
+ trace_assign_type(entry, iter->ent);
+
+ ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
+ entry->type_id, (void *)entry->call_site,
+ (unsigned long)entry->ptr);
+
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_alloc_user_bin(struct trace_iterator *iter, int flags)
+{
+ struct trace_seq *s = &iter->seq;
+ struct kmemtrace_alloc_entry *entry;
struct kmemtrace_user_event *ev;
+ struct kmemtrace_user_event_alloc *ev_alloc;
+
+ trace_assign_type(entry, iter->ent);
ev = trace_seq_reserve(s, sizeof(*ev));
if (!ev)
@@ -271,12 +311,14 @@ kmemtrace_print_alloc_user(struct trace_iterator *iter,
}
static enum print_line_t
-kmemtrace_print_free_user(struct trace_iterator *iter,
- struct kmemtrace_free_entry *entry)
+kmemtrace_print_free_user_bin(struct trace_iterator *iter, int flags)
{
struct trace_seq *s = &iter->seq;
+ struct kmemtrace_free_entry *entry;
struct kmemtrace_user_event *ev;
+ trace_assign_type(entry, iter->ent);
+
ev = trace_seq_reserve(s, sizeof(*ev));
if (!ev)
return TRACE_TYPE_PARTIAL_LINE;
@@ -294,12 +336,14 @@ kmemtrace_print_free_user(struct trace_iterator *iter,
/* The two other following provide a more minimalistic output */
static enum print_line_t
-kmemtrace_print_alloc_compress(struct trace_iterator *iter,
- struct kmemtrace_alloc_entry *entry)
+kmemtrace_print_alloc_compress(struct trace_iterator *iter)
{
+ struct kmemtrace_alloc_entry *entry;
struct trace_seq *s = &iter->seq;
int ret;
+ trace_assign_type(entry, iter->ent);
+
/* Alloc entry */
ret = trace_seq_printf(s, " + ");
if (!ret)
@@ -362,12 +406,14 @@ kmemtrace_print_alloc_compress(struct trace_iterator *iter,
}
static enum print_line_t
-kmemtrace_print_free_compress(struct trace_iterator *iter,
- struct kmemtrace_free_entry *entry)
+kmemtrace_print_free_compress(struct trace_iterator *iter)
{
+ struct kmemtrace_free_entry *entry;
struct trace_seq *s = &iter->seq;
int ret;
+ trace_assign_type(entry, iter->ent);
+
/* Free entry */
ret = trace_seq_printf(s, " - ");
if (!ret)
@@ -421,32 +467,31 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
struct trace_entry *entry = iter->ent;
- switch (entry->type) {
- case TRACE_KMEM_ALLOC: {
- struct kmemtrace_alloc_entry *field;
-
- trace_assign_type(field, entry);
- if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
- return kmemtrace_print_alloc_compress(iter, field);
- else
- return kmemtrace_print_alloc_user(iter, field);
- }
-
- case TRACE_KMEM_FREE: {
- struct kmemtrace_free_entry *field;
-
- trace_assign_type(field, entry);
- if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
- return kmemtrace_print_free_compress(iter, field);
- else
- return kmemtrace_print_free_user(iter, field);
- }
+ if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
+ return TRACE_TYPE_UNHANDLED;
+ switch (entry->type) {
+ case TRACE_KMEM_ALLOC:
+ return kmemtrace_print_alloc_compress(iter);
+ case TRACE_KMEM_FREE:
+ return kmemtrace_print_free_compress(iter);
default:
return TRACE_TYPE_UNHANDLED;
}
}
+static struct trace_event kmem_trace_alloc = {
+ .type = TRACE_KMEM_ALLOC,
+ .trace = kmemtrace_print_alloc_user,
+ .binary = kmemtrace_print_alloc_user_bin,
+};
+
+static struct trace_event kmem_trace_free = {
+ .type = TRACE_KMEM_FREE,
+ .trace = kmemtrace_print_free_user,
+ .binary = kmemtrace_print_free_user_bin,
+};
+
static struct tracer kmem_tracer __read_mostly = {
.name = "kmemtrace",
.init = kmem_trace_init,
@@ -463,6 +508,21 @@ void kmemtrace_init(void)
static int __init init_kmem_tracer(void)
{
- return register_tracer(&kmem_tracer);
+ if (!register_ftrace_event(&kmem_trace_alloc)) {
+ pr_warning("Warning: could not register kmem events\n");
+ return 1;
+ }
+
+ if (!register_ftrace_event(&kmem_trace_free)) {
+ pr_warning("Warning: could not register kmem events\n");
+ return 1;
+ }
+
+ if (!register_tracer(&kmem_tracer)) {
+ pr_warning("Warning: could not register the kmem tracer\n");
+ return 1;
+ }
+
+ return 0;
}
device_initcall(init_kmem_tracer);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 04dac2638258..bf27bb7a63e2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1563,6 +1563,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
return NULL;
}
+#ifdef CONFIG_TRACING
+
#define TRACE_RECURSIVE_DEPTH 16
static int trace_recursive_lock(void)
@@ -1593,6 +1595,13 @@ static void trace_recursive_unlock(void)
current->trace_recursion--;
}
+#else
+
+#define trace_recursive_lock() (0)
+#define trace_recursive_unlock() do { } while (0)
+
+#endif
+
static DEFINE_PER_CPU(int, rb_need_resched);
/**
@@ -3104,6 +3113,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
+#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -3171,6 +3181,7 @@ static __init int rb_init_debugfs(void)
}
fs_initcall(rb_init_debugfs);
+#endif
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 076fa6f0ee48..e5060a82811d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -49,7 +49,7 @@ unsigned long __read_mostly tracing_thresh;
* On boot up, the ring buffer is set to the minimum size, so that
* we do not waste memory on systems that are not using tracing.
*/
-static int ring_buffer_expanded;
+int ring_buffer_expanded;
/*
* We need to change this state when a selftest is running.
@@ -63,7 +63,7 @@ static bool __read_mostly tracing_selftest_running;
/*
* If a tracer is running, we do not want to run SELFTEST.
*/
-static bool __read_mostly tracing_selftest_disabled;
+bool __read_mostly tracing_selftest_disabled;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
@@ -284,13 +284,12 @@ void trace_wake_up(void)
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
- int ret;
if (!str)
return 0;
- ret = strict_strtoul(str, 0, &buf_size);
+ buf_size = memparse(str, &str);
/* nr_entries can not be zero */
- if (ret < 0 || buf_size == 0)
+ if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
return 1;
@@ -1919,11 +1918,9 @@ __tracing_open(struct inode *inode, struct file *file)
if (current_trace)
*iter->trace = *current_trace;
- if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
- cpumask_clear(iter->started);
-
if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
@@ -2053,25 +2050,23 @@ static int tracing_open(struct inode *inode, struct file *file)
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct tracer *t = m->private;
+ struct tracer *t = v;
(*pos)++;
if (t)
t = t->next;
- m->private = t;
-
return t;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
- struct tracer *t = m->private;
+ struct tracer *t;
loff_t l = 0;
mutex_lock(&trace_types_lock);
- for (; t && l < *pos; t = t_next(m, t, &l))
+ for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
;
return t;
@@ -2107,18 +2102,10 @@ static struct seq_operations show_traces_seq_ops = {
static int show_traces_open(struct inode *inode, struct file *file)
{
- int ret;
-
if (tracing_disabled)
return -ENODEV;
- ret = seq_open(file, &show_traces_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = trace_types;
- }
-
- return ret;
+ return seq_open(file, &show_traces_seq_ops);
}
static ssize_t
@@ -4289,7 +4276,7 @@ __init static int tracer_alloc_buffers(void)
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
- if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
goto out_free_tracing_cpumask;
/* To save memory, keep the ring buffer size to its minimum */
@@ -4300,7 +4287,6 @@ __init static int tracer_alloc_buffers(void)
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
- cpumask_clear(tracing_reader_cpumask);
/* TODO: make the number of buffers hot pluggable with CPUS */
global_trace.buffer = ring_buffer_alloc(ring_buf_size,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6e735d4771f8..52eb0d8dcd75 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -517,6 +517,9 @@ extern unsigned long ftrace_update_tot_cnt;
extern int DYN_FTRACE_TEST_NAME(void);
#endif
+extern int ring_buffer_expanded;
+extern bool tracing_selftest_disabled;
+
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
struct trace_array *tr);
@@ -597,6 +600,7 @@ print_graph_function(struct trace_iterator *iter)
extern struct pid *ftrace_pid_trace;
+#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
if (!ftrace_pid_trace)
@@ -604,6 +608,12 @@ static inline int ftrace_trace_task(struct task_struct *task)
return test_tsk_trace_trace(task);
}
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+ return 1;
+}
+#endif
/*
* trace_iterator_flags is an enumeration that defines bit
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
index 5e32e375134d..6db005e12487 100644
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -26,6 +26,9 @@ TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
ftrace_graph_ret_entry, ignore,
TRACE_STRUCT(
TRACE_FIELD(unsigned long, ret.func, func)
+ TRACE_FIELD(unsigned long long, ret.calltime, calltime)
+ TRACE_FIELD(unsigned long long, ret.rettime, rettime)
+ TRACE_FIELD(unsigned long, ret.overrun, overrun)
TRACE_FIELD(int, ret.depth, depth)
),
TP_RAW_FMT("<-- %lx (%d)")
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aa08be69a1b6..66848aa8213e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -17,6 +17,8 @@
#include <linux/ctype.h>
#include <linux/delay.h>
+#include <asm/setup.h>
+
#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"
@@ -300,10 +302,18 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
+ struct ftrace_event_call *call = NULL;
+ loff_t l;
+
mutex_lock(&event_mutex);
- if (*pos == 0)
- m->private = ftrace_events.next;
- return t_next(m, NULL, pos);
+
+ m->private = ftrace_events.next;
+ for (l = 0; l <= *pos; ) {
+ call = t_next(m, NULL, &l);
+ if (!call)
+ break;
+ }
+ return call;
}
static void *
@@ -332,10 +342,18 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
static void *s_start(struct seq_file *m, loff_t *pos)
{
+ struct ftrace_event_call *call = NULL;
+ loff_t l;
+
mutex_lock(&event_mutex);
- if (*pos == 0)
- m->private = ftrace_events.next;
- return s_next(m, NULL, pos);
+
+ m->private = ftrace_events.next;
+ for (l = 0; l <= *pos; ) {
+ call = s_next(m, NULL, &l);
+ if (!call)
+ break;
+ }
+ return call;
}
static int t_show(struct seq_file *m, void *v)
@@ -1117,6 +1135,18 @@ struct notifier_block trace_module_nb = {
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
+static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
+
+static __init int setup_trace_event(char *str)
+{
+ strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
+ ring_buffer_expanded = 1;
+ tracing_selftest_disabled = 1;
+
+ return 1;
+}
+__setup("trace_event=", setup_trace_event);
+
static __init int event_trace_init(void)
{
struct ftrace_event_call *call;
@@ -1124,6 +1154,8 @@ static __init int event_trace_init(void)
struct dentry *entry;
struct dentry *d_events;
int ret;
+ char *buf = bootup_event_buf;
+ char *token;
d_tracer = tracing_init_dentry();
if (!d_tracer)
@@ -1169,6 +1201,19 @@ static __init int event_trace_init(void)
&ftrace_event_format_fops);
}
+ while (true) {
+ token = strsep(&buf, ",");
+
+ if (!token)
+ break;
+ if (!*token)
+ continue;
+
+ ret = ftrace_set_clr_event(token, 1);
+ if (ret)
+ pr_warning("Failed to enable trace event: %s\n", token);
+ }
+
ret = register_module_notifier(&trace_module_nb);
if (ret)
pr_warning("Failed to register trace events module notifier\n");
@@ -1318,7 +1363,7 @@ static __init void event_trace_self_tests(void)
#ifdef CONFIG_FUNCTION_TRACER
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1334,7 +1379,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
pc = preempt_count();
resched = ftrace_preempt_disable();
cpu = raw_smp_processor_id();
- disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+ disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
if (disabled != 1)
goto out;
@@ -1352,7 +1397,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
trace_nowake_buffer_unlock_commit(event, flags, pc);
out:
- atomic_dec(&per_cpu(test_event_disable, cpu));
+ atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
ftrace_preempt_enable(resched);
}
@@ -1376,10 +1421,10 @@ static __init void event_trace_self_test_with_function(void)
static __init int event_trace_self_tests_init(void)
{
-
- event_trace_self_tests();
-
- event_trace_self_test_with_function();
+ if (!tracing_selftest_disabled) {
+ event_trace_self_tests();
+ event_trace_self_test_with_function();
+ }
return 0;
}
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 90f134764837..7402144bff21 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -302,8 +302,7 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
if (count == -1)
seq_printf(m, ":unlimited\n");
else
- seq_printf(m, ":count=%ld", count);
- seq_putc(m, '\n');
+ seq_printf(m, ":count=%ld\n", count);
return 0;
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 7938f3ae93e3..e0c2545622e8 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -27,8 +27,7 @@ void trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
- s->buffer[len] = 0;
- seq_puts(m, s->buffer);
+ seq_write(m, s->buffer, len);
trace_seq_init(s);
}
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 9bece9687b62..7b6278110827 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -155,25 +155,19 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
EXPORT_SYMBOL_GPL(__ftrace_vprintk);
static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_start(struct seq_file *m, loff_t *pos)
{
- const char **fmt = m->private;
- const char **next = fmt;
-
- (*pos)++;
+ const char **fmt = __start___trace_bprintk_fmt + *pos;
if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
return NULL;
-
- next = fmt;
- m->private = ++next;
-
return fmt;
}
-static void *t_start(struct seq_file *m, loff_t *pos)
+static void *t_next(struct seq_file *m, void * v, loff_t *pos)
{
- return t_next(m, NULL, pos);
+ (*pos)++;
+ return t_start(m, pos);
}
static int t_show(struct seq_file *m, void *v)
@@ -224,15 +218,7 @@ static const struct seq_operations show_format_seq_ops = {
static int
ftrace_formats_open(struct inode *inode, struct file *file)
{
- int ret;
-
- ret = seq_open(file, &show_format_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
-
- m->private = __start___trace_bprintk_fmt;
- }
- return ret;
+ return seq_open(file, &show_format_seq_ops);
}
static const struct file_operations ftrace_formats_fops = {
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 2d7aebd71dbd..e644af910124 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -326,10 +326,10 @@ stack_trace_sysctl(struct ctl_table *table, int write,
ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
if (ret || !write ||
- (last_stack_tracer_enabled == stack_tracer_enabled))
+ (last_stack_tracer_enabled == !!stack_tracer_enabled))
goto out;
- last_stack_tracer_enabled = stack_tracer_enabled;
+ last_stack_tracer_enabled = !!stack_tracer_enabled;
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index c00643733f4c..e66f5e493342 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -199,17 +199,13 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
mutex_lock(&session->stat_mutex);
/* If we are in the beginning of the file, print the headers */
- if (!*pos && session->ts->stat_headers) {
- (*pos)++;
+ if (!*pos && session->ts->stat_headers)
return SEQ_START_TOKEN;
- }
node = rb_first(&session->stat_root);
for (i = 0; node && i < *pos; i++)
node = rb_next(node);
- (*pos)++;
-
return node;
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4c32b1a1a06e..e30274f6b840 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -336,6 +336,26 @@ config SLUB_STATS
out which slabs are relevant to a particular load.
Try running: slabinfo -DA
+config SLQB_DEBUG
+ default y
+ bool "Enable SLQB debugging support"
+ depends on SLQB
+
+config SLQB_DEBUG_ON
+ default n
+ bool "SLQB debugging on by default"
+ depends on SLQB_DEBUG
+
+config SLQB_SYSFS
+ bool "Create SYSFS entries for slab caches"
+ default n
+ depends on SLQB
+
+config SLQB_STATS
+ bool "Enable SLQB performance statistics"
+ default n
+ depends on SLQB_SYSFS
+
config DEBUG_KMEMLEAK
bool "Kernel memory leak detector"
depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
@@ -359,6 +379,18 @@ config DEBUG_KMEMLEAK
In order to access the kmemleak file, debugfs needs to be
mounted (usually at /sys/kernel/debug).
+config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+ int "Maximum kmemleak early log entries"
+ depends on DEBUG_KMEMLEAK
+ range 200 2000
+ default 400
+ help
+ Kmemleak must track all the memory allocations to avoid
+ reporting false positives. Since memory may be allocated or
+ freed before kmemleak is initialised, an early log buffer is
+ used to store these actions. If kmemleak reports "early log
+ buffer exceeded", please increase this value.
+
config DEBUG_KMEMLEAK_TEST
tristate "Simple test for the kernel memory leak detector"
depends on DEBUG_KMEMLEAK
@@ -778,6 +810,21 @@ config DEBUG_BLOCK_EXT_DEVT
Say N if you are unsure.
+config DEBUG_FORCE_WEAK_PER_CPU
+ bool "Force weak per-cpu definitions"
+ depends on DEBUG_KERNEL
+ help
+ s390 and alpha require percpu variables in modules to be
+ defined weak to work around an addressing range issue that
+ puts the following two restrictions on percpu variable
+ definitions.
+
+ 1. percpu symbols must be unique whether static or not
+ 2. percpu variables can't be defined inside a function
+
+ To ensure that generic code follows the above rules, this
+ option forces all percpu variables to be defined as weak.
+
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_KERNEL
diff --git a/lib/checksum.c b/lib/checksum.c
index b2e2fd468461..097508732f34 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -37,7 +37,8 @@
#include <asm/byteorder.h>
-static inline unsigned short from32to16(unsigned long x)
+#ifndef do_csum
+static inline unsigned short from32to16(unsigned int x)
{
/* add up 16-bit and 16-bit for 16+c bit */
x = (x & 0xffff) + (x >> 16);
@@ -49,16 +50,16 @@ static inline unsigned short from32to16(unsigned long x)
static unsigned int do_csum(const unsigned char *buff, int len)
{
int odd, count;
- unsigned long result = 0;
+ unsigned int result = 0;
if (len <= 0)
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
#ifdef __LITTLE_ENDIAN
- result = *buff;
-#else
result += (*buff << 8);
+#else
+ result = *buff;
#endif
len--;
buff++;
@@ -73,9 +74,9 @@ static unsigned int do_csum(const unsigned char *buff, int len)
}
count >>= 1; /* nr of 32-bit words.. */
if (count) {
- unsigned long carry = 0;
+ unsigned int carry = 0;
do {
- unsigned long w = *(unsigned int *) buff;
+ unsigned int w = *(unsigned int *) buff;
count--;
buff += 4;
result += carry;
@@ -102,6 +103,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
out:
return result;
}
+#endif
/*
* This is a version of ip_compute_csum() optimized for IP headers,
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 3b93129a968c..c9187fed0b93 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -716,7 +716,7 @@ void dma_debug_init(u32 num_entries)
for (i = 0; i < HASH_SIZE; ++i) {
INIT_LIST_HEAD(&dma_entry_hash[i].list);
- dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&dma_entry_hash[i].lock);
}
if (dma_debug_fs_init() != 0) {
@@ -862,7 +862,7 @@ static inline bool overlap(void *addr, u64 size, void *start, void *end)
return ((addr >= start && addr < end) ||
(addr2 >= start && addr2 < end) ||
- ((addr < start) && (addr2 >= end)));
+ ((addr < start) && (addr2 > end)));
}
static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
diff --git a/mm/Makefile b/mm/Makefile
index 5e0bd6426693..b2b96c27d520 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -8,7 +8,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
vmalloc.o
obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
- maccess.o page_alloc.o page-writeback.o pdflush.o \
+ maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
page_isolation.o mm_init.o $(mmu-y)
@@ -28,12 +28,13 @@ obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_SLQB) += slqb.o
obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
-ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
obj-$(CONFIG_SMP) += percpu.o
else
obj-$(CONFIG_SMP) += allocpercpu.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index dfdee6a47359..df34ceae0c67 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -5,6 +5,8 @@
*/
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <asm/sections.h>
#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES
@@ -147,3 +149,29 @@ void free_percpu(void *__pdata)
kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
+
+/*
+ * Generic percpu area setup.
+ */
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+
+EXPORT_SYMBOL(__per_cpu_offset);
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long size, i;
+ char *ptr;
+ unsigned long nr_possible_cpus = num_possible_cpus();
+
+ /* Copy section for each CPU (we discard the original) */
+ size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+ ptr = alloc_bootmem_pages(size * nr_possible_cpus);
+
+ for_each_possible_cpu(i) {
+ __per_cpu_offset[i] = ptr - __per_cpu_start;
+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ ptr += size;
+ }
+}
+#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 493b468a5035..75e6c47752c4 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -1,8 +1,11 @@
#include <linux/wait.h>
#include <linux/backing-dev.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
@@ -14,6 +17,7 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
EXPORT_SYMBOL(default_unplug_io_fn);
struct backing_dev_info default_backing_dev_info = {
+ .name = "default",
.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
.state = 0,
.capabilities = BDI_CAP_MAP_COPY,
@@ -22,6 +26,18 @@ struct backing_dev_info default_backing_dev_info = {
EXPORT_SYMBOL_GPL(default_backing_dev_info);
static struct class *bdi_class;
+DEFINE_SPINLOCK(bdi_lock);
+LIST_HEAD(bdi_list);
+LIST_HEAD(bdi_pending_list);
+
+static struct task_struct *sync_supers_tsk;
+static struct timer_list sync_supers_timer;
+
+static int bdi_sync_supers(void *);
+static void sync_supers_timer_fn(unsigned long);
+static void arm_supers_timer(void);
+
+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
@@ -37,9 +53,29 @@ static void bdi_debug_init(void)
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
struct backing_dev_info *bdi = m->private;
+ struct bdi_writeback *wb;
unsigned long background_thresh;
unsigned long dirty_thresh;
unsigned long bdi_thresh;
+ unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
+ struct inode *inode;
+
+ /*
+ * inode lock is enough here, the bdi->wb_list is protected by
+ * RCU on the reader side
+ */
+ nr_wb = nr_dirty = nr_io = nr_more_io = 0;
+ spin_lock(&inode_lock);
+ list_for_each_entry(wb, &bdi->wb_list, list) {
+ nr_wb++;
+ list_for_each_entry(inode, &wb->b_dirty, i_list)
+ nr_dirty++;
+ list_for_each_entry(inode, &wb->b_io, i_list)
+ nr_io++;
+ list_for_each_entry(inode, &wb->b_more_io, i_list)
+ nr_more_io++;
+ }
+ spin_unlock(&inode_lock);
get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
@@ -49,12 +85,22 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
"BdiReclaimable: %8lu kB\n"
"BdiDirtyThresh: %8lu kB\n"
"DirtyThresh: %8lu kB\n"
- "BackgroundThresh: %8lu kB\n",
+ "BackgroundThresh: %8lu kB\n"
+ "WriteBack threads:%8lu\n"
+ "b_dirty: %8lu\n"
+ "b_io: %8lu\n"
+ "b_more_io: %8lu\n"
+ "bdi_list: %8u\n"
+ "state: %8lx\n"
+ "wb_mask: %8lx\n"
+ "wb_list: %8u\n"
+ "wb_cnt: %8u\n",
(unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
(unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
- K(bdi_thresh),
- K(dirty_thresh),
- K(background_thresh));
+ K(bdi_thresh), K(dirty_thresh),
+ K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
+ !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
+ !list_empty(&bdi->wb_list), bdi->wb_cnt);
#undef K
return 0;
@@ -185,6 +231,13 @@ static int __init default_bdi_init(void)
{
int err;
+ sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
+ BUG_ON(IS_ERR(sync_supers_tsk));
+
+ init_timer(&sync_supers_timer);
+ setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
+ arm_supers_timer();
+
err = bdi_init(&default_backing_dev_info);
if (!err)
bdi_register(&default_backing_dev_info, NULL, "default");
@@ -193,6 +246,390 @@ static int __init default_bdi_init(void)
}
subsys_initcall(default_bdi_init);
+static int wb_assign_nr(struct backing_dev_info *bdi, struct bdi_writeback *wb)
+{
+ unsigned long mask = BDI_MAX_FLUSHERS - 1;
+ unsigned int nr;
+
+ do {
+ if ((bdi->wb_mask & mask) == mask)
+ return 1;
+
+ nr = find_first_zero_bit(&bdi->wb_mask, BDI_MAX_FLUSHERS);
+ } while (test_and_set_bit(nr, &bdi->wb_mask));
+
+ wb->nr = nr;
+
+ spin_lock(&bdi->wb_lock);
+ bdi->wb_cnt++;
+ spin_unlock(&bdi->wb_lock);
+
+ return 0;
+}
+
+static void bdi_put_wb(struct backing_dev_info *bdi, struct bdi_writeback *wb)
+{
+ /*
+ * If this is the default wb thread exiting, leave the bit set
+ * in the wb mask as we set that before it's created as well. This
+ * is done to make sure that assigned work with no thread has at
+ * least one recipient.
+ */
+ if (wb == &bdi->wb)
+ clear_bit(BDI_wb_alloc, &bdi->state);
+ else {
+ clear_bit(wb->nr, &bdi->wb_mask);
+ kfree(wb);
+ spin_lock(&bdi->wb_lock);
+ bdi->wb_cnt--;
+ spin_unlock(&bdi->wb_lock);
+ }
+}
+
+static int bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
+{
+ memset(wb, 0, sizeof(*wb));
+
+ wb->bdi = bdi;
+ INIT_LIST_HEAD(&wb->b_dirty);
+ INIT_LIST_HEAD(&wb->b_io);
+ INIT_LIST_HEAD(&wb->b_more_io);
+
+ return wb_assign_nr(bdi, wb);
+}
+
+static struct bdi_writeback *bdi_new_wb(struct backing_dev_info *bdi)
+{
+ struct bdi_writeback *wb;
+
+ /*
+ * Default bdi->wb is already assigned, so just return it
+ */
+ if (!test_and_set_bit(BDI_wb_alloc, &bdi->state))
+ wb = &bdi->wb;
+ else {
+ wb = kmalloc(sizeof(struct bdi_writeback), GFP_KERNEL);
+ if (wb) {
+ if (bdi_wb_init(wb, bdi)) {
+ kfree(wb);
+ wb = NULL;
+ }
+ }
+ }
+
+ return wb;
+}
+
+static void bdi_task_init(struct backing_dev_info *bdi,
+ struct bdi_writeback *wb)
+{
+ struct task_struct *tsk = current;
+ int was_empty;
+
+ /*
+ * Add us to the active bdi_list. If we are adding threads beyond
+ * the default embedded bdi_writeback, then we need to start using
+ * proper locking. Check the list for empty first, then set the
+ * BDI_wblist_lock flag if there's > 1 entry on the list now
+ */
+ spin_lock(&bdi->wb_lock);
+
+ was_empty = list_empty(&bdi->wb_list);
+ list_add_tail_rcu(&wb->list, &bdi->wb_list);
+ if (!was_empty)
+ set_bit(BDI_wblist_lock, &bdi->state);
+
+ spin_unlock(&bdi->wb_lock);
+
+ tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
+ set_freezable();
+
+ /*
+ * Our parent may run at a different priority, just set us to normal
+ */
+ set_user_nice(tsk, 0);
+}
+
+static int bdi_start_fn(void *ptr)
+{
+ struct bdi_writeback *wb = ptr;
+ struct backing_dev_info *bdi = wb->bdi;
+ int ret;
+
+ /*
+ * Add us to the active bdi_list
+ */
+ spin_lock(&bdi_lock);
+ list_add(&bdi->bdi_list, &bdi_list);
+ spin_unlock(&bdi_lock);
+
+ bdi_task_init(bdi, wb);
+
+ /*
+ * Clear pending bit and wakeup anybody waiting to tear us down
+ */
+ clear_bit(BDI_pending, &bdi->state);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&bdi->state, BDI_pending);
+
+ ret = bdi_writeback_task(wb);
+
+ /*
+ * Remove us from the list
+ */
+ spin_lock(&bdi->wb_lock);
+ list_del_rcu(&wb->list);
+ spin_unlock(&bdi->wb_lock);
+
+ /*
+ * wait for rcu grace period to end, so we can free wb
+ */
+ synchronize_srcu(&bdi->srcu);
+
+ /*
+ * Flush any work that raced with us exiting. No new work
+ * will be added, since this bdi isn't discoverable anymore.
+ */
+ if (!list_empty(&bdi->work_list))
+ wb_do_writeback(wb, 1);
+
+ wb->task = NULL;
+ bdi_put_wb(bdi, wb);
+ return ret;
+}
+
+int bdi_has_dirty_io(struct backing_dev_info *bdi)
+{
+ struct bdi_writeback *wb;
+ int ret = 0;
+
+ if (!bdi_wblist_needs_lock(bdi))
+ ret = wb_has_dirty_io(&bdi->wb);
+ else {
+ int idx;
+
+ idx = srcu_read_lock(&bdi->srcu);
+
+ list_for_each_entry_rcu(wb, &bdi->wb_list, list) {
+ ret = wb_has_dirty_io(wb);
+ if (ret)
+ break;
+ }
+
+ srcu_read_unlock(&bdi->srcu, idx);
+ }
+
+ return ret;
+}
+
+static void bdi_flush_io(struct backing_dev_info *bdi)
+{
+ struct writeback_control wbc = {
+ .bdi = bdi,
+ .sync_mode = WB_SYNC_NONE,
+ .older_than_this = NULL,
+ .range_cyclic = 1,
+ .nr_to_write = 1024,
+ };
+
+ generic_sync_bdi_inodes(NULL, &wbc);
+}
+
+/*
+ * kupdated() used to do this. We cannot do it from the bdi_forker_task()
+ * or we risk deadlocking on ->s_umount. The longer term solution would be
+ * to implement sync_supers_bdi() or similar and simply do it from the
+ * bdi writeback tasks individually.
+ */
+static int bdi_sync_supers(void *unused)
+{
+ set_user_nice(current, 0);
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+
+ /*
+ * Do this periodically, like kupdated() did before.
+ */
+ sync_supers();
+ }
+
+ return 0;
+}
+
+static void arm_supers_timer(void)
+{
+ unsigned long next;
+
+ next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
+ mod_timer(&sync_supers_timer, round_jiffies_up(next));
+}
+
+static void sync_supers_timer_fn(unsigned long unused)
+{
+ wake_up_process(sync_supers_tsk);
+ arm_supers_timer();
+}
+
+static int bdi_forker_task(void *ptr)
+{
+ struct bdi_writeback *me = ptr;
+
+ bdi_task_init(me->bdi, me);
+
+ for (;;) {
+ struct backing_dev_info *bdi, *tmp;
+ struct bdi_writeback *wb;
+
+ /*
+ * Temporary measure, we want to make sure we don't see
+ * dirty data on the default backing_dev_info
+ */
+ if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
+ wb_do_writeback(me, 0);
+
+ spin_lock(&bdi_lock);
+
+ /*
+ * Check if any existing bdi's have dirty data without
+ * a thread registered. If so, set that up.
+ */
+ list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
+ if (bdi->wb.task)
+ continue;
+ if (list_empty(&bdi->work_list) &&
+ !bdi_has_dirty_io(bdi))
+ continue;
+
+ bdi_add_default_flusher_task(bdi);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (list_empty(&bdi_pending_list)) {
+ unsigned long wait;
+
+ spin_unlock(&bdi_lock);
+ wait = msecs_to_jiffies(dirty_writeback_interval * 10);
+ schedule_timeout(wait);
+ try_to_freeze();
+ continue;
+ }
+
+ __set_current_state(TASK_RUNNING);
+
+ /*
+ * This is our real job - check for pending entries in
+ * bdi_pending_list, and create the tasks that got added
+ */
+ bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
+ bdi_list);
+ list_del_init(&bdi->bdi_list);
+ spin_unlock(&bdi_lock);
+
+ wb = bdi_new_wb(bdi);
+ if (!wb)
+ goto readd_flush;
+
+ wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
+ dev_name(bdi->dev));
+ /*
+ * If task creation fails, then readd the bdi to
+ * the pending list and force writeout of the bdi
+ * from this forker thread. That will free some memory
+ * and we can try again.
+ */
+ if (IS_ERR(wb->task)) {
+ wb->task = NULL;
+ bdi_put_wb(bdi, wb);
+readd_flush:
+ /*
+ * Add this 'bdi' to the back, so we get
+ * a chance to flush other bdi's to free
+ * memory.
+ */
+ spin_lock(&bdi_lock);
+ list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+ spin_unlock(&bdi_lock);
+
+ bdi_flush_io(bdi);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * bdi_lock held on entry
+ */
+static void bdi_add_one_flusher_task(struct backing_dev_info *bdi,
+ int(*func)(struct backing_dev_info *))
+{
+ if (!bdi_cap_writeback_dirty(bdi))
+ return;
+
+ if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
+ printk("bdi %p/%s is not registered!\n", bdi, bdi->name);
+ return;
+ }
+
+ /*
+ * Check with the helper whether to proceed adding a task. Will only
+ * abort if two or more simultaneous calls to
+ * bdi_add_default_flusher_task() occurred; further additions will block
+ * waiting for previous additions to finish.
+ */
+ if (!func(bdi)) {
+ list_move_tail(&bdi->bdi_list, &bdi_pending_list);
+
+ /*
+ * We are now on the pending list, wake up bdi_forker_task()
+ * to finish the job and add us back to the active bdi_list
+ */
+ wake_up_process(default_backing_dev_info.wb.task);
+ }
+}
+
+static int flusher_add_helper_block(struct backing_dev_info *bdi)
+{
+ spin_unlock(&bdi_lock);
+ wait_on_bit_lock(&bdi->state, BDI_pending, bdi_sched_wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_lock(&bdi_lock);
+ return 0;
+}
+
+static int flusher_add_helper_test(struct backing_dev_info *bdi)
+{
+ return test_and_set_bit(BDI_pending, &bdi->state);
+}
+
+/*
+ * Add the default flusher task that gets created for any bdi
+ * that has dirty data pending writeout
+ */
+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
+{
+ bdi_add_one_flusher_task(bdi, flusher_add_helper_test);
+}
+
+/**
+ * bdi_add_flusher_task - add one more flusher task to this @bdi
+ * @bdi: the bdi
+ *
+ * Add an additional flusher task to this @bdi. Will block waiting on
+ * previous additions, if any.
+ *
+ */
+void bdi_add_flusher_task(struct backing_dev_info *bdi)
+{
+ spin_lock(&bdi_lock);
+ bdi_add_one_flusher_task(bdi, flusher_add_helper_block);
+ spin_unlock(&bdi_lock);
+}
+EXPORT_SYMBOL(bdi_add_flusher_task);
+
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
@@ -211,9 +648,42 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
goto exit;
}
+ spin_lock(&bdi_lock);
+ list_add_tail(&bdi->bdi_list, &bdi_list);
+ spin_unlock(&bdi_lock);
+
bdi->dev = dev;
- bdi_debug_register(bdi, dev_name(dev));
+ /*
+ * Just start the forker thread for our default backing_dev_info,
+ * and add other bdi's to the list. They will get a thread created
+ * on-demand when they need it.
+ */
+ if (bdi_cap_flush_forker(bdi)) {
+ struct bdi_writeback *wb;
+
+ wb = bdi_new_wb(bdi);
+ if (!wb) {
+ ret = -ENOMEM;
+ goto remove_err;
+ }
+
+ wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
+ dev_name(dev));
+ if (IS_ERR(wb->task)) {
+ wb->task = NULL;
+ bdi_put_wb(bdi, wb);
+ ret = -ENOMEM;
+remove_err:
+ spin_lock(&bdi_lock);
+ list_del(&bdi->bdi_list);
+ spin_unlock(&bdi_lock);
+ goto exit;
+ }
+ }
+
+ bdi_debug_register(bdi, dev_name(dev));
+ set_bit(BDI_registered, &bdi->state);
exit:
return ret;
}
@@ -225,9 +695,42 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
}
EXPORT_SYMBOL(bdi_register_dev);
+/*
+ * Remove bdi from the global list and shutdown any threads we have running
+ */
+static void bdi_wb_shutdown(struct backing_dev_info *bdi)
+{
+ struct bdi_writeback *wb;
+
+ if (!bdi_cap_writeback_dirty(bdi))
+ return;
+
+ /*
+ * If setup is pending, wait for that to complete first
+ */
+ wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
+ TASK_UNINTERRUPTIBLE);
+
+ /*
+ * Make sure nobody finds us on the bdi_list anymore
+ */
+ spin_lock(&bdi_lock);
+ list_del(&bdi->bdi_list);
+ spin_unlock(&bdi_lock);
+
+ /*
+ * Finally, kill the kernel threads. We don't need to be RCU
+ * safe anymore, since the bdi is gone from visibility.
+ */
+ list_for_each_entry(wb, &bdi->wb_list, list)
+ kthread_stop(wb->task);
+}
+
void bdi_unregister(struct backing_dev_info *bdi)
{
if (bdi->dev) {
+ if (!bdi_cap_flush_forker(bdi))
+ bdi_wb_shutdown(bdi);
bdi_debug_unregister(bdi);
device_unregister(bdi->dev);
bdi->dev = NULL;
@@ -237,14 +740,21 @@ EXPORT_SYMBOL(bdi_unregister);
int bdi_init(struct backing_dev_info *bdi)
{
- int i;
- int err;
+ int i, err;
bdi->dev = NULL;
bdi->min_ratio = 0;
bdi->max_ratio = 100;
bdi->max_prop_frac = PROP_FRAC_BASE;
+ spin_lock_init(&bdi->wb_lock);
+ bdi->wb_mask = 0;
+ bdi->wb_cnt = 0;
+ INIT_LIST_HEAD(&bdi->bdi_list);
+ INIT_LIST_HEAD(&bdi->wb_list);
+ INIT_LIST_HEAD(&bdi->work_list);
+
+ bdi_wb_init(&bdi->wb, bdi);
for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
err = percpu_counter_init(&bdi->bdi_stat[i], 0);
@@ -252,10 +762,15 @@ int bdi_init(struct backing_dev_info *bdi)
goto err;
}
+ err = init_srcu_struct(&bdi->srcu);
+ if (err)
+ goto err;
+
bdi->dirty_exceeded = 0;
err = prop_local_init_percpu(&bdi->completions);
if (err) {
+ cleanup_srcu_struct(&bdi->srcu);
err:
while (i--)
percpu_counter_destroy(&bdi->bdi_stat[i]);
@@ -269,8 +784,12 @@ void bdi_destroy(struct backing_dev_info *bdi)
{
int i;
+ WARN_ON(bdi_has_dirty_io(bdi));
+
bdi_unregister(bdi);
+ cleanup_srcu_struct(&bdi->srcu);
+
for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
percpu_counter_destroy(&bdi->bdi_stat[i]);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index b1f0885dda22..3df063706f53 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -86,10 +86,12 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
unsigned pages = 0;
unsigned blocks = 0;
+ spin_lock_irq(&pool->lock);
list_for_each_entry(page, &pool->page_list, page_list) {
pages++;
blocks += page->in_use;
}
+ spin_unlock_irq(&pool->lock);
/* per-pool info, no real statistics yet */
temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d0351e31f474..03a9c8b760d3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -234,6 +234,7 @@ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
return 1UL << (hstate->order + PAGE_SHIFT);
}
+EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
/*
* Return the page size being used by the MMU to back a VMA. In the majority
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
index d5292fc6f523..177a5169bbde 100644
--- a/mm/kmemleak-test.c
+++ b/mm/kmemleak-test.c
@@ -36,7 +36,7 @@ struct test_node {
};
static LIST_HEAD(test_list);
-static DEFINE_PER_CPU(void *, test_pointer);
+static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
/*
* Some very simple testing. This function needs to be extended for
@@ -86,9 +86,9 @@ static int __init kmemleak_test_init(void)
}
for_each_possible_cpu(i) {
- per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL);
+ per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
pr_info("kmemleak: kmalloc(129) = %p\n",
- per_cpu(test_pointer, i));
+ per_cpu(kmemleak_test_pointer, i));
}
return 0;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c96f2c8700aa..e766e1da09d2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -48,10 +48,10 @@
* scanned. This list is only modified during a scanning episode when the
* scan_mutex is held. At the end of a scan, the gray_list is always empty.
* Note that the kmemleak_object.use_count is incremented when an object is
- * added to the gray_list and therefore cannot be freed
- * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
- * file together with modifications to the memory scanning parameters
- * including the scan_thread pointer
+ * added to the gray_list and therefore cannot be freed. This mutex also
+ * prevents multiple users of the "kmemleak" debugfs file together with
+ * modifications to the memory scanning parameters including the scan_thread
+ * pointer
*
* The kmemleak_object structures have a use_count incremented or decremented
* using the get_object()/put_object() functions. When the use_count becomes
@@ -105,7 +105,6 @@
#define MAX_TRACE 16 /* stack trace length */
#define REPORTS_NR 50 /* maximum number of reported leaks */
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
-#define MSECS_SCAN_YIELD 10 /* CPU yielding period */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
@@ -186,19 +185,16 @@ static atomic_t kmemleak_error = ATOMIC_INIT(0);
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;
-/* used for yielding the CPU to other tasks during scanning */
-static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
-static unsigned long jiffies_scan_yield;
+/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
+static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
-static int kmemleak_stack_scan;
-/* mutex protecting the memory scanning */
+static int kmemleak_stack_scan = 1;
+/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
-/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
-static DEFINE_MUTEX(kmemleak_mutex);
/* number of leaks reported (for limitation purposes) */
static int reported_leaks;
@@ -235,7 +231,7 @@ struct early_log {
};
/* early logging buffer and current position */
-static struct early_log early_log[200];
+static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;
static void kmemleak_disable(void);
@@ -279,15 +275,6 @@ static int color_gray(const struct kmemleak_object *object)
}
/*
- * Objects are considered referenced if their color is gray and they have not
- * been deleted.
- */
-static int referenced_object(struct kmemleak_object *object)
-{
- return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
-}
-
-/*
* Objects are considered unreferenced only if their color is white, they have
* not be deleted and have a minimum age to avoid false positives caused by
* pointers temporarily stored in CPU registers.
@@ -295,42 +282,28 @@ static int referenced_object(struct kmemleak_object *object)
static int unreferenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
- time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
+ time_before_eq(object->jiffies + jiffies_min_age,
+ jiffies_last_scan);
}
/*
- * Printing of the (un)referenced objects information, either to the seq file
- * or to the kernel log. The print_referenced/print_unreferenced functions
- * must be called with the object->lock held.
+ * Printing of the unreferenced objects information to the seq file. The
+ * print_unreferenced function must be called with the object->lock held.
*/
-#define print_helper(seq, x...) do { \
- struct seq_file *s = (seq); \
- if (s) \
- seq_printf(s, x); \
- else \
- pr_info(x); \
-} while (0)
-
-static void print_referenced(struct kmemleak_object *object)
-{
- pr_info("referenced object 0x%08lx (size %zu)\n",
- object->pointer, object->size);
-}
-
static void print_unreferenced(struct seq_file *seq,
struct kmemleak_object *object)
{
int i;
- print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
- object->pointer, object->size);
- print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n",
- object->comm, object->pid, object->jiffies);
- print_helper(seq, " backtrace:\n");
+ seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
+ object->pointer, object->size);
+ seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
+ object->comm, object->pid, object->jiffies);
+ seq_printf(seq, " backtrace:\n");
for (i = 0; i < object->trace_len; i++) {
void *ptr = (void *)object->trace[i];
- print_helper(seq, " [<%p>] %pS\n", ptr, ptr);
+ seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
}
}
@@ -554,8 +527,10 @@ static void delete_object(unsigned long ptr)
write_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, 0);
if (!object) {
+#ifdef DEBUG
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
ptr);
+#endif
write_unlock_irqrestore(&kmemleak_lock, flags);
return;
}
@@ -571,8 +546,6 @@ static void delete_object(unsigned long ptr)
* cannot be freed when it is being scanned.
*/
spin_lock_irqsave(&object->lock, flags);
- if (object->flags & OBJECT_REPORTED)
- print_referenced(object);
object->flags &= ~OBJECT_ALLOCATED;
spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
@@ -696,7 +669,8 @@ static void log_early(int op_type, const void *ptr, size_t size,
struct early_log *log;
if (crt_early_log >= ARRAY_SIZE(early_log)) {
- kmemleak_stop("Early log buffer exceeded\n");
+ pr_warning("Early log buffer exceeded\n");
+ kmemleak_disable();
return;
}
@@ -808,21 +782,6 @@ void kmemleak_no_scan(const void *ptr)
EXPORT_SYMBOL(kmemleak_no_scan);
/*
- * Yield the CPU so that other tasks get a chance to run. The yielding is
- * rate-limited to avoid excessive number of calls to the schedule() function
- * during memory scanning.
- */
-static void scan_yield(void)
-{
- might_sleep();
-
- if (time_is_before_eq_jiffies(next_scan_yield)) {
- schedule();
- next_scan_yield = jiffies + jiffies_scan_yield;
- }
-}
-
-/*
* Memory scanning is a long process and it needs to be interruptable. This
* function checks whether such interrupt condition occured.
*/
@@ -862,15 +821,6 @@ static void scan_block(void *_start, void *_end,
if (scan_should_stop())
break;
- /*
- * When scanning a memory block with a corresponding
- * kmemleak_object, the CPU yielding is handled in the calling
- * code since it holds the object->lock to avoid the block
- * freeing.
- */
- if (!scanned)
- scan_yield();
-
object = find_and_get_object(pointer, 1);
if (!object)
continue;
@@ -952,6 +902,9 @@ static void kmemleak_scan(void)
struct kmemleak_object *object, *tmp;
struct task_struct *task;
int i;
+ int new_leaks = 0;
+
+ jiffies_last_scan = jiffies;
/* prepare the kmemleak_object's */
rcu_read_lock();
@@ -1033,7 +986,7 @@ static void kmemleak_scan(void)
*/
object = list_entry(gray_list.next, typeof(*object), gray_list);
while (&object->gray_list != &gray_list) {
- scan_yield();
+ cond_resched();
/* may add new objects to the list */
if (!scan_should_stop())
@@ -1049,6 +1002,32 @@ static void kmemleak_scan(void)
object = tmp;
}
WARN_ON(!list_empty(&gray_list));
+
+ /*
+ * If scanning was stopped do not report any new unreferenced objects.
+ */
+ if (scan_should_stop())
+ return;
+
+ /*
+ * Scanning result reporting.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+ spin_lock_irqsave(&object->lock, flags);
+ if (unreferenced_object(object) &&
+ !(object->flags & OBJECT_REPORTED)) {
+ object->flags |= OBJECT_REPORTED;
+ new_leaks++;
+ }
+ spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+
+ if (new_leaks)
+ pr_info("%d new suspected memory leaks (see "
+ "/sys/kernel/debug/kmemleak)\n", new_leaks);
+
}
/*
@@ -1070,36 +1049,12 @@ static int kmemleak_scan_thread(void *arg)
}
while (!kthread_should_stop()) {
- struct kmemleak_object *object;
signed long timeout = jiffies_scan_wait;
mutex_lock(&scan_mutex);
-
kmemleak_scan();
- reported_leaks = 0;
-
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- unsigned long flags;
-
- if (reported_leaks >= REPORTS_NR)
- break;
- spin_lock_irqsave(&object->lock, flags);
- if (!(object->flags & OBJECT_REPORTED) &&
- unreferenced_object(object)) {
- print_unreferenced(NULL, object);
- object->flags |= OBJECT_REPORTED;
- reported_leaks++;
- } else if ((object->flags & OBJECT_REPORTED) &&
- referenced_object(object)) {
- print_referenced(object);
- object->flags &= ~OBJECT_REPORTED;
- }
- spin_unlock_irqrestore(&object->lock, flags);
- }
- rcu_read_unlock();
-
mutex_unlock(&scan_mutex);
+
/* wait before the next scan */
while (timeout && !kthread_should_stop())
timeout = schedule_timeout_interruptible(timeout);
@@ -1112,7 +1067,7 @@ static int kmemleak_scan_thread(void *arg)
/*
* Start the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
*/
void start_scan_thread(void)
{
@@ -1127,7 +1082,7 @@ void start_scan_thread(void)
/*
* Stop the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
*/
void stop_scan_thread(void)
{
@@ -1147,10 +1102,8 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
struct kmemleak_object *object;
loff_t n = *pos;
- if (!n) {
- kmemleak_scan();
+ if (!n)
reported_leaks = 0;
- }
if (reported_leaks >= REPORTS_NR)
return NULL;
@@ -1211,11 +1164,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
unsigned long flags;
spin_lock_irqsave(&object->lock, flags);
- if (!unreferenced_object(object))
- goto out;
- print_unreferenced(seq, object);
- reported_leaks++;
-out:
+ if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
+ print_unreferenced(seq, object);
+ reported_leaks++;
+ }
spin_unlock_irqrestore(&object->lock, flags);
return 0;
}
@@ -1234,13 +1186,10 @@ static int kmemleak_open(struct inode *inode, struct file *file)
if (!atomic_read(&kmemleak_enabled))
return -EBUSY;
- ret = mutex_lock_interruptible(&kmemleak_mutex);
+ ret = mutex_lock_interruptible(&scan_mutex);
if (ret < 0)
goto out;
if (file->f_mode & FMODE_READ) {
- ret = mutex_lock_interruptible(&scan_mutex);
- if (ret < 0)
- goto kmemleak_unlock;
ret = seq_open(file, &kmemleak_seq_ops);
if (ret < 0)
goto scan_unlock;
@@ -1249,8 +1198,6 @@ static int kmemleak_open(struct inode *inode, struct file *file)
scan_unlock:
mutex_unlock(&scan_mutex);
-kmemleak_unlock:
- mutex_unlock(&kmemleak_mutex);
out:
return ret;
}
@@ -1259,11 +1206,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
{
int ret = 0;
- if (file->f_mode & FMODE_READ) {
+ if (file->f_mode & FMODE_READ)
seq_release(inode, file);
- mutex_unlock(&scan_mutex);
- }
- mutex_unlock(&kmemleak_mutex);
+ mutex_unlock(&scan_mutex);
return ret;
}
@@ -1278,6 +1223,7 @@ static int kmemleak_release(struct inode *inode, struct file *file)
* scan=off - stop the automatic memory scanning thread
* scan=... - set the automatic memory scanning period in seconds (0 to
* disable it)
+ * scan - trigger a memory scan
*/
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *ppos)
@@ -1315,7 +1261,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
start_scan_thread();
}
- } else
+ } else if (strncmp(buf, "scan", 4) == 0)
+ kmemleak_scan();
+ else
return -EINVAL;
/* ignore the rest of the buffer, only one command at a time */
@@ -1340,11 +1288,9 @@ static int kmemleak_cleanup_thread(void *arg)
{
struct kmemleak_object *object;
- mutex_lock(&kmemleak_mutex);
+ mutex_lock(&scan_mutex);
stop_scan_thread();
- mutex_unlock(&kmemleak_mutex);
- mutex_lock(&scan_mutex);
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list)
delete_object(object->pointer);
@@ -1411,7 +1357,6 @@ void __init kmemleak_init(void)
int i;
unsigned long flags;
- jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
@@ -1486,9 +1431,9 @@ static int __init kmemleak_late_init(void)
&kmemleak_fops);
if (!dentry)
pr_warning("Failed to create the debugfs kmemleak file\n");
- mutex_lock(&kmemleak_mutex);
+ mutex_lock(&scan_mutex);
start_scan_thread();
- mutex_unlock(&kmemleak_mutex);
+ mutex_unlock(&scan_mutex);
pr_info("Kernel memory leak detector initialized\n");
diff --git a/mm/memory.c b/mm/memory.c
index f46ac18ba231..65216194eb8d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1207,8 +1207,8 @@ static inline int use_zero_page(struct vm_area_struct *vma)
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
- struct page **pages, struct vm_area_struct **vmas)
+ unsigned long start, int nr_pages, int flags,
+ struct page **pages, struct vm_area_struct **vmas)
{
int i;
unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
- if (len <= 0)
+ if (nr_pages <= 0)
return 0;
/*
* Require read or write permissions.
@@ -1269,7 +1269,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vmas[i] = gate_vma;
i++;
start += PAGE_SIZE;
- len--;
+ nr_pages--;
continue;
}
@@ -1280,7 +1280,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
- &start, &len, i, write);
+ &start, &nr_pages, i, write);
continue;
}
@@ -1357,9 +1357,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vmas[i] = vma;
i++;
start += PAGE_SIZE;
- len--;
- } while (len && start < vma->vm_end);
- } while (len);
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+ } while (nr_pages);
return i;
}
@@ -1368,7 +1368,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* @tsk: task_struct of target task
* @mm: mm_struct of target mm
* @start: starting user address
- * @len: number of pages from start to pin
+ * @nr_pages: number of pages from start to pin
* @write: whether pages will be written to by the caller
* @force: whether to force write access even if user mapping is
* readonly. This will result in the page being COWed even
@@ -1380,7 +1380,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* Or NULL if the caller does not require them.
*
* Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
@@ -1414,7 +1414,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* See also get_user_pages_fast, for performance critical applications.
*/
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+ unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int flags = 0;
@@ -1424,9 +1424,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (force)
flags |= GUP_FLAGS_FORCE;
- return __get_user_pages(tsk, mm,
- start, len, flags,
- pages, vmas);
+ return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
diff --git a/mm/nommu.c b/mm/nommu.c
index 2fd2ad5da98e..53cab10fece4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -173,8 +173,8 @@ unsigned int kobjsize(const void *objp)
}
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
- struct page **pages, struct vm_area_struct **vmas)
+ unsigned long start, int nr_pages, int flags,
+ struct page **pages, struct vm_area_struct **vmas)
{
struct vm_area_struct *vma;
unsigned long vm_flags;
@@ -189,7 +189,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
- for (i = 0; i < len; i++) {
+ for (i = 0; i < nr_pages; i++) {
vma = find_vma(mm, start);
if (!vma)
goto finish_or_fault;
@@ -224,7 +224,7 @@ finish_or_fault:
* - don't permit access to VMAs that don't support it, such as I/O mappings
*/
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+ unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int flags = 0;
@@ -234,12 +234,31 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (force)
flags |= GUP_FLAGS_FORCE;
- return __get_user_pages(tsk, mm,
- start, len, flags,
- pages, vmas);
+ return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * Returns zero and the pfn at @pfn on success, -ve otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ unsigned long *pfn)
+{
+ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ return -EINVAL;
+
+ *pfn = address >> PAGE_SHIFT;
+ return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
+
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7b0dcea4935b..f4a55313c9a6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -36,15 +36,6 @@
#include <linux/pagevec.h>
/*
- * The maximum number of pages to writeout in a single bdflush/kupdate
- * operation. We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode. Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES 1024
-
-/*
* After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
* will look to see if it needs to force writeback or throttling.
*/
@@ -117,8 +108,6 @@ EXPORT_SYMBOL(laptop_mode);
/* End of sysctl-exported parameters */
-static void background_writeout(unsigned long _min_pages);
-
/*
* Scale the writeback cache size proportional to the relative writeout speeds.
*
@@ -320,15 +309,13 @@ static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
/*
*
*/
-static DEFINE_SPINLOCK(bdi_lock);
static unsigned int bdi_min_ratio;
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&bdi_lock, flags);
+ spin_lock(&bdi_lock);
if (min_ratio > bdi->max_ratio) {
ret = -EINVAL;
} else {
@@ -340,27 +327,26 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
ret = -EINVAL;
}
}
- spin_unlock_irqrestore(&bdi_lock, flags);
+ spin_unlock(&bdi_lock);
return ret;
}
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
- unsigned long flags;
int ret = 0;
if (max_ratio > 100)
return -EINVAL;
- spin_lock_irqsave(&bdi_lock, flags);
+ spin_lock(&bdi_lock);
if (bdi->min_ratio > max_ratio) {
ret = -EINVAL;
} else {
bdi->max_ratio = max_ratio;
bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
}
- spin_unlock_irqrestore(&bdi_lock, flags);
+ spin_unlock(&bdi_lock);
return ret;
}
@@ -541,9 +527,12 @@ static void balance_dirty_pages(struct address_space *mapping)
* filesystems (i.e. NFS) in which data may have been
* written to the server's write cache, but has not yet
* been flushed to permanent storage.
+ * Only move pages to writeback if this bdi is over its
+ * threshold otherwise wait until the disk writes catch
+ * up.
*/
- if (bdi_nr_reclaimable) {
- writeback_inodes(&wbc);
+ if (bdi_nr_reclaimable > bdi_thresh) {
+ generic_sync_bdi_inodes(NULL, &wbc);
pages_written += write_chunk - wbc.nr_to_write;
get_dirty_limits(&background_thresh, &dirty_thresh,
&bdi_thresh, bdi);
@@ -594,7 +583,7 @@ static void balance_dirty_pages(struct address_space *mapping)
(!laptop_mode && (global_page_state(NR_FILE_DIRTY)
+ global_page_state(NR_UNSTABLE_NFS)
> background_thresh)))
- pdflush_operation(background_writeout, 0);
+ bdi_start_writeback(bdi, NULL, 0, WB_SYNC_NONE);
}
void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -607,6 +596,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
}
}
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
/**
* balance_dirty_pages_ratelimited_nr - balance dirty memory state
* @mapping: address_space which was dirtied
@@ -624,7 +615,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
unsigned long nr_pages_dirtied)
{
- static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
unsigned long ratelimit;
unsigned long *p;
@@ -637,7 +627,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
* tasks in balance_dirty_pages(). Period.
*/
preempt_disable();
- p = &__get_cpu_var(ratelimits);
+ p = &__get_cpu_var(bdp_ratelimits);
*p += nr_pages_dirtied;
if (unlikely(*p >= ratelimit)) {
*p = 0;
@@ -679,152 +669,53 @@ void throttle_vm_writeout(gfp_t gfp_mask)
}
/*
- * writeback at least _min_pages, and keep writing until the amount of dirty
- * memory is less than the background threshold, or until we're all clean.
+ * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
+ * the whole world.
*/
-static void background_writeout(unsigned long _min_pages)
+void wakeup_flusher_threads(long nr_pages)
{
- long min_pages = _min_pages;
struct writeback_control wbc = {
- .bdi = NULL,
.sync_mode = WB_SYNC_NONE,
.older_than_this = NULL,
- .nr_to_write = 0,
- .nonblocking = 1,
.range_cyclic = 1,
};
- for ( ; ; ) {
- unsigned long background_thresh;
- unsigned long dirty_thresh;
-
- get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
- if (global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) < background_thresh
- && min_pages <= 0)
- break;
- wbc.more_io = 0;
- wbc.encountered_congestion = 0;
- wbc.nr_to_write = MAX_WRITEBACK_PAGES;
- wbc.pages_skipped = 0;
- writeback_inodes(&wbc);
- min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
- if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
- /* Wrote less than expected */
- if (wbc.encountered_congestion || wbc.more_io)
- congestion_wait(WRITE, HZ/10);
- else
- break;
- }
- }
-}
-
-/*
- * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
- * the whole world. Returns 0 if a pdflush thread was dispatched. Returns
- * -1 if all pdflush threads were busy.
- */
-int wakeup_pdflush(long nr_pages)
-{
if (nr_pages == 0)
nr_pages = global_page_state(NR_FILE_DIRTY) +
global_page_state(NR_UNSTABLE_NFS);
- return pdflush_operation(background_writeout, nr_pages);
+ wbc.nr_to_write = nr_pages;
+ bdi_writeback_all(NULL, &wbc);
}
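For context, a hedged sketch of the caller-side change this implies (illustrative lines, not part of the patch): sites that used to kick pdflush and handle a "-1, all threads busy" return now simply kick the per-bdi flusher threads.

	/* before this patch (callers could observe failure): */
	if (wakeup_pdflush(nr_pages) < 0)
		;	/* all pdflush threads busy, nothing was queued */

	/* after this patch (cannot fail, work goes to the bdi flusher threads): */
	wakeup_flusher_threads(nr_pages);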
-static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);
-static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
/*
- * Periodic writeback of "old" data.
- *
- * Define "old": the first time one of an inode's pages is dirtied, we mark the
- * dirtying-time in the inode's address_space. So this periodic writeback code
- * just walks the superblock inode list, writing back any inodes which are
- * older than a specific point in time.
- *
- * Try to run once per dirty_writeback_interval. But if a writeback event
- * takes longer than a dirty_writeback_interval interval, then leave a
- * one-second gap.
- *
- * older_than_this takes precedence over nr_to_write. So we'll only write back
- * all dirty pages if they are all attached to "old" mappings.
- */
-static void wb_kupdate(unsigned long arg)
-{
- unsigned long oldest_jif;
- unsigned long start_jif;
- unsigned long next_jif;
- long nr_to_write;
- struct writeback_control wbc = {
- .bdi = NULL,
- .sync_mode = WB_SYNC_NONE,
- .older_than_this = &oldest_jif,
- .nr_to_write = 0,
- .nonblocking = 1,
- .for_kupdate = 1,
- .range_cyclic = 1,
- };
-
- sync_supers();
-
- oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
- start_jif = jiffies;
- next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
- nr_to_write = global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused);
- while (nr_to_write > 0) {
- wbc.more_io = 0;
- wbc.encountered_congestion = 0;
- wbc.nr_to_write = MAX_WRITEBACK_PAGES;
- writeback_inodes(&wbc);
- if (wbc.nr_to_write > 0) {
- if (wbc.encountered_congestion || wbc.more_io)
- congestion_wait(WRITE, HZ/10);
- else
- break; /* All the old data is written */
- }
- nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
- }
- if (time_before(next_jif, jiffies + HZ))
- next_jif = jiffies + HZ;
- if (dirty_writeback_interval)
- mod_timer(&wb_timer, next_jif);
-}
-
-/*
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec(table, write, file, buffer, length, ppos);
- if (dirty_writeback_interval)
- mod_timer(&wb_timer, jiffies +
- msecs_to_jiffies(dirty_writeback_interval * 10));
- else
- del_timer(&wb_timer);
return 0;
}
-static void wb_timer_fn(unsigned long unused)
+static void do_laptop_sync(struct work_struct *work)
{
- if (pdflush_operation(wb_kupdate, 0) < 0)
- mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
-}
-
-static void laptop_flush(unsigned long unused)
-{
- sys_sync();
+ wakeup_flusher_threads(0);
+ kfree(work);
}
static void laptop_timer_fn(unsigned long unused)
{
- pdflush_operation(laptop_flush, 0);
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, do_laptop_sync);
+ schedule_work(work);
+ }
}
/*
@@ -907,8 +798,6 @@ void __init page_writeback_init(void)
{
int shift;
- mod_timer(&wb_timer,
- jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
writeback_set_ratelimit();
register_cpu_notifier(&ratelimit_nb);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5d714f8fb303..e0f2cdf9d8b1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4032,6 +4032,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
int i, nid;
unsigned long usable_startpfn;
unsigned long kernelcore_node, kernelcore_remaining;
+ /* save the state before borrowing the nodemask */

+ nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
unsigned long totalpages = early_calculate_totalpages();
int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
@@ -4059,7 +4061,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
/* If kernelcore was not specified, there is no ZONE_MOVABLE */
if (!required_kernelcore)
- return;
+ goto out;
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
find_usable_zone_for_movable();
@@ -4158,6 +4160,10 @@ restart:
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+
+out:
+ /* restore the node_state */
+ node_states[N_HIGH_MEMORY] = saved_node_state;
}
/* Any regular memory on that node ? */
@@ -4242,11 +4248,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
early_node_map[i].start_pfn,
early_node_map[i].end_pfn);
- /*
- * find_zone_movable_pfns_for_nodes/early_calculate_totalpages init
- * that node_mask, clear it at first
- */
- nodes_clear(node_states[N_HIGH_MEMORY]);
/* Initialise every node */
mminit_verify_pageflags_layout();
setup_nr_node_ids();
diff --git a/mm/pdflush.c b/mm/pdflush.c
deleted file mode 100644
index 235ac440c44e..000000000000
--- a/mm/pdflush.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * mm/pdflush.c - worker threads for writing back filesystem data
- *
- * Copyright (C) 2002, Linus Torvalds.
- *
- * 09Apr2002 Andrew Morton
- * Initial version
- * 29Feb2004 kaos@sgi.com
- * Move worker thread creation to kthread to avoid chewing
- * up stack space with nested calls to kernel_thread.
- */
-
-#include <linux/sched.h>
-#include <linux/list.h>
-#include <linux/signal.h>
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h> /* Needed by writeback.h */
-#include <linux/writeback.h> /* Prototypes pdflush_operation() */
-#include <linux/kthread.h>
-#include <linux/cpuset.h>
-#include <linux/freezer.h>
-
-
-/*
- * Minimum and maximum number of pdflush instances
- */
-#define MIN_PDFLUSH_THREADS 2
-#define MAX_PDFLUSH_THREADS 8
-
-static void start_one_pdflush_thread(void);
-
-
-/*
- * The pdflush threads are worker threads for writing back dirty data.
- * Ideally, we'd like one thread per active disk spindle. But the disk
- * topology is very hard to divine at this level. Instead, we take
- * care in various places to prevent more than one pdflush thread from
- * performing writeback against a single filesystem. pdflush threads
- * have the PF_FLUSHER flag set in current->flags to aid in this.
- */
-
-/*
- * All the pdflush threads. Protected by pdflush_lock
- */
-static LIST_HEAD(pdflush_list);
-static DEFINE_SPINLOCK(pdflush_lock);
-
-/*
- * The count of currently-running pdflush threads. Protected
- * by pdflush_lock.
- *
- * Readable by sysctl, but not writable. Published to userspace at
- * /proc/sys/vm/nr_pdflush_threads.
- */
-int nr_pdflush_threads = 0;
-
-/*
- * The time at which the pdflush thread pool last went empty
- */
-static unsigned long last_empty_jifs;
-
-/*
- * The pdflush thread.
- *
- * Thread pool management algorithm:
- *
- * - The minimum and maximum number of pdflush instances are bound
- * by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
- *
- * - If there have been no idle pdflush instances for 1 second, create
- * a new one.
- *
- * - If the least-recently-went-to-sleep pdflush thread has been asleep
- * for more than one second, terminate a thread.
- */
-
-/*
- * A structure for passing work to a pdflush thread. Also for passing
- * state information between pdflush threads. Protected by pdflush_lock.
- */
-struct pdflush_work {
- struct task_struct *who; /* The thread */
- void (*fn)(unsigned long); /* A callback function */
- unsigned long arg0; /* An argument to the callback */
- struct list_head list; /* On pdflush_list, when idle */
- unsigned long when_i_went_to_sleep;
-};
-
-static int __pdflush(struct pdflush_work *my_work)
-{
- current->flags |= PF_FLUSHER | PF_SWAPWRITE;
- set_freezable();
- my_work->fn = NULL;
- my_work->who = current;
- INIT_LIST_HEAD(&my_work->list);
-
- spin_lock_irq(&pdflush_lock);
- for ( ; ; ) {
- struct pdflush_work *pdf;
-
- set_current_state(TASK_INTERRUPTIBLE);
- list_move(&my_work->list, &pdflush_list);
- my_work->when_i_went_to_sleep = jiffies;
- spin_unlock_irq(&pdflush_lock);
- schedule();
- try_to_freeze();
- spin_lock_irq(&pdflush_lock);
- if (!list_empty(&my_work->list)) {
- /*
- * Someone woke us up, but without removing our control
- * structure from the global list. swsusp will do this
- * in try_to_freeze()->refrigerator(). Handle it.
- */
- my_work->fn = NULL;
- continue;
- }
- if (my_work->fn == NULL) {
- printk("pdflush: bogus wakeup\n");
- continue;
- }
- spin_unlock_irq(&pdflush_lock);
-
- (*my_work->fn)(my_work->arg0);
-
- spin_lock_irq(&pdflush_lock);
-
- /*
- * Thread creation: For how long have there been zero
- * available threads?
- *
- * To throttle creation, we reset last_empty_jifs.
- */
- if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
- if (list_empty(&pdflush_list)) {
- if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
- last_empty_jifs = jiffies;
- nr_pdflush_threads++;
- spin_unlock_irq(&pdflush_lock);
- start_one_pdflush_thread();
- spin_lock_irq(&pdflush_lock);
- }
- }
- }
-
- my_work->fn = NULL;
-
- /*
- * Thread destruction: For how long has the sleepiest
- * thread slept?
- */
- if (list_empty(&pdflush_list))
- continue;
- if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
- continue;
- pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
- if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
- /* Limit exit rate */
- pdf->when_i_went_to_sleep = jiffies;
- break; /* exeunt */
- }
- }
- nr_pdflush_threads--;
- spin_unlock_irq(&pdflush_lock);
- return 0;
-}
-
-/*
- * Of course, my_work wants to be just a local in __pdflush(). It is
- * separated out in this manner to hopefully prevent the compiler from
- * performing unfortunate optimisations against the auto variables. Because
- * these are visible to other tasks and CPUs. (No problem has actually
- * been observed. This is just paranoia).
- */
-static int pdflush(void *dummy)
-{
- struct pdflush_work my_work;
- cpumask_var_t cpus_allowed;
-
- /*
- * Since the caller doesn't even check kthread_run() worked, let's not
- * freak out too much if this fails.
- */
- if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
- printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
- return 0;
- }
-
- /*
- * pdflush can spend a lot of time doing encryption via dm-crypt. We
- * don't want to do that at keventd's priority.
- */
- set_user_nice(current, 0);
-
- /*
- * Some configs put our parent kthread in a limited cpuset,
- * which kthread() overrides, forcing cpus_allowed == cpu_all_mask.
- * Our needs are more modest - cut back to our cpusets cpus_allowed.
- * This is needed as pdflush's are dynamically created and destroyed.
- * The boottime pdflush's are easily placed w/o these 2 lines.
- */
- cpuset_cpus_allowed(current, cpus_allowed);
- set_cpus_allowed_ptr(current, cpus_allowed);
- free_cpumask_var(cpus_allowed);
-
- return __pdflush(&my_work);
-}
-
-/*
- * Attempt to wake up a pdflush thread, and get it to do some work for you.
- * Returns zero if it indeed managed to find a worker thread, and passed your
- * payload to it.
- */
-int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
-{
- unsigned long flags;
- int ret = 0;
-
- BUG_ON(fn == NULL); /* Hard to diagnose if it's deferred */
-
- spin_lock_irqsave(&pdflush_lock, flags);
- if (list_empty(&pdflush_list)) {
- ret = -1;
- } else {
- struct pdflush_work *pdf;
-
- pdf = list_entry(pdflush_list.next, struct pdflush_work, list);
- list_del_init(&pdf->list);
- if (list_empty(&pdflush_list))
- last_empty_jifs = jiffies;
- pdf->fn = fn;
- pdf->arg0 = arg0;
- wake_up_process(pdf->who);
- }
- spin_unlock_irqrestore(&pdflush_lock, flags);
-
- return ret;
-}
-
-static void start_one_pdflush_thread(void)
-{
- struct task_struct *k;
-
- k = kthread_run(pdflush, NULL, "pdflush");
- if (unlikely(IS_ERR(k))) {
- spin_lock_irq(&pdflush_lock);
- nr_pdflush_threads--;
- spin_unlock_irq(&pdflush_lock);
- }
-}
-
-static int __init pdflush_init(void)
-{
- int i;
-
- /*
- * Pre-set nr_pdflush_threads... If we fail to create,
- * the count will be decremented.
- */
- nr_pdflush_threads = MIN_PDFLUSH_THREADS;
-
- for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
- start_one_pdflush_thread();
- return 0;
-}
-
-module_init(pdflush_init);
diff --git a/mm/percpu.c b/mm/percpu.c
index c0b2c1a76e81..b3d0bcff8c7c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -8,12 +8,13 @@
*
* This is percpu allocator which can handle both static and dynamic
* areas. Percpu areas are allocated in chunks in vmalloc area. Each
- * chunk is consisted of num_possible_cpus() units and the first chunk
- * is used for static percpu variables in the kernel image (special
- * boot time alloc/init handling necessary as these areas need to be
- * brought up before allocation services are running). Unit grows as
- * necessary and all units grow or shrink in unison. When a chunk is
- * filled up, another chunk is allocated. ie. in vmalloc area
+ * chunk consists of a boot-time determined number of units and the
+ * first chunk is used for static percpu variables in the kernel image
+ * (special boot time alloc/init handling necessary as these areas
+ * need to be brought up before allocation services are running).
+ * Each unit grows as necessary and all units grow or shrink in unison.
+ * When a chunk is filled up, another chunk is allocated, i.e. in the
+ * vmalloc area:
*
* c0 c1 c2
* ------------------- ------------------- ------------
@@ -22,11 +23,13 @@
*
* Allocation is done in offset-size areas of single unit space. Ie,
* an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
- * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring
- * percpu base registers pcpu_unit_size apart.
+ * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
+ * cpus. On NUMA, the mapping can be non-linear and even sparse.
+ * Percpu access can be done by configuring percpu base registers
+ * according to cpu to unit mapping and pcpu_unit_size.
*
- * There are usually many small percpu allocations many of them as
- * small as 4 bytes. The allocator organizes chunks into lists
+ * There are usually many small percpu allocations, many of them as
+ * small as 4 bytes. The allocator organizes chunks into lists
* according to free size and tries to allocate from the fullest one.
* Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
@@ -43,7 +46,7 @@
*
 * To use this allocator, arch code should do the following.
*
- * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+ * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
*
* - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
* regular address to percpu pointer and back if they need to be
@@ -56,6 +59,7 @@
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
+#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -94,20 +98,27 @@ struct pcpu_chunk {
int map_alloc; /* # of map entries allocated */
int *map; /* allocation map */
bool immutable; /* no [de]population allowed */
- struct page **page; /* points to page array */
- struct page *page_ar[]; /* #cpus * UNIT_PAGES */
+ unsigned long populated[]; /* populated bitmap */
};
static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
+static int pcpu_nr_units __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;
+/* cpus with the lowest and highest unit numbers */
+static unsigned int pcpu_first_unit_cpu __read_mostly;
+static unsigned int pcpu_last_unit_cpu __read_mostly;
+
/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);
+/* cpu -> unit map */
+const int *pcpu_unit_map __read_mostly;
+
/*
* The first chunk which always exists. Note that unlike other
* chunks, this one can be allocated and mapped in several different
@@ -129,9 +140,9 @@ static int pcpu_reserved_chunk_limit;
* Synchronization rules.
*
* There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
- * protects allocation/reclaim paths, chunks and chunk->page arrays.
- * The latter is a spinlock and protects the index data structures -
- * chunk slots, chunks and area maps in chunks.
+ * protects allocation/reclaim paths, chunks, populated bitmap and
+ * vmalloc mapping. The latter is a spinlock and protects the index
+ * data structures - chunk slots, chunks and area maps in chunks.
*
* During allocation, pcpu_alloc_mutex is kept locked all the time and
* pcpu_lock is grabbed and released as necessary. All actual memory
@@ -178,13 +189,7 @@ static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
- return cpu * pcpu_unit_pages + page_idx;
-}
-
-static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
- unsigned int cpu, int page_idx)
-{
- return &chunk->page[pcpu_page_idx(cpu, page_idx)];
+ return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
@@ -194,10 +199,13 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}
-static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
- int page_idx)
+static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
+ unsigned int cpu, int page_idx)
{
- return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
+ /* must not be used on pre-mapped chunk */
+ WARN_ON(chunk->immutable);
+
+ return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}
/* set the pointer to a chunk in a page struct */
@@ -212,6 +220,34 @@ static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
return (struct pcpu_chunk *)page->index;
}
+static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
+{
+ *rs = find_next_zero_bit(chunk->populated, end, *rs);
+ *re = find_next_bit(chunk->populated, end, *rs + 1);
+}
+
+static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
+{
+ *rs = find_next_bit(chunk->populated, end, *rs);
+ *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
+}
+
+/*
+ * (Un)populated page region iterators. Iterate over (un)populated
+ * page regions between @start and @end in @chunk. @rs and @re should
+ * be integer variables and will be set to start and end page index of
+ * the current region.
+ */
+#define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
+ for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
+ (rs) < (re); \
+ (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
+
+#define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
+ for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
+ (rs) < (re); \
+ (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
+
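A small worked example of these iterators may help (numbers are illustrative only):

/*
 * With pcpu_unit_pages == 8 and chunk->populated == 0b00111100 (pages
 * 2-5 populated), iterating over [0, 8):
 *
 *   pcpu_for_each_unpop_region() visits [0, 2) and then [6, 8);
 *   pcpu_for_each_pop_region()   visits [2, 6).
 *
 * Each [rs, re) range is handed as one contiguous region to the
 * alloc/map/unmap/free helpers below.
 */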
/**
* pcpu_mem_alloc - allocate memory
* @size: bytes to allocate
@@ -290,13 +326,21 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
void *first_start = pcpu_first_chunk->vm->addr;
/* is it in the first chunk? */
- if (addr >= first_start && addr < first_start + pcpu_chunk_size) {
+ if (addr >= first_start && addr < first_start + pcpu_unit_size) {
/* is it in the reserved area? */
if (addr < first_start + pcpu_reserved_chunk_limit)
return pcpu_reserved_chunk;
return pcpu_first_chunk;
}
+ /*
+ * The address is relative to unit0 which might be unused and
+ * thus unmapped. Offset the address to the unit space of the
+ * current processor before looking it up in the vmalloc
+ * space. Note that any possible cpu id can be used here, so
+ * there's no need to worry about preemption or cpu hotplug.
+ */
+ addr += pcpu_unit_map[smp_processor_id()] * pcpu_unit_size;
return pcpu_get_page_chunk(vmalloc_to_page(addr));
}
@@ -545,126 +589,327 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
}
/**
- * pcpu_unmap - unmap pages out of a pcpu_chunk
+ * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
* @chunk: chunk of interest
- * @page_start: page index of the first page to unmap
- * @page_end: page index of the last page to unmap + 1
- * @flush: whether to flush cache and tlb or not
+ * @bitmapp: output parameter for bitmap
+ * @may_alloc: may allocate the array
*
- * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
- * If @flush is true, vcache is flushed before unmapping and tlb
- * after.
+ * Returns pointer to array of pointers to struct page and bitmap,
+ * both of which can be indexed with pcpu_page_idx(). The returned
+ * array is cleared to zero and *@bitmapp is copied from
+ * @chunk->populated. Note that there is only one array and bitmap
+ * and access exclusion is the caller's responsibility.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
+ * Otherwise, don't care.
+ *
+ * RETURNS:
+ * Pointer to temp pages array on success, NULL on failure.
*/
-static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
- bool flush)
+static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
+ unsigned long **bitmapp,
+ bool may_alloc)
{
- unsigned int last = num_possible_cpus() - 1;
- unsigned int cpu;
+ static struct page **pages;
+ static unsigned long *bitmap;
+ size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
+ size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
+ sizeof(unsigned long);
+
+ if (!pages || !bitmap) {
+ if (may_alloc && !pages)
+ pages = pcpu_mem_alloc(pages_size);
+ if (may_alloc && !bitmap)
+ bitmap = pcpu_mem_alloc(bitmap_size);
+ if (!pages || !bitmap)
+ return NULL;
+ }
- /* unmap must not be done on immutable chunk */
- WARN_ON(chunk->immutable);
+ memset(pages, 0, pages_size);
+ bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
- /*
- * Each flushing trial can be very expensive, issue flush on
- * the whole region at once rather than doing it for each cpu.
- * This could be an overkill but is more scalable.
- */
- if (flush)
- flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
- pcpu_chunk_addr(chunk, last, page_end));
+ *bitmapp = bitmap;
+ return pages;
+}
- for_each_possible_cpu(cpu)
- unmap_kernel_range_noflush(
- pcpu_chunk_addr(chunk, cpu, page_start),
- (page_end - page_start) << PAGE_SHIFT);
-
- /* ditto as flush_cache_vunmap() */
- if (flush)
- flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
- pcpu_chunk_addr(chunk, last, page_end));
+/**
+ * pcpu_free_pages - free pages which were allocated for @chunk
+ * @chunk: chunk pages were allocated for
+ * @pages: array of pages to be freed, indexed by pcpu_page_idx()
+ * @populated: populated bitmap
+ * @page_start: page index of the first page to be freed
+ * @page_end: page index of the last page to be freed + 1
+ *
+ * Free pages [@page_start, @page_end) in @pages for all units.
+ * The pages were allocated for @chunk.
+ */
+static void pcpu_free_pages(struct pcpu_chunk *chunk,
+ struct page **pages, unsigned long *populated,
+ int page_start, int page_end)
+{
+ unsigned int cpu;
+ int i;
+
+ for_each_possible_cpu(cpu) {
+ for (i = page_start; i < page_end; i++) {
+ struct page *page = pages[pcpu_page_idx(cpu, i)];
+
+ if (page)
+ __free_page(page);
+ }
+ }
}
/**
- * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
- * @chunk: chunk to depopulate
- * @off: offset to the area to depopulate
- * @size: size of the area to depopulate in bytes
- * @flush: whether to flush cache and tlb or not
- *
- * For each cpu, depopulate and unmap pages [@page_start,@page_end)
- * from @chunk. If @flush is true, vcache is flushed before unmapping
- * and tlb after.
- *
- * CONTEXT:
- * pcpu_alloc_mutex.
+ * pcpu_alloc_pages - allocates pages for @chunk
+ * @chunk: target chunk
+ * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
+ * @populated: populated bitmap
+ * @page_start: page index of the first page to be allocated
+ * @page_end: page index of the last page to be allocated + 1
+ *
+ * Allocate pages [@page_start,@page_end) into @pages for all units.
+ * The allocation is for @chunk. Percpu core doesn't care about the
+ * content of @pages and will pass it verbatim to pcpu_map_pages().
*/
-static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
- bool flush)
+static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
+ struct page **pages, unsigned long *populated,
+ int page_start, int page_end)
{
- int page_start = PFN_DOWN(off);
- int page_end = PFN_UP(off + size);
- int unmap_start = -1;
- int uninitialized_var(unmap_end);
+ const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
unsigned int cpu;
int i;
- for (i = page_start; i < page_end; i++) {
- for_each_possible_cpu(cpu) {
- struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+ for_each_possible_cpu(cpu) {
+ for (i = page_start; i < page_end; i++) {
+ struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
+
+ *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
+ if (!*pagep) {
+ pcpu_free_pages(chunk, pages, populated,
+ page_start, page_end);
+ return -ENOMEM;
+ }
+ }
+ }
+ return 0;
+}
- if (!*pagep)
- continue;
+/**
+ * pcpu_pre_unmap_flush - flush cache prior to unmapping
+ * @chunk: chunk the regions to be flushed belongs to
+ * @page_start: page index of the first page to be flushed
+ * @page_end: page index of the last page to be flushed + 1
+ *
+ * Pages in [@page_start,@page_end) of @chunk are about to be
+ * unmapped. Flush cache. As each flushing trial can be very
+ * expensive, issue flush on the whole region at once rather than
+ * doing it for each cpu. This could be overkill but is more
+ * scalable.
+ */
+static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
+ int page_start, int page_end)
+{
+ flush_cache_vunmap(
+ pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+}
- __free_page(*pagep);
+static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
+{
+ unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
+}
- /*
- * If it's partial depopulation, it might get
- * populated or depopulated again. Mark the
- * page gone.
- */
- *pagep = NULL;
+/**
+ * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
+ * @chunk: chunk of interest
+ * @pages: pages array which can be used to pass information to free
+ * @populated: populated bitmap
+ * @page_start: page index of the first page to unmap
+ * @page_end: page index of the last page to unmap + 1
+ *
+ * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
+ * Corresponding elements in @pages were cleared by the caller and can
+ * be used to carry information to pcpu_free_pages() which will be
+ * called after all unmaps are finished. The caller should call
+ * proper pre/post flush functions.
+ */
+static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
+ struct page **pages, unsigned long *populated,
+ int page_start, int page_end)
+{
+ unsigned int cpu;
+ int i;
+
+ for_each_possible_cpu(cpu) {
+ for (i = page_start; i < page_end; i++) {
+ struct page *page;
- unmap_start = unmap_start < 0 ? i : unmap_start;
- unmap_end = i + 1;
+ page = pcpu_chunk_page(chunk, cpu, i);
+ WARN_ON(!page);
+ pages[pcpu_page_idx(cpu, i)] = page;
}
+ __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
+ page_end - page_start);
}
- if (unmap_start >= 0)
- pcpu_unmap(chunk, unmap_start, unmap_end, flush);
+ for (i = page_start; i < page_end; i++)
+ __clear_bit(i, populated);
+}
+
+/**
+ * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
+ * @chunk: pcpu_chunk the regions to be flushed belong to
+ * @page_start: page index of the first page to be flushed
+ * @page_end: page index of the last page to be flushed + 1
+ *
+ * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
+ * TLB for the regions. This can be skipped if the area is to be
+ * returned to vmalloc as vmalloc will handle TLB flushing lazily.
+ *
+ * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
+ * for the whole region.
+ */
+static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
+ int page_start, int page_end)
+{
+ flush_tlb_kernel_range(
+ pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+}
+
+static int __pcpu_map_pages(unsigned long addr, struct page **pages,
+ int nr_pages)
+{
+ return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
+ PAGE_KERNEL, pages);
}
/**
- * pcpu_map - map pages into a pcpu_chunk
+ * pcpu_map_pages - map pages into a pcpu_chunk
* @chunk: chunk of interest
+ * @pages: pages array containing pages to be mapped
+ * @populated: populated bitmap
* @page_start: page index of the first page to map
* @page_end: page index of the last page to map + 1
*
- * For each cpu, map pages [@page_start,@page_end) into @chunk.
- * vcache is flushed afterwards.
+ * For each cpu, map pages [@page_start,@page_end) into @chunk. The
+ * caller is responsible for calling pcpu_post_map_flush() after all
+ * mappings are complete.
+ *
+ * This function is responsible for setting corresponding bits in
+ * @chunk->populated bitmap and whatever is necessary for reverse
+ * lookup (addr -> chunk).
*/
-static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
+static int pcpu_map_pages(struct pcpu_chunk *chunk,
+ struct page **pages, unsigned long *populated,
+ int page_start, int page_end)
{
- unsigned int last = num_possible_cpus() - 1;
- unsigned int cpu;
- int err;
-
- /* map must not be done on immutable chunk */
- WARN_ON(chunk->immutable);
+ unsigned int cpu, tcpu;
+ int i, err;
for_each_possible_cpu(cpu) {
- err = map_kernel_range_noflush(
- pcpu_chunk_addr(chunk, cpu, page_start),
- (page_end - page_start) << PAGE_SHIFT,
- PAGE_KERNEL,
- pcpu_chunk_pagep(chunk, cpu, page_start));
+ err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
+ &pages[pcpu_page_idx(cpu, page_start)],
+ page_end - page_start);
if (err < 0)
- return err;
+ goto err;
+ }
+
+ /* mapping successful, link chunk and mark populated */
+ for (i = page_start; i < page_end; i++) {
+ for_each_possible_cpu(cpu)
+ pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
+ chunk);
+ __set_bit(i, populated);
}
- /* flush at once, please read comments in pcpu_unmap() */
- flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
- pcpu_chunk_addr(chunk, last, page_end));
return 0;
+
+err:
+ for_each_possible_cpu(tcpu) {
+ if (tcpu == cpu)
+ break;
+ __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
+ page_end - page_start);
+ }
+ return err;
+}
+
+/**
+ * pcpu_post_map_flush - flush cache after mapping
+ * @chunk: pcpu_chunk the regions to be flushed belong to
+ * @page_start: page index of the first page to be flushed
+ * @page_end: page index of the last page to be flushed + 1
+ *
+ * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
+ * cache.
+ *
+ * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
+ * for the whole region.
+ */
+static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
+ int page_start, int page_end)
+{
+ flush_cache_vmap(
+ pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+}
+
+/**
+ * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
+ * @chunk: chunk to depopulate
+ * @off: offset to the area to depopulate
+ * @size: size of the area to depopulate in bytes
+ *
+ * For each cpu, depopulate and unmap pages [@page_start,@page_end)
+ * from @chunk.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex.
+ */
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
+{
+ int page_start = PFN_DOWN(off);
+ int page_end = PFN_UP(off + size);
+ struct page **pages;
+ unsigned long *populated;
+ int rs, re;
+
+ /* quick path, check whether it's empty already */
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
+ if (rs == page_start && re == page_end)
+ return;
+ break;
+ }
+
+ /* immutable chunks can't be depopulated */
+ WARN_ON(chunk->immutable);
+
+ /*
+ * If control reaches here, there must have been at least one
+ * successful population attempt so the temp pages array must
+ * be available now.
+ */
+ pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
+ BUG_ON(!pages);
+
+ /* unmap and free */
+ pcpu_pre_unmap_flush(chunk, page_start, page_end);
+
+ pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
+ pcpu_unmap_pages(chunk, pages, populated, rs, re);
+
+ /* no need to flush tlb, vmalloc will handle it lazily */
+
+ pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
+ pcpu_free_pages(chunk, pages, populated, rs, re);
+
+ /* commit new bitmap */
+ bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
/**
@@ -681,50 +926,60 @@ static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
*/
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
- const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
int page_start = PFN_DOWN(off);
int page_end = PFN_UP(off + size);
- int map_start = -1;
- int uninitialized_var(map_end);
+ int free_end = page_start, unmap_end = page_start;
+ struct page **pages;
+ unsigned long *populated;
unsigned int cpu;
- int i;
+ int rs, re, rc;
- for (i = page_start; i < page_end; i++) {
- if (pcpu_chunk_page_occupied(chunk, i)) {
- if (map_start >= 0) {
- if (pcpu_map(chunk, map_start, map_end))
- goto err;
- map_start = -1;
- }
- continue;
- }
+ /* quick path, check whether all pages are already there */
+ pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
+ if (rs == page_start && re == page_end)
+ goto clear;
+ break;
+ }
- map_start = map_start < 0 ? i : map_start;
- map_end = i + 1;
+ /* need to allocate and map pages, this chunk can't be immutable */
+ WARN_ON(chunk->immutable);
- for_each_possible_cpu(cpu) {
- struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+ pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
+ if (!pages)
+ return -ENOMEM;
- *pagep = alloc_pages_node(cpu_to_node(cpu),
- alloc_mask, 0);
- if (!*pagep)
- goto err;
- pcpu_set_page_chunk(*pagep, chunk);
- }
+ /* alloc and map */
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
+ rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
+ if (rc)
+ goto err_free;
+ free_end = re;
}
- if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
- goto err;
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
+ rc = pcpu_map_pages(chunk, pages, populated, rs, re);
+ if (rc)
+ goto err_unmap;
+ unmap_end = re;
+ }
+ pcpu_post_map_flush(chunk, page_start, page_end);
+ /* commit new bitmap */
+ bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
+clear:
for_each_possible_cpu(cpu)
- memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
- size);
-
+ memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
return 0;
-err:
- /* likely under heavy memory pressure, give memory back */
- pcpu_depopulate_chunk(chunk, off, size, true);
- return -ENOMEM;
+
+err_unmap:
+ pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
+ pcpu_unmap_pages(chunk, pages, populated, rs, re);
+ pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
+err_free:
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
+ pcpu_free_pages(chunk, pages, populated, rs, re);
+ return rc;
}
static void free_pcpu_chunk(struct pcpu_chunk *chunk)
@@ -748,7 +1003,6 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
chunk->map[chunk->map_used++] = pcpu_unit_size;
- chunk->page = chunk->page_ar;
chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
if (!chunk->vm) {
@@ -848,6 +1102,7 @@ area_found:
mutex_unlock(&pcpu_alloc_mutex);
+ /* return address relative to unit0 */
return __addr_to_pcpu_ptr(chunk->vm->addr + off);
fail_unlock:
@@ -929,7 +1184,7 @@ static void pcpu_reclaim(struct work_struct *work)
mutex_unlock(&pcpu_alloc_mutex);
list_for_each_entry_safe(chunk, next, &todo, list) {
- pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
+ pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
free_pcpu_chunk(chunk);
}
}
@@ -977,26 +1232,16 @@ EXPORT_SYMBOL_GPL(free_percpu);
/**
* pcpu_setup_first_chunk - initialize the first percpu chunk
- * @get_page_fn: callback to fetch page pointer
* @static_size: the size of static percpu area in bytes
- * @reserved_size: the size of reserved percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes, 0 for none
* @dyn_size: free size for dynamic allocation in bytes, -1 for auto
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
- * @base_addr: mapped address, NULL for auto
- * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE
+ * @base_addr: mapped address
+ * @unit_map: cpu -> unit map, NULL for sequential mapping
*
* Initialize the first percpu chunk which contains the kernel static
 * percpu area. This function is to be called from arch percpu area
- * setup path. The first two parameters are mandatory. The rest are
- * optional.
- *
- * @get_page_fn() should return pointer to percpu page given cpu
- * number and page number. It should at least return enough pages to
- * cover the static area. The returned pages for static area should
- * have been initialized with valid data. If @unit_size is specified,
- * it can also return pages after the static area. NULL return
- * indicates end of pages for the cpu. Note that @get_page_fn() must
- * return the same number of pages for all cpus.
+ * setup path.
*
* @reserved_size, if non-zero, specifies the amount of bytes to
* reserve after the static area in the first chunk. This reserves
@@ -1011,17 +1256,12 @@ EXPORT_SYMBOL_GPL(free_percpu);
* non-negative value makes percpu leave alone the area beyond
* @static_size + @reserved_size + @dyn_size.
*
- * @unit_size, if non-negative, specifies unit size and must be
- * aligned to PAGE_SIZE and equal to or larger than @static_size +
- * @reserved_size + if non-negative, @dyn_size.
+ * @unit_size specifies the unit size. It must be aligned to PAGE_SIZE
+ * and be equal to or larger than @static_size + @reserved_size plus,
+ * if non-negative, @dyn_size.
*
- * Non-null @base_addr means that the caller already allocated virtual
- * region for the first chunk and mapped it. percpu must not mess
- * with the chunk. Note that @base_addr with 0 @unit_size or non-NULL
- * @populate_pte_fn doesn't make any sense.
- *
- * @populate_pte_fn is used to populate the pagetable. NULL means the
- * caller already populated the pagetable.
+ * The caller should have mapped the first chunk at @base_addr and
+ * copied static data to each unit.
*
* If the first chunk ends up with both reserved and dynamic areas, it
* is served by two chunks - one to serve the core static and reserved
@@ -1034,47 +1274,83 @@ EXPORT_SYMBOL_GPL(free_percpu);
* The determined pcpu_unit_size which can be used to initialize
* percpu access.
*/
-size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
- size_t static_size, size_t reserved_size,
- ssize_t dyn_size, ssize_t unit_size,
- void *base_addr,
- pcpu_populate_pte_fn_t populate_pte_fn)
+size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, size_t unit_size,
+ void *base_addr, const int *unit_map)
{
static struct vm_struct first_vm;
static int smap[2], dmap[2];
size_t size_sum = static_size + reserved_size +
(dyn_size >= 0 ? dyn_size : 0);
struct pcpu_chunk *schunk, *dchunk = NULL;
- unsigned int cpu;
- int nr_pages;
- int err, i;
+ unsigned int cpu, tcpu;
+ int i;
- /* santiy checks */
+ /* sanity checks */
BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
BUG_ON(!static_size);
- if (unit_size >= 0) {
- BUG_ON(unit_size < size_sum);
- BUG_ON(unit_size & ~PAGE_MASK);
- BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
- } else
- BUG_ON(base_addr);
- BUG_ON(base_addr && populate_pte_fn);
-
- if (unit_size >= 0)
- pcpu_unit_pages = unit_size >> PAGE_SHIFT;
- else
- pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
- PFN_UP(size_sum));
+ BUG_ON(!base_addr);
+ BUG_ON(unit_size < size_sum);
+ BUG_ON(unit_size & ~PAGE_MASK);
+ BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
+
+ /* determine number of units and verify and initialize pcpu_unit_map */
+ if (unit_map) {
+ int first_unit = INT_MAX, last_unit = INT_MIN;
+
+ for_each_possible_cpu(cpu) {
+ int unit = unit_map[cpu];
+
+ BUG_ON(unit < 0);
+ for_each_possible_cpu(tcpu) {
+ if (tcpu == cpu)
+ break;
+ /* the mapping should be one-to-one */
+ BUG_ON(unit_map[tcpu] == unit);
+ }
+
+ if (unit < first_unit) {
+ pcpu_first_unit_cpu = cpu;
+ first_unit = unit;
+ }
+ if (unit > last_unit) {
+ pcpu_last_unit_cpu = cpu;
+ last_unit = unit;
+ }
+ }
+ pcpu_nr_units = last_unit + 1;
+ pcpu_unit_map = unit_map;
+ } else {
+ int *identity_map;
+
+ /* #units == #cpus, identity mapped */
+ identity_map = alloc_bootmem(num_possible_cpus() *
+ sizeof(identity_map[0]));
+ for_each_possible_cpu(cpu)
+ identity_map[cpu] = cpu;
+
+ pcpu_nr_units = num_possible_cpus();
+ pcpu_first_unit_cpu = 0;
+ pcpu_last_unit_cpu = pcpu_nr_units - 1;
+ pcpu_unit_map = identity_map;
+ }
+
+ /* determine basic parameters */
+ pcpu_unit_pages = unit_size >> PAGE_SHIFT;
pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
- pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
- pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
- + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
+ pcpu_chunk_size = pcpu_nr_units * pcpu_unit_size;
+ pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
+ BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
if (dyn_size < 0)
dyn_size = pcpu_unit_size - static_size - reserved_size;
+ first_vm.flags = VM_ALLOC;
+ first_vm.size = pcpu_chunk_size;
+ first_vm.addr = base_addr;
+
/*
* Allocate chunk slots. The additional last slot is for
* empty chunks.
@@ -1096,7 +1372,8 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
schunk->vm = &first_vm;
schunk->map = smap;
schunk->map_alloc = ARRAY_SIZE(smap);
- schunk->page = schunk->page_ar;
+ schunk->immutable = true;
+ bitmap_fill(schunk->populated, pcpu_unit_pages);
if (reserved_size) {
schunk->free_size = reserved_size;
@@ -1114,93 +1391,39 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
/* init dynamic chunk if necessary */
if (dyn_size) {
- dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
+ dchunk = alloc_bootmem(pcpu_chunk_struct_size);
INIT_LIST_HEAD(&dchunk->list);
dchunk->vm = &first_vm;
dchunk->map = dmap;
dchunk->map_alloc = ARRAY_SIZE(dmap);
- dchunk->page = schunk->page_ar; /* share page map with schunk */
+ dchunk->immutable = true;
+ bitmap_fill(dchunk->populated, pcpu_unit_pages);
dchunk->contig_hint = dchunk->free_size = dyn_size;
dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
dchunk->map[dchunk->map_used++] = dchunk->free_size;
}
- /* allocate vm address */
- first_vm.flags = VM_ALLOC;
- first_vm.size = pcpu_chunk_size;
-
- if (!base_addr)
- vm_area_register_early(&first_vm, PAGE_SIZE);
- else {
- /*
- * Pages already mapped. No need to remap into
- * vmalloc area. In this case the first chunks can't
- * be mapped or unmapped by percpu and are marked
- * immutable.
- */
- first_vm.addr = base_addr;
- schunk->immutable = true;
- if (dchunk)
- dchunk->immutable = true;
- }
-
- /* assign pages */
- nr_pages = -1;
- for_each_possible_cpu(cpu) {
- for (i = 0; i < pcpu_unit_pages; i++) {
- struct page *page = get_page_fn(cpu, i);
-
- if (!page)
- break;
- *pcpu_chunk_pagep(schunk, cpu, i) = page;
- }
-
- BUG_ON(i < PFN_UP(static_size));
-
- if (nr_pages < 0)
- nr_pages = i;
- else
- BUG_ON(nr_pages != i);
- }
-
- /* map them */
- if (populate_pte_fn) {
- for_each_possible_cpu(cpu)
- for (i = 0; i < nr_pages; i++)
- populate_pte_fn(pcpu_chunk_addr(schunk,
- cpu, i));
-
- err = pcpu_map(schunk, 0, nr_pages);
- if (err)
- panic("failed to setup static percpu area, err=%d\n",
- err);
- }
-
/* link the first chunk in */
pcpu_first_chunk = dchunk ?: schunk;
pcpu_chunk_relocate(pcpu_first_chunk, -1);
/* we're done */
- pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
+ pcpu_base_addr = schunk->vm->addr;
return pcpu_unit_size;
}
-/*
- * Embedding first chunk setup helper.
- */
-static void *pcpue_ptr __initdata;
-static size_t pcpue_size __initdata;
-static size_t pcpue_unit_size __initdata;
-
-static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
+static size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size,
+ ssize_t *dyn_sizep)
{
- size_t off = (size_t)pageno << PAGE_SHIFT;
+ size_t size_sum;
- if (off >= pcpue_size)
- return NULL;
+ size_sum = PFN_ALIGN(static_size + reserved_size +
+ (*dyn_sizep >= 0 ? *dyn_sizep : 0));
+ if (*dyn_sizep != 0)
+ *dyn_sizep = size_sum - static_size - reserved_size;
- return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
+ return size_sum;
}
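The size helper above just page-aligns the sum and, in auto mode, hands the rounding slack to the dynamic area; a quick worked example with purely illustrative numbers:

/*
 * pcpu_calc_fc_sizes() example: static_size = 45060, reserved_size =
 * 8192, *dyn_sizep = -1 (auto).  size_sum = PFN_ALIGN(45060 + 8192 + 0)
 * = 57344 (14 pages), and since *dyn_sizep != 0 it becomes
 * 57344 - 45060 - 8192 = 4092, i.e. the dynamic area soaks up whatever
 * the page rounding left over.
 */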
/**
@@ -1208,7 +1431,6 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
* @static_size: the size of static percpu area in bytes
* @reserved_size: the size of reserved percpu area in bytes
* @dyn_size: free size for dynamic allocation in bytes, -1 for auto
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
*
* This is a helper to ease setting up embedded first percpu chunk and
* can be called where pcpu_setup_first_chunk() is expected.
@@ -1220,9 +1442,9 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
* page size.
*
* When @dyn_size is positive, dynamic area might be larger than
- * specified to fill page alignment. Also, when @dyn_size is auto,
- * @dyn_size does not fill the whole first chunk but only what's
- * necessary for page alignment after static and reserved areas.
+ * specified to fill page alignment. When @dyn_size is auto,
+ * @dyn_size is just big enough to fill page alignment after static
+ * and reserved areas.
*
* If the needed size is smaller than the minimum or specified unit
* size, the leftover is returned to the bootmem allocator.
@@ -1232,42 +1454,562 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
* percpu access on success, -errno on failure.
*/
ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
- ssize_t dyn_size, ssize_t unit_size)
+ ssize_t dyn_size)
{
+ size_t size_sum, unit_size, chunk_size;
+ void *base;
unsigned int cpu;
/* determine parameters and allocate */
- pcpue_size = PFN_ALIGN(static_size + reserved_size +
- (dyn_size >= 0 ? dyn_size : 0));
- if (dyn_size != 0)
- dyn_size = pcpue_size - static_size - reserved_size;
-
- if (unit_size >= 0) {
- BUG_ON(unit_size < pcpue_size);
- pcpue_unit_size = unit_size;
- } else
- pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
-
- pcpue_ptr = __alloc_bootmem_nopanic(
- num_possible_cpus() * pcpue_unit_size,
- PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
- if (!pcpue_ptr)
+ size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
+
+ unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
+ chunk_size = unit_size * num_possible_cpus();
+
+ base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS));
+ if (!base) {
+ pr_warning("PERCPU: failed to allocate %zu bytes for "
+ "embedding\n", chunk_size);
return -ENOMEM;
+ }
/* return the leftover and copy */
for_each_possible_cpu(cpu) {
- void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+ void *ptr = base + cpu * unit_size;
- free_bootmem(__pa(ptr + pcpue_size),
- pcpue_unit_size - pcpue_size);
+ free_bootmem(__pa(ptr + size_sum), unit_size - size_sum);
memcpy(ptr, __per_cpu_load, static_size);
}
/* we're ready, commit */
pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
- pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+ size_sum >> PAGE_SHIFT, base, static_size);
+
+ return pcpu_setup_first_chunk(static_size, reserved_size, dyn_size,
+ unit_size, base, NULL);
+}
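A hedged sketch of how an arch setup path might now call the embed helper (the body of setup_per_cpu_areas() here is illustrative, not taken from any arch port): with the unit_size argument gone, callers pass only sizes and the helper picks unit_size = max(size_sum, PCPU_MIN_UNIT_SIZE).

void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	ssize_t unit_size;

	unit_size = pcpu_embed_first_chunk(static_size,
					   PERCPU_MODULE_RESERVE, -1);
	if (unit_size < 0)
		panic("percpu: embed first chunk setup failed");

	/* arch code then derives its per-cpu offsets from unit_size */
}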
+
+/**
+ * pcpu_4k_first_chunk - map the first chunk using PAGE_SIZE pages
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
+ * @free_fn: function to free percpu page, always called with PAGE_SIZE
+ * @populate_pte_fn: function to populate pte
+ *
+ * This is a helper to ease setting up a page-mapped first percpu chunk and
+ * can be called where pcpu_setup_first_chunk() is expected.
+ *
+ * This is the basic allocator. Static percpu area is allocated
+ * page-by-page into vmalloc area.
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access on success, -errno on failure.
+ */
+ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn,
+ pcpu_fc_populate_pte_fn_t populate_pte_fn)
+{
+ static struct vm_struct vm;
+ int unit_pages;
+ size_t pages_size;
+ struct page **pages;
+ unsigned int cpu;
+ int i, j;
+ ssize_t ret;
+
+ unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size,
+ PCPU_MIN_UNIT_SIZE));
+
+ /* unaligned allocations can't be freed, round up to page size */
+ pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
+ sizeof(pages[0]));
+ pages = alloc_bootmem(pages_size);
+
+ /* allocate pages */
+ j = 0;
+ for_each_possible_cpu(cpu)
+ for (i = 0; i < unit_pages; i++) {
+ void *ptr;
+
+ ptr = alloc_fn(cpu, PAGE_SIZE);
+ if (!ptr) {
+ pr_warning("PERCPU: failed to allocate "
+ "4k page for cpu%u\n", cpu);
+ goto enomem;
+ }
+ pages[j++] = virt_to_page(ptr);
+ }
+
+ /* allocate vm area, map the pages and copy static data */
+ vm.flags = VM_ALLOC;
+ vm.size = num_possible_cpus() * unit_pages << PAGE_SHIFT;
+ vm_area_register_early(&vm, PAGE_SIZE);
+
+ for_each_possible_cpu(cpu) {
+ unsigned long unit_addr = (unsigned long)vm.addr +
+ (cpu * unit_pages << PAGE_SHIFT);
+
+ for (i = 0; i < unit_pages; i++)
+ populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
+
+ /* pte already populated, the following shouldn't fail */
+ ret = __pcpu_map_pages(unit_addr, &pages[cpu * unit_pages],
+ unit_pages);
+ if (ret < 0)
+ panic("failed to map percpu area, err=%zd\n", ret);
+
+ /*
+ * FIXME: Archs with virtual cache should flush local
+ * cache for the linear mapping here - something
+ * equivalent to flush_cache_vmap() on the local cpu.
+ * flush_cache_vmap() can't be used as most supporting
+ * data structures are not set up yet.
+ */
+
+ /* copy static data */
+ memcpy((void *)unit_addr, __per_cpu_load, static_size);
+ }
+
+ /* we're ready, commit */
+ pr_info("PERCPU: %d 4k pages per cpu, static data %zu bytes\n",
+ unit_pages, static_size);
+
+ ret = pcpu_setup_first_chunk(static_size, reserved_size, -1,
+ unit_pages << PAGE_SHIFT, vm.addr, NULL);
+ goto out_free_ar;
+
+enomem:
+ while (--j >= 0)
+ free_fn(page_address(pages[j]), PAGE_SIZE);
+ ret = -ENOMEM;
+out_free_ar:
+ free_bootmem(__pa(pages), pages_size);
+ return ret;
+}
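A hedged sketch of the callback trio an architecture might supply to the helper above (the example_* names are hypothetical, not taken from any arch port):

static void * __init example_pcpu_alloc(unsigned int cpu, size_t size)
{
	/* prefer memory on the cpu's own node for its pages */
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)),
				    size, size, __pa(MAX_DMA_ADDRESS));
}

static void __init example_pcpu_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static void __init example_populate_pte(unsigned long addr)
{
	/* arch-specific: make sure pgd/pud/pmd/pte exist for addr */
}

/* then, from the arch's setup_per_cpu_areas():
 *	pcpu_4k_first_chunk(static_size, reserved_size,
 *			    example_pcpu_alloc, example_pcpu_free,
 *			    example_populate_pte);
 */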
+
+/*
+ * Large page remapping first chunk setup helper
+ */
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+
+/**
+ * pcpu_lpage_build_unit_map - build unit_map for large page remapping
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @dyn_sizep: in/out parameter for dynamic size, -1 for auto
+ * @unit_sizep: out parameter for unit size
+ * @unit_map: unit_map to be filled
+ * @cpu_distance_fn: callback to determine distance between cpus
+ *
+ * This function builds cpu -> unit map and determine other parameters
+ * considering needed percpu size, large page size and distances
+ * between CPUs in NUMA.
+ *
+ * CPUs which are of LOCAL_DISTANCE both ways are grouped together and
+ * may share units in the same large page. The returned configuration
+ * is guaranteed to have CPUs on different nodes on different large
+ * pages and >=75% usage of allocated virtual address space.
+ *
+ * RETURNS:
+ * On success, fills in @unit_map, sets *@dyn_sizep, *@unit_sizep and
+ * returns the number of units to be allocated. -errno on failure.
+ */
+int __init pcpu_lpage_build_unit_map(size_t static_size, size_t reserved_size,
+ ssize_t *dyn_sizep, size_t *unit_sizep,
+ size_t lpage_size, int *unit_map,
+ pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
+{
+ static int group_map[NR_CPUS] __initdata;
+ static int group_cnt[NR_CPUS] __initdata;
+ int group_cnt_max = 0;
+ size_t size_sum, min_unit_size, alloc_size;
+ int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
+ int last_allocs;
+ unsigned int cpu, tcpu;
+ int group, unit;
+
+ /*
+ * Determine min_unit_size, alloc_size and max_upa such that
+ * alloc_size is a multiple of lpage_size and is the smallest
+ * which can accommodate 4k aligned segments which are equal to
+ * or larger than min_unit_size.
+ */
+ size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, dyn_sizep);
+ min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
+
+ alloc_size = roundup(min_unit_size, lpage_size);
+ upa = alloc_size / min_unit_size;
+ while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
+ upa--;
+ max_upa = upa;
+
+ /* group cpus according to their proximity */
+ for_each_possible_cpu(cpu) {
+ group = 0;
+ next_group:
+ for_each_possible_cpu(tcpu) {
+ if (cpu == tcpu)
+ break;
+ if (group_map[tcpu] == group &&
+ (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
+ cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
+ group++;
+ goto next_group;
+ }
+ }
+ group_map[cpu] = group;
+ group_cnt[group]++;
+ group_cnt_max = max(group_cnt_max, group_cnt[group]);
+ }
+
+ /*
+ * Expand unit size until address space usage goes over 75%
+ * and then as much as possible without using more address
+ * space.
+ */
+ last_allocs = INT_MAX;
+ for (upa = max_upa; upa; upa--) {
+ int allocs = 0, wasted = 0;
+
+ if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
+ continue;
+
+ for (group = 0; group_cnt[group]; group++) {
+ int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
+ allocs += this_allocs;
+ wasted += this_allocs * upa - group_cnt[group];
+ }
+
+ /*
+ * Don't accept if wastage is over 25%. The
+ * greater-than comparison ensures upa==1 always
+ * passes the following check.
+ */
+ if (wasted > num_possible_cpus() / 3)
+ continue;
+
+ /* and then don't consume more memory */
+ if (allocs > last_allocs)
+ break;
+ last_allocs = allocs;
+ best_upa = upa;
+ }
+ *unit_sizep = alloc_size / best_upa;
+
+ /* assign units to cpus accordingly */
+ unit = 0;
+ for (group = 0; group_cnt[group]; group++) {
+ for_each_possible_cpu(cpu)
+ if (group_map[cpu] == group)
+ unit_map[cpu] = unit++;
+ unit = roundup(unit, best_upa);
+ }
+
+ return unit; /* unit contains aligned number of units */
+}
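+
+/*
+ * Worked example for pcpu_lpage_build_unit_map() above (illustrative
+ * only, all numbers are hypothetical): with lpage_size = 2M,
+ * min_unit_size = 320k and two NUMA nodes of six CPUs each,
+ * alloc_size = roundup(320k, 2M) = 2M and max_upa = 4 (512k is the
+ * smallest page aligned divisor of 2M which is >= 320k).  For upa = 4
+ * each group needs DIV_ROUND_UP(6, 4) = 2 allocs, wasting 2 units per
+ * group; the total waste of 4 is not greater than
+ * num_possible_cpus() / 3 = 4, so upa = 4 is accepted and unit_size
+ * becomes 512k.  Units 0-5 then map to node 0 and units 8-13 to node 1
+ * (unit is rounded up to a multiple of upa between groups), and 16
+ * units are returned - 12 of the 16 units (75%) of the allocated
+ * virtual address space are actually used.
+ */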
+
+struct pcpul_ent {
+ void *ptr;
+ void *map_addr;
+};
+
+static size_t pcpul_size;
+static size_t pcpul_lpage_size;
+static int pcpul_nr_lpages;
+static struct pcpul_ent *pcpul_map;
+
+static bool __init pcpul_unit_to_cpu(int unit, const int *unit_map,
+ unsigned int *cpup)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ if (unit_map[cpu] == unit) {
+ if (cpup)
+ *cpup = cpu;
+ return true;
+ }
+
+ return false;
+}
+
+static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size,
+ size_t reserved_size, size_t dyn_size,
+ size_t unit_size, size_t lpage_size,
+ const int *unit_map, int nr_units)
+{
+ int width = 1, v = nr_units;
+ char empty_str[] = "--------";
+ int upl, lpl; /* units per lpage, lpage per line */
+ unsigned int cpu;
+ int lpage, unit;
+
+ while (v /= 10)
+ width++;
+ empty_str[min_t(int, width, sizeof(empty_str) - 1)] = '\0';
+
+ upl = max_t(int, lpage_size / unit_size, 1);
+ lpl = rounddown_pow_of_two(max_t(int, 60 / (upl * (width + 1) + 2), 1));
+
+ printk("%spcpu-lpage: sta/res/dyn=%zu/%zu/%zu unit=%zu lpage=%zu", lvl,
+ static_size, reserved_size, dyn_size, unit_size, lpage_size);
+
+ for (lpage = 0, unit = 0; unit < nr_units; unit++) {
+ if (!(unit % upl)) {
+ if (!(lpage++ % lpl)) {
+ printk("\n");
+ printk("%spcpu-lpage: ", lvl);
+ } else
+ printk("| ");
+ }
+ if (pcpul_unit_to_cpu(unit, unit_map, &cpu))
+ printk("%0*d ", width, cpu);
+ else
+ printk("%s ", empty_str);
+ }
+ printk("\n");
+}
+
+/**
+ * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @dyn_size: free size for dynamic allocation in bytes
+ * @unit_size: unit size in bytes
+ * @lpage_size: the size of a large page
+ * @unit_map: cpu -> unit mapping
+ * @nr_units: the number of units
+ * @alloc_fn: function to allocate percpu lpage, always called with lpage_size
+ * @free_fn: function to free percpu memory, @size <= lpage_size
+ * @map_fn: function to map percpu lpage, always called with lpage_size
+ *
+ * This allocator uses large pages to build and map the first chunk.
+ * Unlike other helpers, the caller should always specify @dyn_size
+ * and @unit_size. These parameters along with @unit_map and
+ * @nr_units can be determined using pcpu_lpage_build_unit_map().
+ * This two-stage initialization allows arch code to evaluate the
+ * parameters before committing to them.
+ *
+ * Large pages are allocated as directed by @unit_map and other
+ * parameters and mapped to vmalloc space. Unused holes are returned
+ * to the page allocator. Note that these holes end up being actively
+ * mapped twice - once via the linear physical mapping and once via
+ * the vmalloc area for the first percpu chunk. Depending on the
+ * architecture, this might cause problems when changing page
+ * attributes of the returned area.
+ * These double mapped areas can be detected using
+ * pcpu_lpage_remapped().
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access on success, -errno on failure.
+ */
+ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,
+ size_t dyn_size, size_t unit_size,
+ size_t lpage_size, const int *unit_map,
+ int nr_units,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn,
+ pcpu_fc_map_fn_t map_fn)
+{
+ static struct vm_struct vm;
+ size_t chunk_size = unit_size * nr_units;
+ size_t map_size;
+ unsigned int cpu;
+ ssize_t ret;
+ int i, j, unit;
+
+ pcpul_lpage_dump_cfg(KERN_DEBUG, static_size, reserved_size, dyn_size,
+ unit_size, lpage_size, unit_map, nr_units);
+
+ BUG_ON(chunk_size % lpage_size);
+
+ pcpul_size = static_size + reserved_size + dyn_size;
+ pcpul_lpage_size = lpage_size;
+ pcpul_nr_lpages = chunk_size / lpage_size;
+
+ /* allocate pointer array and alloc large pages */
+ map_size = pcpul_nr_lpages * sizeof(pcpul_map[0]);
+ pcpul_map = alloc_bootmem(map_size);
+
+ /* allocate all pages */
+ for (i = 0; i < pcpul_nr_lpages; i++) {
+ size_t offset = i * lpage_size;
+ int first_unit = offset / unit_size;
+ int last_unit = (offset + lpage_size - 1) / unit_size;
+ void *ptr;
+
+ /* find out which cpu is mapped to this unit */
+ for (unit = first_unit; unit <= last_unit; unit++)
+ if (pcpul_unit_to_cpu(unit, unit_map, &cpu))
+ goto found;
+ continue;
+ found:
+ ptr = alloc_fn(cpu, lpage_size);
+ if (!ptr) {
+ pr_warning("PERCPU: failed to allocate large page "
+ "for cpu%u\n", cpu);
+ goto enomem;
+ }
+
+ pcpul_map[i].ptr = ptr;
+ }
+
+ /* return unused holes */
+ for (unit = 0; unit < nr_units; unit++) {
+ size_t start = unit * unit_size;
+ size_t end = start + unit_size;
+ size_t off, next;
+
+ /* don't free used part of occupied unit */
+ if (pcpul_unit_to_cpu(unit, unit_map, NULL))
+ start += pcpul_size;
+
+ /* unit can span more than one page, punch the holes */
+ for (off = start; off < end; off = next) {
+ void *ptr = pcpul_map[off / lpage_size].ptr;
+ next = min(roundup(off + 1, lpage_size), end);
+ if (ptr)
+ free_fn(ptr + off % lpage_size, next - off);
+ }
+ }
+
+ /* allocate address, map and copy */
+ vm.flags = VM_ALLOC;
+ vm.size = chunk_size;
+ vm_area_register_early(&vm, unit_size);
+
+ for (i = 0; i < pcpul_nr_lpages; i++) {
+ if (!pcpul_map[i].ptr)
+ continue;
+ pcpul_map[i].map_addr = vm.addr + i * lpage_size;
+ map_fn(pcpul_map[i].ptr, lpage_size, pcpul_map[i].map_addr);
+ }
+
+ for_each_possible_cpu(cpu)
+ memcpy(vm.addr + unit_map[cpu] * unit_size, __per_cpu_load,
+ static_size);
+
+ /* we're ready, commit */
+ pr_info("PERCPU: Remapped at %p with large pages, static data "
+ "%zu bytes\n", vm.addr, static_size);
+
+ ret = pcpu_setup_first_chunk(static_size, reserved_size, dyn_size,
+ unit_size, vm.addr, unit_map);
+
+ /*
+ * Sort pcpul_map array for pcpu_lpage_remapped(). Unmapped
+ * lpages are pushed to the end and trimmed.
+ */
+ for (i = 0; i < pcpul_nr_lpages - 1; i++)
+ for (j = i + 1; j < pcpul_nr_lpages; j++) {
+ struct pcpul_ent tmp;
+
+ if (!pcpul_map[j].ptr)
+ continue;
+ if (pcpul_map[i].ptr &&
+ pcpul_map[i].ptr < pcpul_map[j].ptr)
+ continue;
+
+ tmp = pcpul_map[i];
+ pcpul_map[i] = pcpul_map[j];
+ pcpul_map[j] = tmp;
+ }
+
+ while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr)
+ pcpul_nr_lpages--;
+
+ return ret;
+
+enomem:
+ for (i = 0; i < pcpul_nr_lpages; i++)
+ if (pcpul_map[i].ptr)
+ free_fn(pcpul_map[i].ptr, lpage_size);
+ free_bootmem(__pa(pcpul_map), map_size);
+ return -ENOMEM;
+}
- return pcpu_setup_first_chunk(pcpue_get_page, static_size,
- reserved_size, dyn_size,
- pcpue_unit_size, pcpue_ptr, NULL);
+/**
+ * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
+ * @kaddr: the kernel address in question
+ *
+ * Determine whether @kaddr falls in the pcpul recycled area. This is
+ * used by pageattr to detect VM aliases and break up the pcpu large
+ * page mapping such that the same physical page is not mapped under
+ * different attributes.
+ *
+ * The recycled area is always at the tail of a partially used large
+ * page.
+ *
+ * RETURNS:
+ * The corresponding remapped pcpu address if a match is found;
+ * otherwise, NULL.
+ */
+void *pcpu_lpage_remapped(void *kaddr)
+{
+ unsigned long lpage_mask = pcpul_lpage_size - 1;
+ void *lpage_addr = (void *)((unsigned long)kaddr & ~lpage_mask);
+ unsigned long offset = (unsigned long)kaddr & lpage_mask;
+ int left = 0, right = pcpul_nr_lpages - 1;
+ int pos;
+
+ /* pcpul in use at all? */
+ if (!pcpul_map)
+ return NULL;
+
+ /* okay, perform binary search */
+ while (left <= right) {
+ pos = (left + right) / 2;
+
+ if (pcpul_map[pos].ptr < lpage_addr)
+ left = pos + 1;
+ else if (pcpul_map[pos].ptr > lpage_addr)
+ right = pos - 1;
+ else
+ return pcpul_map[pos].map_addr + offset;
+ }
+
+ return NULL;
+}
+#endif
+
+/*
+ * Generic percpu area setup.
+ *
+ * The embedding helper is used because its behavior closely resembles
+ * the original non-dynamic generic percpu area setup. This is
+ * important because many archs have addressing restrictions and might
+ * fail if the percpu area is located far away from the previous
+ * location. As an added bonus, in non-NUMA cases, embedding is
+ * generally a good idea TLB-wise because the percpu area can piggyback
+ * on the physical linear memory mapping which uses large page
+ * mappings on applicable archs.
+ */
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+void __init setup_per_cpu_areas(void)
+{
+ size_t static_size = __per_cpu_end - __per_cpu_start;
+ ssize_t unit_size;
+ unsigned long delta;
+ unsigned int cpu;
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE);
+ if (unit_size < 0)
+ panic("Failed to initialized percpu areas.");
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + cpu * unit_size;
}
+#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
diff --git a/mm/quicklist.c b/mm/quicklist.c
index e66d07d1b4ff..6633965bb27b 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/quicklist.h>
-DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);
#define FRACTION_OF_NODE_MEM 16
@@ -29,7 +29,6 @@ static unsigned long max_pages(unsigned long min_pages)
int node = numa_node_id();
struct zone *zones = NODE_DATA(node)->node_zones;
int num_cpus_on_node;
- const struct cpumask *cpumask_on_node = cpumask_of_node(node);
node_free_pages =
#ifdef CONFIG_ZONE_DMA
@@ -42,7 +41,7 @@ static unsigned long max_pages(unsigned long min_pages)
max = node_free_pages / FRACTION_OF_NODE_MEM;
- num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
+ num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
max /= num_cpus_on_node;
return max(max, min_pages);
diff --git a/mm/slab.c b/mm/slab.c
index e74a16e4ced6..7b5d4deacfcd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1544,9 +1544,6 @@ void __init kmem_cache_init(void)
}
g_cpucache_up = EARLY;
-
- /* Annotate slab for lockdep -- annotate the malloc caches */
- init_lock_keys();
}
void __init kmem_cache_init_late(void)
@@ -1563,6 +1560,9 @@ void __init kmem_cache_init_late(void)
/* Done! */
g_cpucache_up = FULL;
+ /* Annotate slab for lockdep -- annotate the malloc caches */
+ init_lock_keys();
+
/*
* Register a cpu startup notifier callback that initializes
* cpu_cache_get for all new cpus
@@ -2547,7 +2547,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
}
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
- synchronize_rcu();
+ rcu_barrier();
__kmem_cache_destroy(cachep);
mutex_unlock(&cache_chain_mutex);
diff --git a/mm/slob.c b/mm/slob.c
index c78742defdc6..9641da3d5e58 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -595,6 +595,8 @@ EXPORT_SYMBOL(kmem_cache_create);
void kmem_cache_destroy(struct kmem_cache *c)
{
kmemleak_free(c);
+ if (c->flags & SLAB_DESTROY_BY_RCU)
+ rcu_barrier();
slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);
diff --git a/mm/slqb.c b/mm/slqb.c
new file mode 100644
index 000000000000..b986604cab7a
--- /dev/null
+++ b/mm/slqb.c
@@ -0,0 +1,3765 @@
+/*
+ * SLQB: A slab allocator that focuses on per-CPU scaling, and good performance
+ * with order-0 allocations. Fastpaths emphasis is placed on local allocaiton
+ * and freeing, but with a secondary goal of good remote freeing (freeing on
+ * another CPU from that which allocated).
+ *
+ * Using ideas and code from mm/slab.c, mm/slob.c, and mm/slub.c.
+ */
+
+#include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/mempolicy.h>
+#include <linux/ctype.h>
+#include <linux/kallsyms.h>
+#include <linux/memory.h>
+
+/*
+ * TODO
+ * - fix up releasing of offlined data structures. Not a big deal because
+ * they don't get cumulatively leaked with successive online/offline cycles
+ * - allow OOM conditions to flush back per-CPU pages to common lists to be
+ * reused by other CPUs.
+ * - investigate performance with memoryless nodes. Perhaps CPUs can be given
+ * a default closest home node via which they can use fastpath functions.
+ * Perhaps it is not a big problem.
+ */
+
+/*
+ * slqb_page overloads struct page, and is used to manage some slab allocation
+ * aspects; however, to avoid the horrible mess in include/linux/mm_types.h,
+ * we'll just define our own struct slqb_page type variant here.
+ */
+struct slqb_page {
+ union {
+ struct {
+ unsigned long flags; /* mandatory */
+ atomic_t _count; /* mandatory */
+ unsigned int inuse; /* Nr of objects */
+ struct kmem_cache_list *list; /* Pointer to list */
+ void **freelist; /* LIFO freelist */
+ union {
+ struct list_head lru; /* misc. list */
+ struct rcu_head rcu_head; /* for rcu freeing */
+ };
+ };
+ struct page page;
+ };
+};
+static inline void struct_slqb_page_wrong_size(void)
+{ BUILD_BUG_ON(sizeof(struct slqb_page) != sizeof(struct page)); }
+
+#define PG_SLQB_BIT (1 << PG_slab)
+
+/*
+ * slqb_min_order: minimum allocation order for slabs
+ */
+static int slqb_min_order;
+
+/*
+ * slqb_min_objects: minimum number of objects per slab. Increasing this
+ * will increase the allocation order for slabs with larger objects
+ */
+static int slqb_min_objects = 1;
+
+#ifdef CONFIG_NUMA
+static inline int slab_numa(struct kmem_cache *s)
+{
+ return s->flags & SLAB_NUMA;
+}
+#else
+static inline int slab_numa(struct kmem_cache *s)
+{
+ return 0;
+}
+#endif
+
+static inline int slab_hiwater(struct kmem_cache *s)
+{
+ return s->hiwater;
+}
+
+static inline int slab_freebatch(struct kmem_cache *s)
+{
+ return s->freebatch;
+}
+
+/*
+ * Lock order:
+ * kmem_cache_node->list_lock
+ * kmem_cache_remote_free->lock
+ *
+ * Data structures:
+ * SLQB is primarily per-cpu. For each kmem_cache, each CPU has:
+ *
+ * - A LIFO list of node-local objects. Allocation and freeing of node local
+ * objects goes first to this list.
+ *
+ * - 2 Lists of slab pages, free and partial pages. If an allocation misses
+ * the object list, it tries from the partial list, then the free list.
+ * After freeing an object to the object list, if it is over a watermark,
+ * some objects are freed back to pages. If an allocation misses these lists,
+ * a new slab page is allocated from the page allocator. If the free list
+ * reaches a watermark, some of its pages are returned to the page allocator.
+ *
+ * - A remote free queue, to which freed objects that did not come from the
+ * local node are queued. When this reaches a watermark, the objects are
+ * flushed.
+ *
+ * - A remotely freed queue, to which objects allocated from this CPU are
+ * flushed from other CPUs' remote free queues. kmem_cache_remote_free->lock
+ * is used to protect access to this queue.
+ *
+ * When the remotely freed queue reaches a watermark, a flag is set to tell
+ * the owner CPU to check it. The owner CPU will then check the queue on the
+ * next allocation that misses the object list. It will move all objects from
+ * this list onto the object list and then allocate one.
+ *
+ * This system of remote queueing is intended to reduce lock and remote
+ * cacheline acquisitions, and give a cooling off period for remotely freed
+ * objects before they are re-allocated.
+ *
+ * Node-specific allocations from somewhere other than the local node are
+ * handled by a per-node list which is the same as the above per-CPU data
+ * structures except for the following differences:
+ *
+ * - kmem_cache_node->list_lock is used to protect access for multiple CPUs to
+ * allocate from a given node.
+ *
+ * - There is no remote free queue. Nodes don't free objects, CPUs do.
+ */
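+
+/*
+ * Illustrative flow of the remote queueing described above (sketch only,
+ * CPU numbers are hypothetical): an object allocated on CPU0 and freed on
+ * CPU1 first sits on CPU1's per-CPU remote free queue (c->rlist). Once
+ * that queue reaches the free batch size it is flushed, under
+ * kmem_cache_remote_free->lock, onto the remotely freed queue of the list
+ * the object came from, and remote_free_check is set when that queue
+ * crosses its watermark. On a later allocation that misses its object
+ * list, CPU0 claims the whole remotely freed queue back onto its LIFO
+ * freelist via claim_remote_free_list() and allocates from it.
+ */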
+
+static inline void slqb_stat_inc(struct kmem_cache_list *list,
+ enum stat_item si)
+{
+#ifdef CONFIG_SLQB_STATS
+ list->stats[si]++;
+#endif
+}
+
+static inline void slqb_stat_add(struct kmem_cache_list *list,
+ enum stat_item si, unsigned long nr)
+{
+#ifdef CONFIG_SLQB_STATS
+ list->stats[si] += nr;
+#endif
+}
+
+static inline int slqb_page_to_nid(struct slqb_page *page)
+{
+ return page_to_nid(&page->page);
+}
+
+static inline void *slqb_page_address(struct slqb_page *page)
+{
+ return page_address(&page->page);
+}
+
+static inline struct zone *slqb_page_zone(struct slqb_page *page)
+{
+ return page_zone(&page->page);
+}
+
+static inline int virt_to_nid(const void *addr)
+{
+ return page_to_nid(virt_to_page(addr));
+}
+
+static inline struct slqb_page *virt_to_head_slqb_page(const void *addr)
+{
+ struct page *p;
+
+ p = virt_to_head_page(addr);
+ return (struct slqb_page *)p;
+}
+
+static inline void __free_slqb_pages(struct slqb_page *page, unsigned int order,
+ int pages)
+{
+ struct page *p = &page->page;
+
+ reset_page_mapcount(p);
+ p->mapping = NULL;
+ VM_BUG_ON(!(p->flags & PG_SLQB_BIT));
+ p->flags &= ~PG_SLQB_BIT;
+
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += pages;
+ __free_pages(p, order);
+}
+
+#ifdef CONFIG_SLQB_DEBUG
+static inline int slab_debug(struct kmem_cache *s)
+{
+ return s->flags &
+ (SLAB_DEBUG_FREE |
+ SLAB_RED_ZONE |
+ SLAB_POISON |
+ SLAB_STORE_USER |
+ SLAB_TRACE);
+}
+static inline int slab_poison(struct kmem_cache *s)
+{
+ return s->flags & SLAB_POISON;
+}
+#else
+static inline int slab_debug(struct kmem_cache *s)
+{
+ return 0;
+}
+static inline int slab_poison(struct kmem_cache *s)
+{
+ return 0;
+}
+#endif
+
+#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
+ SLAB_POISON | SLAB_STORE_USER)
+
+/* Internal SLQB flags */
+#define __OBJECT_POISON 0x80000000 /* Poison object */
+
+/* Not all arches define cache_line_size */
+#ifndef cache_line_size
+#define cache_line_size() L1_CACHE_BYTES
+#endif
+
+#ifdef CONFIG_SMP
+static struct notifier_block slab_notifier;
+#endif
+
+/*
+ * slqb_lock protects slab_caches list and serialises hotplug operations.
+ * hotplug operations take lock for write, other operations can hold off
+ * hotplug by taking it for read (or write).
+ */
+static DECLARE_RWSEM(slqb_lock);
+
+/*
+ * A list of all slab caches on the system
+ */
+static LIST_HEAD(slab_caches);
+
+/*
+ * Tracking user of a slab.
+ */
+struct track {
+ unsigned long addr; /* Called from address */
+ int cpu; /* Was running on cpu */
+ int pid; /* Pid context */
+ unsigned long when; /* When did the operation occur */
+};
+
+enum track_item { TRACK_ALLOC, TRACK_FREE };
+
+static struct kmem_cache kmem_cache_cache;
+
+#ifdef CONFIG_SLQB_SYSFS
+static int sysfs_slab_add(struct kmem_cache *s);
+static void sysfs_slab_remove(struct kmem_cache *s);
+#else
+static inline int sysfs_slab_add(struct kmem_cache *s)
+{
+ return 0;
+}
+static inline void sysfs_slab_remove(struct kmem_cache *s)
+{
+ kmem_cache_free(&kmem_cache_cache, s);
+}
+#endif
+
+/********************************************************************
+ * Core slab cache functions
+ *******************************************************************/
+
+static int __slab_is_available __read_mostly;
+int slab_is_available(void)
+{
+ return __slab_is_available;
+}
+
+static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
+{
+#ifdef CONFIG_SMP
+ VM_BUG_ON(!s->cpu_slab[cpu]);
+ return s->cpu_slab[cpu];
+#else
+ return &s->cpu_slab;
+#endif
+}
+
+static inline int check_valid_pointer(struct kmem_cache *s,
+ struct slqb_page *page, const void *object)
+{
+ void *base;
+
+ base = slqb_page_address(page);
+ if (object < base || object >= base + s->objects * s->size ||
+ (object - base) % s->size) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline void *get_freepointer(struct kmem_cache *s, void *object)
+{
+ return *(void **)(object + s->offset);
+}
+
+static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
+{
+ *(void **)(object + s->offset) = fp;
+}
+
+/* Loop over all objects in a slab */
+#define for_each_object(__p, __s, __addr) \
+ for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
+ __p += (__s)->size)
+
+/* Scan freelist */
+#define for_each_free_object(__p, __s, __free) \
+ for (__p = (__free); (__p) != NULL; __p = get_freepointer((__s),\
+ __p))
+
+#ifdef CONFIG_SLQB_DEBUG
+/*
+ * Debug settings:
+ */
+#ifdef CONFIG_SLQB_DEBUG_ON
+static int slqb_debug __read_mostly = DEBUG_DEFAULT_FLAGS;
+#else
+static int slqb_debug __read_mostly;
+#endif
+
+static char *slqb_debug_slabs;
+
+/*
+ * Object debugging
+ */
+static void print_section(char *text, u8 *addr, unsigned int length)
+{
+ int i, offset;
+ int newline = 1;
+ char ascii[17];
+
+ ascii[16] = 0;
+
+ for (i = 0; i < length; i++) {
+ if (newline) {
+ printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
+ newline = 0;
+ }
+ printk(KERN_CONT " %02x", addr[i]);
+ offset = i % 16;
+ ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
+ if (offset == 15) {
+ printk(KERN_CONT " %s\n", ascii);
+ newline = 1;
+ }
+ }
+ if (!newline) {
+ i %= 16;
+ while (i < 16) {
+ printk(KERN_CONT " ");
+ ascii[i] = ' ';
+ i++;
+ }
+ printk(KERN_CONT " %s\n", ascii);
+ }
+}
+
+static struct track *get_track(struct kmem_cache *s, void *object,
+ enum track_item alloc)
+{
+ struct track *p;
+
+ if (s->offset)
+ p = object + s->offset + sizeof(void *);
+ else
+ p = object + s->inuse;
+
+ return p + alloc;
+}
+
+static void set_track(struct kmem_cache *s, void *object,
+ enum track_item alloc, unsigned long addr)
+{
+ struct track *p;
+
+ if (s->offset)
+ p = object + s->offset + sizeof(void *);
+ else
+ p = object + s->inuse;
+
+ p += alloc;
+ if (addr) {
+ p->addr = addr;
+ p->cpu = raw_smp_processor_id();
+ p->pid = current ? current->pid : -1;
+ p->when = jiffies;
+ } else
+ memset(p, 0, sizeof(struct track));
+}
+
+static void init_tracking(struct kmem_cache *s, void *object)
+{
+ if (!(s->flags & SLAB_STORE_USER))
+ return;
+
+ set_track(s, object, TRACK_FREE, 0UL);
+ set_track(s, object, TRACK_ALLOC, 0UL);
+}
+
+static void print_track(const char *s, struct track *t)
+{
+ if (!t->addr)
+ return;
+
+ printk(KERN_ERR "INFO: %s in ", s);
+ __print_symbol("%s", (unsigned long)t->addr);
+ printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+}
+
+static void print_tracking(struct kmem_cache *s, void *object)
+{
+ if (!(s->flags & SLAB_STORE_USER))
+ return;
+
+ print_track("Allocated", get_track(s, object, TRACK_ALLOC));
+ print_track("Freed", get_track(s, object, TRACK_FREE));
+}
+
+static void print_page_info(struct slqb_page *page)
+{
+ printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
+ page, page->inuse, page->freelist, page->flags);
+
+}
+
+#define MAX_ERR_STR 100
+static void slab_bug(struct kmem_cache *s, char *fmt, ...)
+{
+ va_list args;
+ char buf[MAX_ERR_STR];
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+ printk(KERN_ERR "========================================"
+ "=====================================\n");
+ printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
+ printk(KERN_ERR "----------------------------------------"
+ "-------------------------------------\n\n");
+}
+
+static void slab_fix(struct kmem_cache *s, char *fmt, ...)
+{
+ va_list args;
+ char buf[100];
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+ printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
+}
+
+static void print_trailer(struct kmem_cache *s, struct slqb_page *page, u8 *p)
+{
+ unsigned int off; /* Offset of last byte */
+ u8 *addr = slqb_page_address(page);
+
+ print_tracking(s, p);
+
+ print_page_info(page);
+
+ printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
+ p, p - addr, get_freepointer(s, p));
+
+ if (p > addr + 16)
+ print_section("Bytes b4", p - 16, 16);
+
+ print_section("Object", p, min(s->objsize, 128));
+
+ if (s->flags & SLAB_RED_ZONE)
+ print_section("Redzone", p + s->objsize, s->inuse - s->objsize);
+
+ if (s->offset)
+ off = s->offset + sizeof(void *);
+ else
+ off = s->inuse;
+
+ if (s->flags & SLAB_STORE_USER)
+ off += 2 * sizeof(struct track);
+
+ if (off != s->size) {
+ /* Beginning of the filler is the free pointer */
+ print_section("Padding", p + off, s->size - off);
+ }
+
+ dump_stack();
+}
+
+static void object_err(struct kmem_cache *s, struct slqb_page *page,
+ u8 *object, char *reason)
+{
+ slab_bug(s, reason);
+ print_trailer(s, page, object);
+}
+
+static void slab_err(struct kmem_cache *s, struct slqb_page *page,
+ char *fmt, ...)
+{
+ va_list args;
+ char buf[MAX_ERR_STR];
+
+ /* format the variable arguments before handing off to slab_bug() */
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+ slab_bug(s, "%s", buf);
+ print_page_info(page);
+ dump_stack();
+}
+
+static void init_object(struct kmem_cache *s, void *object, int active)
+{
+ u8 *p = object;
+
+ if (s->flags & __OBJECT_POISON) {
+ memset(p, POISON_FREE, s->objsize - 1);
+ p[s->objsize - 1] = POISON_END;
+ }
+
+ if (s->flags & SLAB_RED_ZONE) {
+ memset(p + s->objsize,
+ active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
+ s->inuse - s->objsize);
+ }
+}
+
+static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
+{
+ while (bytes) {
+ if (*start != (u8)value)
+ return start;
+ start++;
+ bytes--;
+ }
+ return NULL;
+}
+
+static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
+ void *from, void *to)
+{
+ slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
+ memset(from, data, to - from);
+}
+
+static int check_bytes_and_report(struct kmem_cache *s, struct slqb_page *page,
+ u8 *object, char *what,
+ u8 *start, unsigned int value, unsigned int bytes)
+{
+ u8 *fault;
+ u8 *end;
+
+ fault = check_bytes(start, value, bytes);
+ if (!fault)
+ return 1;
+
+ end = start + bytes;
+ while (end > fault && end[-1] == value)
+ end--;
+
+ slab_bug(s, "%s overwritten", what);
+ printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
+ fault, end - 1, fault[0], value);
+ print_trailer(s, page, object);
+
+ restore_bytes(s, what, value, fault, end);
+ return 0;
+}
+
+/*
+ * Object layout:
+ *
+ * object address
+ * Bytes of the object to be managed.
+ * If the freepointer may overlay the object then the free
+ * pointer is the first word of the object.
+ *
+ * Poisoning uses 0x6b (POISON_FREE) and the last byte is
+ * 0xa5 (POISON_END)
+ *
+ * object + s->objsize
+ * Padding to reach word boundary. This is also used for Redzoning.
+ * Padding is extended by another word if Redzoning is enabled and
+ * objsize == inuse.
+ *
+ * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
+ * 0xcc (RED_ACTIVE) for objects in use.
+ *
+ * object + s->inuse
+ * Meta data starts here.
+ *
+ * A. Free pointer (if we cannot overwrite object on free)
+ * B. Tracking data for SLAB_STORE_USER
+ * C. Padding to reach required alignment boundary or at minimum
+ * one word if debugging is on to be able to detect writes
+ * before the word boundary.
+ *
+ * Padding is done using 0x5a (POISON_INUSE)
+ *
+ * object + s->size
+ * Nothing is used beyond s->size.
+ */
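+
+/*
+ * Illustrative example of the layout described above (hypothetical 64-bit
+ * cache with objsize = 20, SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER,
+ * and assuming the free pointer is placed after the object, i.e.
+ * s->offset != 0):
+ *
+ *   [ 0, 20)  object (poisoned with 0x6b, last byte 0xa5 when free)
+ *   [20, 24)  red zone padding to the word boundary (0xbb / 0xcc)
+ *   [24, 32)  free pointer
+ *   [32, 80)  two struct track entries (alloc and free, 24 bytes each)
+ *
+ * giving s->size = 80 with 8 byte alignment.
+ */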
+
+static int check_pad_bytes(struct kmem_cache *s, struct slqb_page *page, u8 *p)
+{
+ unsigned long off = s->inuse; /* The end of info */
+
+ if (s->offset) {
+ /* Freepointer is placed after the object. */
+ off += sizeof(void *);
+ }
+
+ if (s->flags & SLAB_STORE_USER) {
+ /* We also have user information there */
+ off += 2 * sizeof(struct track);
+ }
+
+ if (s->size == off)
+ return 1;
+
+ return check_bytes_and_report(s, page, p, "Object padding",
+ p + off, POISON_INUSE, s->size - off);
+}
+
+static int slab_pad_check(struct kmem_cache *s, struct slqb_page *page)
+{
+ u8 *start;
+ u8 *fault;
+ u8 *end;
+ int length;
+ int remainder;
+
+ if (!(s->flags & SLAB_POISON))
+ return 1;
+
+ start = slqb_page_address(page);
+ end = start + (PAGE_SIZE << s->order);
+ length = s->objects * s->size;
+ remainder = end - (start + length);
+ if (!remainder)
+ return 1;
+
+ fault = check_bytes(start + length, POISON_INUSE, remainder);
+ if (!fault)
+ return 1;
+
+ while (end > fault && end[-1] == POISON_INUSE)
+ end--;
+
+ slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
+ print_section("Padding", start, length);
+
+ restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+ return 0;
+}
+
+static int check_object(struct kmem_cache *s, struct slqb_page *page,
+ void *object, int active)
+{
+ u8 *p = object;
+ u8 *endobject = object + s->objsize;
+
+ if (s->flags & SLAB_RED_ZONE) {
+ unsigned int red =
+ active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
+
+ if (!check_bytes_and_report(s, page, object, "Redzone",
+ endobject, red, s->inuse - s->objsize))
+ return 0;
+ } else {
+ if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+ check_bytes_and_report(s, page, p, "Alignment padding",
+ endobject, POISON_INUSE, s->inuse - s->objsize);
+ }
+ }
+
+ if (s->flags & SLAB_POISON) {
+ if (!active && (s->flags & __OBJECT_POISON)) {
+ if (!check_bytes_and_report(s, page, p, "Poison", p,
+ POISON_FREE, s->objsize - 1))
+ return 0;
+
+ if (!check_bytes_and_report(s, page, p, "Poison",
+ p + s->objsize - 1, POISON_END, 1))
+ return 0;
+ }
+
+ /*
+ * check_pad_bytes cleans up on its own.
+ */
+ check_pad_bytes(s, page, p);
+ }
+
+ return 1;
+}
+
+static int check_slab(struct kmem_cache *s, struct slqb_page *page)
+{
+ if (!(page->flags & PG_SLQB_BIT)) {
+ slab_err(s, page, "Not a valid slab page");
+ return 0;
+ }
+ if (page->inuse == 0) {
+ slab_err(s, page, "inuse before free / after alloc", s->name);
+ return 0;
+ }
+ if (page->inuse > s->objects) {
+ slab_err(s, page, "inuse %u > max %u",
+ s->name, page->inuse, s->objects);
+ return 0;
+ }
+ /* Slab_pad_check fixes things up after itself */
+ slab_pad_check(s, page);
+ return 1;
+}
+
+static void trace(struct kmem_cache *s, struct slqb_page *page,
+ void *object, int alloc)
+{
+ if (s->flags & SLAB_TRACE) {
+ printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+ s->name,
+ alloc ? "alloc" : "free",
+ object, page->inuse,
+ page->freelist);
+
+ if (!alloc)
+ print_section("Object", (void *)object, s->objsize);
+
+ dump_stack();
+ }
+}
+
+static void setup_object_debug(struct kmem_cache *s, struct slqb_page *page,
+ void *object)
+{
+ if (!slab_debug(s))
+ return;
+
+ if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
+ return;
+
+ init_object(s, object, 0);
+ init_tracking(s, object);
+}
+
+static int alloc_debug_processing(struct kmem_cache *s,
+ void *object, unsigned long addr)
+{
+ struct slqb_page *page;
+ page = virt_to_head_slqb_page(object);
+
+ if (!check_slab(s, page))
+ goto bad;
+
+ if (!check_valid_pointer(s, page, object)) {
+ object_err(s, page, object, "Freelist Pointer check fails");
+ goto bad;
+ }
+
+ if (object && !check_object(s, page, object, 0))
+ goto bad;
+
+ /* Success: perform special debug activities for allocs */
+ if (s->flags & SLAB_STORE_USER)
+ set_track(s, object, TRACK_ALLOC, addr);
+ trace(s, page, object, 1);
+ init_object(s, object, 1);
+ return 1;
+
+bad:
+ return 0;
+}
+
+static int free_debug_processing(struct kmem_cache *s,
+ void *object, unsigned long addr)
+{
+ struct slqb_page *page;
+ page = virt_to_head_slqb_page(object);
+
+ if (!check_slab(s, page))
+ goto fail;
+
+ if (!check_valid_pointer(s, page, object)) {
+ slab_err(s, page, "Invalid object pointer 0x%p", object);
+ goto fail;
+ }
+
+ if (!check_object(s, page, object, 1))
+ return 0;
+
+ /* Special debug activities for freeing objects */
+ if (s->flags & SLAB_STORE_USER)
+ set_track(s, object, TRACK_FREE, addr);
+ trace(s, page, object, 0);
+ init_object(s, object, 0);
+ return 1;
+
+fail:
+ slab_fix(s, "Object at 0x%p not freed", object);
+ return 0;
+}
+
+static int __init setup_slqb_debug(char *str)
+{
+ slqb_debug = DEBUG_DEFAULT_FLAGS;
+ if (*str++ != '=' || !*str) {
+ /*
+ * No options specified. Switch on full debugging.
+ */
+ goto out;
+ }
+
+ if (*str == ',') {
+ /*
+ * No options but restriction on slabs. This means full
+ * debugging for slabs matching a pattern.
+ */
+ goto check_slabs;
+ }
+
+ slqb_debug = 0;
+ if (*str == '-') {
+ /*
+ * Switch off all debugging measures.
+ */
+ goto out;
+ }
+
+ /*
+ * Determine which debug features should be switched on
+ */
+ for (; *str && *str != ','; str++) {
+ switch (tolower(*str)) {
+ case 'f':
+ slqb_debug |= SLAB_DEBUG_FREE;
+ break;
+ case 'z':
+ slqb_debug |= SLAB_RED_ZONE;
+ break;
+ case 'p':
+ slqb_debug |= SLAB_POISON;
+ break;
+ case 'u':
+ slqb_debug |= SLAB_STORE_USER;
+ break;
+ case 't':
+ slqb_debug |= SLAB_TRACE;
+ break;
+ default:
+ printk(KERN_ERR "slqb_debug option '%c' "
+ "unknown. skipped\n", *str);
+ }
+ }
+
+check_slabs:
+ if (*str == ',')
+ slqb_debug_slabs = str + 1;
+out:
+ return 1;
+}
+__setup("slqb_debug", setup_slqb_debug);
+
+static int __init setup_slqb_min_order(char *str)
+{
+ get_option(&str, &slqb_min_order);
+ slqb_min_order = min(slqb_min_order, MAX_ORDER - 1);
+
+ return 1;
+}
+__setup("slqb_min_order=", setup_slqb_min_order);
+
+static int __init setup_slqb_min_objects(char *str)
+{
+ get_option(&str, &slqb_min_objects);
+
+ return 1;
+}
+
+__setup("slqb_min_objects=", setup_slqb_min_objects);
+
+static unsigned long kmem_cache_flags(unsigned long objsize,
+ unsigned long flags, const char *name,
+ void (*ctor)(void *))
+{
+ /*
+ * Enable debugging if selected on the kernel commandline.
+ */
+ if (slqb_debug && (!slqb_debug_slabs ||
+ strncmp(slqb_debug_slabs, name,
+ strlen(slqb_debug_slabs)) == 0))
+ flags |= slqb_debug;
+
+ if (num_possible_nodes() > 1)
+ flags |= SLAB_NUMA;
+
+ return flags;
+}
+#else
+static inline void setup_object_debug(struct kmem_cache *s,
+ struct slqb_page *page, void *object)
+{
+}
+
+static inline int alloc_debug_processing(struct kmem_cache *s,
+ void *object, unsigned long addr)
+{
+ return 0;
+}
+
+static inline int free_debug_processing(struct kmem_cache *s,
+ void *object, unsigned long addr)
+{
+ return 0;
+}
+
+static inline int slab_pad_check(struct kmem_cache *s, struct slqb_page *page)
+{
+ return 1;
+}
+
+static inline int check_object(struct kmem_cache *s, struct slqb_page *page,
+ void *object, int active)
+{
+ return 1;
+}
+
+static inline void add_full(struct kmem_cache_node *n, struct slqb_page *page)
+{
+}
+
+static inline unsigned long kmem_cache_flags(unsigned long objsize,
+ unsigned long flags, const char *name, void (*ctor)(void *))
+{
+ if (num_possible_nodes() > 1)
+ flags |= SLAB_NUMA;
+ return flags;
+}
+
+static const int slqb_debug;
+#endif
+
+/*
+ * allocate a new slab (return its corresponding struct slqb_page)
+ */
+static struct slqb_page *allocate_slab(struct kmem_cache *s,
+ gfp_t flags, int node)
+{
+ struct slqb_page *page;
+ int pages = 1 << s->order;
+
+ flags |= s->allocflags;
+
+ page = (struct slqb_page *)alloc_pages_node(node, flags, s->order);
+ if (!page)
+ return NULL;
+
+ mod_zone_page_state(slqb_page_zone(page),
+ (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+ pages);
+
+ return page;
+}
+
+/*
+ * Called once for each object on a new slab page
+ */
+static void setup_object(struct kmem_cache *s,
+ struct slqb_page *page, void *object)
+{
+ setup_object_debug(s, page, object);
+ if (unlikely(s->ctor))
+ s->ctor(object);
+}
+
+/*
+ * Allocate a new slab, set up its object list.
+ */
+static struct slqb_page *new_slab_page(struct kmem_cache *s,
+ gfp_t flags, int node, unsigned int colour)
+{
+ struct slqb_page *page;
+ void *start;
+ void *last;
+ void *p;
+
+ BUG_ON(flags & GFP_SLAB_BUG_MASK);
+
+ page = allocate_slab(s,
+ flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+ if (!page)
+ goto out;
+
+ page->flags |= PG_SLQB_BIT;
+
+ start = page_address(&page->page);
+
+ if (unlikely(slab_poison(s)))
+ memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+
+ start += colour;
+
+ last = start;
+ for_each_object(p, s, start) {
+ setup_object(s, page, p);
+ set_freepointer(s, last, p);
+ last = p;
+ }
+ set_freepointer(s, last, NULL);
+
+ page->freelist = start;
+ page->inuse = 0;
+out:
+ return page;
+}
+
+/*
+ * Free a slab page back to the page allocator
+ */
+static void __free_slab(struct kmem_cache *s, struct slqb_page *page)
+{
+ int pages = 1 << s->order;
+
+ if (unlikely(slab_debug(s))) {
+ void *p;
+
+ slab_pad_check(s, page);
+ for_each_free_object(p, s, page->freelist)
+ check_object(s, page, p, 0);
+ }
+
+ mod_zone_page_state(slqb_page_zone(page),
+ (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+ -pages);
+
+ __free_slqb_pages(page, s->order, pages);
+}
+
+static void rcu_free_slab(struct rcu_head *h)
+{
+ struct slqb_page *page;
+
+ page = container_of(h, struct slqb_page, rcu_head);
+ __free_slab(page->list->cache, page);
+}
+
+static void free_slab(struct kmem_cache *s, struct slqb_page *page)
+{
+ VM_BUG_ON(page->inuse);
+ if (unlikely(s->flags & SLAB_DESTROY_BY_RCU))
+ call_rcu(&page->rcu_head, rcu_free_slab);
+ else
+ __free_slab(s, page);
+}
+
+/*
+ * Return an object to its slab.
+ *
+ * Caller must be the owner CPU in the case of per-CPU list, or hold the node's
+ * list_lock in the case of per-node list.
+ */
+static int free_object_to_page(struct kmem_cache *s,
+ struct kmem_cache_list *l, struct slqb_page *page,
+ void *object)
+{
+ VM_BUG_ON(page->list != l);
+
+ set_freepointer(s, object, page->freelist);
+ page->freelist = object;
+ page->inuse--;
+
+ if (!page->inuse) {
+ if (likely(s->objects > 1)) {
+ l->nr_partial--;
+ list_del(&page->lru);
+ }
+ l->nr_slabs--;
+ free_slab(s, page);
+ slqb_stat_inc(l, FLUSH_SLAB_FREE);
+ return 1;
+
+ } else if (page->inuse + 1 == s->objects) {
+ l->nr_partial++;
+ list_add(&page->lru, &l->partial);
+ slqb_stat_inc(l, FLUSH_SLAB_PARTIAL);
+ return 0;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+static void slab_free_to_remote(struct kmem_cache *s, struct slqb_page *page,
+ void *object, struct kmem_cache_cpu *c);
+#endif
+
+/*
+ * Flush the LIFO list of objects on a list. They are sent back to their pages
+ * if the pages also belong to this list, or to our CPU's remote-free list
+ * if they do not.
+ *
+ * Doesn't flush the entire list. flush_free_list_all does.
+ *
+ * Caller must be the owner CPU in the case of per-CPU list, or hold the node's
+ * list_lock in the case of per-node list.
+ */
+static void flush_free_list(struct kmem_cache *s, struct kmem_cache_list *l)
+{
+ void **head;
+ int nr;
+ int locked = 0;
+
+ nr = l->freelist.nr;
+ if (unlikely(!nr))
+ return;
+
+ nr = min(slab_freebatch(s), nr);
+
+ slqb_stat_inc(l, FLUSH_FREE_LIST);
+ slqb_stat_add(l, FLUSH_FREE_LIST_OBJECTS, nr);
+
+ l->freelist.nr -= nr;
+ head = l->freelist.head;
+
+ do {
+ struct slqb_page *page;
+ void **object;
+
+ object = head;
+ VM_BUG_ON(!object);
+ head = get_freepointer(s, object);
+ page = virt_to_head_slqb_page(object);
+
+#ifdef CONFIG_SMP
+ if (page->list != l) {
+ struct kmem_cache_cpu *c;
+
+ if (locked) {
+ spin_unlock(&l->page_lock);
+ locked = 0;
+ }
+
+ c = get_cpu_slab(s, smp_processor_id());
+
+ slab_free_to_remote(s, page, object, c);
+ slqb_stat_inc(l, FLUSH_FREE_LIST_REMOTE);
+ } else
+#endif
+ {
+ if (!locked) {
+ spin_lock(&l->page_lock);
+ locked = 1;
+ }
+ free_object_to_page(s, l, page, object);
+ }
+
+ nr--;
+ } while (nr);
+
+ if (locked)
+ spin_unlock(&l->page_lock);
+
+ l->freelist.head = head;
+ if (!l->freelist.nr)
+ l->freelist.tail = NULL;
+}
+
+static void flush_free_list_all(struct kmem_cache *s, struct kmem_cache_list *l)
+{
+ while (l->freelist.nr)
+ flush_free_list(s, l);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * If enough objects have been remotely freed back to this list,
+ * remote_free_check will be set, in which case we'll eventually come here
+ * to move those objects off our remote_free list and onto our LIFO freelist.
+ *
+ * Caller must be the owner CPU in the case of per-CPU list, or hold the node's
+ * list_lock in the case of per-node list.
+ */
+static void claim_remote_free_list(struct kmem_cache *s,
+ struct kmem_cache_list *l)
+{
+ void **head, **tail;
+ int nr;
+
+ if (!l->remote_free.list.nr)
+ return;
+
+ spin_lock(&l->remote_free.lock);
+
+ l->remote_free_check = 0;
+ head = l->remote_free.list.head;
+ l->remote_free.list.head = NULL;
+ tail = l->remote_free.list.tail;
+ l->remote_free.list.tail = NULL;
+ nr = l->remote_free.list.nr;
+ l->remote_free.list.nr = 0;
+
+ spin_unlock(&l->remote_free.lock);
+
+ VM_BUG_ON(!nr);
+
+ if (!l->freelist.nr) {
+ /* Get head hot for likely subsequent allocation or flush */
+ prefetchw(head);
+ l->freelist.head = head;
+ } else
+ set_freepointer(s, l->freelist.tail, head);
+ l->freelist.tail = tail;
+
+ l->freelist.nr += nr;
+
+ slqb_stat_inc(l, CLAIM_REMOTE_LIST);
+ slqb_stat_add(l, CLAIM_REMOTE_LIST_OBJECTS, nr);
+}
+#else
+static inline void claim_remote_free_list(struct kmem_cache *s,
+ struct kmem_cache_list *l)
+{
+}
+#endif
+
+/*
+ * Allocation fastpath. Get an object from the list's LIFO freelist, or
+ * return NULL if it is empty.
+ *
+ * Caller must be the owner CPU in the case of per-CPU list, or hold the node's
+ * list_lock in the case of per-node list.
+ */
+static __always_inline void *__cache_list_get_object(struct kmem_cache *s,
+ struct kmem_cache_list *l)
+{
+ void *object;
+
+ object = l->freelist.head;
+ if (likely(object)) {
+ void *next = get_freepointer(s, object);
+
+ VM_BUG_ON(!l->freelist.nr);
+ l->freelist.nr--;
+ l->freelist.head = next;
+
+ return object;
+ }
+ VM_BUG_ON(l->freelist.nr);
+
+#ifdef CONFIG_SMP
+ if (unlikely(l->remote_free_check)) {
+ claim_remote_free_list(s, l);
+
+ if (l->freelist.nr > slab_hiwater(s))
+ flush_free_list(s, l);
+
+ /* repetition here helps gcc :( */
+ object = l->freelist.head;
+ if (likely(object)) {
+ void *next = get_freepointer(s, object);
+
+ VM_BUG_ON(!l->freelist.nr);
+ l->freelist.nr--;
+ l->freelist.head = next;
+
+ return object;
+ }
+ VM_BUG_ON(l->freelist.nr);
+ }
+#endif
+
+ return NULL;
+}
+
+/*
+ * Slow(er) path. Get a page from this list's existing pages. Will be a
+ * new empty page in the case that __slab_alloc_page has just been called
+ * (empty pages otherwise never get queued up on the lists), or a partial page
+ * already on the list.
+ *
+ * Caller must be the owner CPU in the case of per-CPU list, or hold the node's
+ * list_lock in the case of per-node list.
+ */
+static noinline void *__cache_list_get_page(struct kmem_cache *s,
+ struct kmem_cache_list *l)
+{
+ struct slqb_page *page;
+ void *object;
+
+ if (unlikely(!l->nr_partial))
+ return NULL;
+
+ page = list_first_entry(&l->partial, struct slqb_page, lru);
+ VM_BUG_ON(page->inuse == s->objects);
+ if (page->inuse + 1 == s->objects) {
+ l->nr_partial--;
+ list_del(&page->lru);
+ }
+
+ VM_BUG_ON(!page->freelist);
+
+ page->inuse++;
+
+ object = page->freelist;
+ page->freelist = get_freepointer(s, object);
+ if (page->freelist)
+ prefetchw(page->freelist);
+ VM_BUG_ON((page->inuse == s->objects) != (page->freelist == NULL));
+ slqb_stat_inc(l, ALLOC_SLAB_FILL);
+
+ return object;
+}
+
+static void *cache_list_get_page(struct kmem_cache *s,
+ struct kmem_cache_list *l)
+{
+ void *object;
+
+ if (unlikely(!l->nr_partial))
+ return NULL;
+
+ spin_lock(&l->page_lock);
+ object = __cache_list_get_page(s, l);
+ spin_unlock(&l->page_lock);
+
+ return object;
+}
+
+/*
+ * Allocation slowpath. Allocate a new slab page from the page allocator, and
+ * put it on the list's partial list. Must be followed by an allocation so
+ * that we don't have dangling empty pages on the partial list.
+ *
+ * Returns 0 on allocation failure.
+ *
+ * Must be called with interrupts disabled.
+ */
+static noinline void *__slab_alloc_page(struct kmem_cache *s,
+ gfp_t gfpflags, int node)
+{
+ struct slqb_page *page;
+ struct kmem_cache_list *l;
+ struct kmem_cache_cpu *c;
+ unsigned int colour;
+ void *object;
+
+ c = get_cpu_slab(s, smp_processor_id());
+ colour = c->colour_next;
+ c->colour_next += s->colour_off;
+ if (c->colour_next >= s->colour_range)
+ c->colour_next = 0;
+
+ /* Caller handles __GFP_ZERO */
+ gfpflags &= ~__GFP_ZERO;
+
+ if (gfpflags & __GFP_WAIT)
+ local_irq_enable();
+ page = new_slab_page(s, gfpflags, node, colour);
+ if (gfpflags & __GFP_WAIT)
+ local_irq_disable();
+ if (unlikely(!page))
+ return page;
+
+ if (!NUMA_BUILD || likely(slqb_page_to_nid(page) == numa_node_id())) {
+ struct kmem_cache_cpu *c;
+ int cpu = smp_processor_id();
+
+ c = get_cpu_slab(s, cpu);
+ l = &c->list;
+ page->list = l;
+
+ spin_lock(&l->page_lock);
+ l->nr_slabs++;
+ l->nr_partial++;
+ list_add(&page->lru, &l->partial);
+ slqb_stat_inc(l, ALLOC);
+ slqb_stat_inc(l, ALLOC_SLAB_NEW);
+ object = __cache_list_get_page(s, l);
+ spin_unlock(&l->page_lock);
+ } else {
+#ifdef CONFIG_NUMA
+ struct kmem_cache_node *n;
+
+ n = s->node_slab[slqb_page_to_nid(page)];
+ l = &n->list;
+ page->list = l;
+
+ spin_lock(&n->list_lock);
+ spin_lock(&l->page_lock);
+ l->nr_slabs++;
+ l->nr_partial++;
+ list_add(&page->lru, &l->partial);
+ slqb_stat_inc(l, ALLOC);
+ slqb_stat_inc(l, ALLOC_SLAB_NEW);
+ object = __cache_list_get_page(s, l);
+ spin_unlock(&l->page_lock);
+ spin_unlock(&n->list_lock);
+#endif
+ }
+ VM_BUG_ON(!object);
+ return object;
+}
+
+#ifdef CONFIG_NUMA
+static noinline int alternate_nid(struct kmem_cache *s,
+ gfp_t gfpflags, int node)
+{
+ if (in_interrupt() || (gfpflags & __GFP_THISNODE))
+ return node;
+ if (cpuset_do_slab_mem_spread() && (s->flags & SLAB_MEM_SPREAD))
+ return cpuset_mem_spread_node();
+ else if (current->mempolicy)
+ return slab_node(current->mempolicy);
+ return node;
+}
+
+/*
+ * Allocate an object from a remote node. Return NULL if none could be found
+ * (in which case, caller should allocate a new slab)
+ *
+ * Must be called with interrupts disabled.
+ */
+static void *__remote_slab_alloc_node(struct kmem_cache *s,
+ gfp_t gfpflags, int node)
+{
+ struct kmem_cache_node *n;
+ struct kmem_cache_list *l;
+ void *object;
+
+ n = s->node_slab[node];
+ if (unlikely(!n)) /* node has no memory */
+ return NULL;
+ l = &n->list;
+
+ spin_lock(&n->list_lock);
+
+ object = __cache_list_get_object(s, l);
+ if (unlikely(!object)) {
+ object = cache_list_get_page(s, l);
+ if (unlikely(!object)) {
+ spin_unlock(&n->list_lock);
+ return __slab_alloc_page(s, gfpflags, node);
+ }
+ }
+ if (likely(object))
+ slqb_stat_inc(l, ALLOC);
+ spin_unlock(&n->list_lock);
+ return object;
+}
+
+static noinline void *__remote_slab_alloc(struct kmem_cache *s,
+ gfp_t gfpflags, int node)
+{
+ void *object;
+ struct zonelist *zonelist;
+ struct zoneref *z;
+ struct zone *zone;
+ enum zone_type high_zoneidx = gfp_zone(gfpflags);
+
+ object = __remote_slab_alloc_node(s, gfpflags, node);
+ if (likely(object || (gfpflags & __GFP_THISNODE)))
+ return object;
+
+ zonelist = node_zonelist(slab_node(current->mempolicy), gfpflags);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ if (!cpuset_zone_allowed_hardwall(zone, gfpflags))
+ continue;
+
+ node = zone_to_nid(zone);
+ object = __remote_slab_alloc_node(s, gfpflags, node);
+ if (likely(object))
+ return object;
+ }
+ return NULL;
+}
+#endif
+
+/*
+ * Main allocation path. Return an object, or NULL on allocation failure.
+ *
+ * Must be called with interrupts disabled.
+ */
+static __always_inline void *__slab_alloc(struct kmem_cache *s,
+ gfp_t gfpflags, int node)
+{
+ void *object;
+ struct kmem_cache_cpu *c;
+ struct kmem_cache_list *l;
+
+#ifdef CONFIG_NUMA
+ if (unlikely(node != -1) && unlikely(node != numa_node_id())) {
+try_remote:
+ return __remote_slab_alloc(s, gfpflags, node);
+ }
+#endif
+
+ c = get_cpu_slab(s, smp_processor_id());
+ VM_BUG_ON(!c);
+ l = &c->list;
+ object = __cache_list_get_object(s, l);
+ if (unlikely(!object)) {
+ object = cache_list_get_page(s, l);
+ if (unlikely(!object)) {
+ object = __slab_alloc_page(s, gfpflags, node);
+#ifdef CONFIG_NUMA
+ if (unlikely(!object)) {
+ node = numa_node_id();
+ goto try_remote;
+ }
+#endif
+ return object;
+ }
+ }
+ if (likely(object))
+ slqb_stat_inc(l, ALLOC);
+ return object;
+}
+
+/*
+ * Perform some interrupts-on processing around the main allocation path
+ * (debug checking and memset()ing).
+ */
+static __always_inline void *slab_alloc(struct kmem_cache *s,
+ gfp_t gfpflags, int node, unsigned long addr)
+{
+ void *object;
+ unsigned long flags;
+
+ gfpflags &= gfp_allowed_mask;
+
+again:
+ local_irq_save(flags);
+ object = __slab_alloc(s, gfpflags, node);
+ local_irq_restore(flags);
+
+ if (unlikely(slab_debug(s)) && likely(object)) {
+ if (unlikely(!alloc_debug_processing(s, object, addr)))
+ goto again;
+ }
+
+ if (unlikely(gfpflags & __GFP_ZERO) && likely(object))
+ memset(object, 0, s->objsize);
+
+ return object;
+}
+
+static __always_inline void *__kmem_cache_alloc(struct kmem_cache *s,
+ gfp_t gfpflags, unsigned long caller)
+{
+ int node = -1;
+
+#ifdef CONFIG_NUMA
+ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
+ node = alternate_nid(s, gfpflags, node);
+#endif
+ return slab_alloc(s, gfpflags, node, caller);
+}
+
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
+{
+ return __kmem_cache_alloc(s, gfpflags, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
+{
+ return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
+
+#ifdef CONFIG_SMP
+/*
+ * Flush this CPU's remote free list of objects back to the list from where
+ * they originate. They end up on that list's remotely freed list, and
+ * eventually we set its remote_free_check if there are enough objects on it.
+ *
+ * This seems convoluted, but it keeps us from stomping on the target CPU's
+ * fastpath cachelines.
+ *
+ * Must be called with interrupts disabled.
+ */
+static void flush_remote_free_cache(struct kmem_cache *s,
+ struct kmem_cache_cpu *c)
+{
+ struct kmlist *src;
+ struct kmem_cache_list *dst;
+ unsigned int nr;
+ int set;
+
+ src = &c->rlist;
+ nr = src->nr;
+ if (unlikely(!nr))
+ return;
+
+#ifdef CONFIG_SLQB_STATS
+ {
+ struct kmem_cache_list *l = &c->list;
+
+ slqb_stat_inc(l, FLUSH_RFREE_LIST);
+ slqb_stat_add(l, FLUSH_RFREE_LIST_OBJECTS, nr);
+ }
+#endif
+
+ dst = c->remote_cache_list;
+
+ /*
+ * Less common case, dst is filling up so free synchronously.
+ * No point in having the remote CPU free these as it will just
+ * free them back to the page list anyway.
+ */
+ if (unlikely(dst->remote_free.list.nr > (slab_hiwater(s) >> 1))) {
+ void **head;
+
+ head = src->head;
+ spin_lock(&dst->page_lock);
+ do {
+ struct slqb_page *page;
+ void **object;
+
+ object = head;
+ VM_BUG_ON(!object);
+ head = get_freepointer(s, object);
+ page = virt_to_head_slqb_page(object);
+
+ free_object_to_page(s, dst, page, object);
+ nr--;
+ } while (nr);
+ spin_unlock(&dst->page_lock);
+
+ src->head = NULL;
+ src->tail = NULL;
+ src->nr = 0;
+
+ return;
+ }
+
+ spin_lock(&dst->remote_free.lock);
+
+ if (!dst->remote_free.list.head)
+ dst->remote_free.list.head = src->head;
+ else
+ set_freepointer(s, dst->remote_free.list.tail, src->head);
+ dst->remote_free.list.tail = src->tail;
+
+ src->head = NULL;
+ src->tail = NULL;
+ src->nr = 0;
+
+ if (dst->remote_free.list.nr < slab_freebatch(s))
+ set = 1;
+ else
+ set = 0;
+
+ dst->remote_free.list.nr += nr;
+
+ if (unlikely(dst->remote_free.list.nr >= slab_freebatch(s) && set))
+ dst->remote_free_check = 1;
+
+ spin_unlock(&dst->remote_free.lock);
+}
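+
+/*
+ * Example of the watermark logic in flush_remote_free_cache() above
+ * (hypothetical batch size of 16): a flush that grows
+ * dst->remote_free.list.nr from, say, 10 to 20 crosses the batch size and
+ * sets remote_free_check; a later flush growing it from 20 to 30 leaves
+ * the flag alone, so the owner CPU is only prodded once per crossing of
+ * the watermark.
+ */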
+
+/*
+ * Free an object to this CPU's remote free list.
+ *
+ * Must be called with interrupts disabled.
+ */
+static noinline void slab_free_to_remote(struct kmem_cache *s,
+ struct slqb_page *page, void *object,
+ struct kmem_cache_cpu *c)
+{
+ struct kmlist *r;
+
+ /*
+ * Our remote free list corresponds to a different list. Must
+ * flush it and switch.
+ */
+ if (page->list != c->remote_cache_list) {
+ flush_remote_free_cache(s, c);
+ c->remote_cache_list = page->list;
+ }
+
+ r = &c->rlist;
+ if (!r->head)
+ r->head = object;
+ else
+ set_freepointer(s, r->tail, object);
+ set_freepointer(s, object, NULL);
+ r->tail = object;
+ r->nr++;
+
+ if (unlikely(r->nr >= slab_freebatch(s)))
+ flush_remote_free_cache(s, c);
+}
+#endif
+
+/*
+ * Main freeing path.
+ *
+ * Must be called with interrupts disabled.
+ */
+static __always_inline void __slab_free(struct kmem_cache *s,
+ struct slqb_page *page, void *object)
+{
+ struct kmem_cache_cpu *c;
+ struct kmem_cache_list *l;
+ int thiscpu = smp_processor_id();
+
+ c = get_cpu_slab(s, thiscpu);
+ l = &c->list;
+
+ slqb_stat_inc(l, FREE);
+
+ if (!NUMA_BUILD || !slab_numa(s) ||
+ likely(slqb_page_to_nid(page) == numa_node_id())) {
+ /*
+ * Freeing fastpath. Collects all local-node objects, not
+ * just those allocated from our per-CPU list. This allows
+ * fast transfer of objects from one CPU to another within
+ * a given node.
+ */
+ set_freepointer(s, object, l->freelist.head);
+ l->freelist.head = object;
+ if (!l->freelist.nr)
+ l->freelist.tail = object;
+ l->freelist.nr++;
+
+ if (unlikely(l->freelist.nr > slab_hiwater(s)))
+ flush_free_list(s, l);
+
+ } else {
+#ifdef CONFIG_SMP
+ /*
+ * Freeing an object that was allocated on a remote node.
+ */
+ slab_free_to_remote(s, page, object, c);
+ slqb_stat_inc(l, FREE_REMOTE);
+#endif
+ }
+}
+
+/*
+ * Perform some interrupts-on processing around the main freeing path
+ * (debug checking).
+ */
+static __always_inline void slab_free(struct kmem_cache *s,
+ struct slqb_page *page, void *object)
+{
+ unsigned long flags;
+
+ prefetchw(object);
+
+ debug_check_no_locks_freed(object, s->objsize);
+ if (likely(object) && unlikely(slab_debug(s))) {
+ if (unlikely(!free_debug_processing(s, object, _RET_IP_)))
+ return;
+ }
+
+ local_irq_save(flags);
+ __slab_free(s, page, object);
+ local_irq_restore(flags);
+}
+
+void kmem_cache_free(struct kmem_cache *s, void *object)
+{
+ struct slqb_page *page = NULL;
+
+ if (slab_numa(s))
+ page = virt_to_head_slqb_page(object);
+ slab_free(s, page, object);
+}
+EXPORT_SYMBOL(kmem_cache_free);
+
+/*
+ * Calculate the order of allocation given an slab object size.
+ *
+ * Order 0 allocations are preferred since order 0 does not cause fragmentation
+ * in the page allocator, and they have fastpaths in the page allocator. But
+ * also minimise external fragmentation with large objects.
+ */
+static int slab_order(int size, int max_order, int frac)
+{
+ int order;
+
+ if (fls(size - 1) <= PAGE_SHIFT)
+ order = 0;
+ else
+ order = fls(size - 1) - PAGE_SHIFT;
+ if (order < slqb_min_order)
+ order = slqb_min_order;
+
+ while (order <= max_order) {
+ unsigned long slab_size = PAGE_SIZE << order;
+ unsigned long objects;
+ unsigned long waste;
+
+ objects = slab_size / size;
+ if (!objects)
+ goto next;
+
+ if (order < MAX_ORDER && objects < slqb_min_objects) {
+ /*
+ * If we don't have enough objects for slqb_min_objects,
+ * try the next order up, unless we have already reached
+ * the maximum possible order.
+ */
+ goto next;
+ }
+
+ waste = slab_size - (objects * size);
+
+ if (waste * frac <= slab_size)
+ break;
+
+next:
+ order++;
+ }
+
+ return order;
+}
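+
+/*
+ * Worked example (illustrative, assuming 4KB pages, slqb_min_order == 0 and
+ * slqb_min_objects <= 2): for size == 1536 and frac == 4, order 0 fits 2
+ * objects and wastes 1024 bytes; 1024 * 4 <= 4096, so order 0 is chosen.
+ * A 2100-byte object at order 0 fits only 1 object and wastes 1996 bytes;
+ * 1996 * 4 > 4096, so the search moves to order 1, where 3 objects fit and
+ * 1892 bytes are wasted; 1892 * 4 <= 8192, so order 1 is chosen.
+ */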
+
+static int calculate_order(int size)
+{
+ int order;
+
+ /*
+ * Attempt to find the best configuration for a slab: first try for
+ * a low order with little wasted space, then back off gradually if
+ * that is not possible.
+ */
+ order = slab_order(size, 1, 4);
+ if (order <= 1)
+ return order;
+
+ /*
+ * This size cannot fit in order-1. Allow bigger orders, but
+ * forget about trying to save space.
+ */
+ order = slab_order(size, MAX_ORDER - 1, 0);
+ if (order < MAX_ORDER)
+ return order;
+
+ return -ENOSYS;
+}
+
+/*
+ * Figure out what the alignment of the objects will be.
+ */
+static unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size)
+{
+ /*
+ * If the user wants hardware cache aligned objects then follow that
+ * suggestion if the object is sufficiently large.
+ *
+ * The hardware cache alignment cannot override an explicitly
+ * specified alignment, though; if that is greater, use it instead.
+ */
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ unsigned long ralign = cache_line_size();
+
+ while (size <= ralign / 2)
+ ralign /= 2;
+ align = max(align, ralign);
+ }
+
+ if (align < ARCH_SLAB_MINALIGN)
+ align = ARCH_SLAB_MINALIGN;
+
+ return ALIGN(align, sizeof(void *));
+}
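+
+/*
+ * Example (illustrative, assuming 64-byte cache lines and
+ * ARCH_SLAB_MINALIGN <= 32): with SLAB_HWCACHE_ALIGN, a 24-byte object has
+ * ralign halved from 64 to 32 (24 <= 32) and no further (24 > 16), so a
+ * requested align of 0 becomes 32; a 100-byte object keeps the full 64-byte
+ * cache-line alignment.
+ */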
+
+static void init_kmem_cache_list(struct kmem_cache *s,
+ struct kmem_cache_list *l)
+{
+ l->cache = s;
+ l->freelist.nr = 0;
+ l->freelist.head = NULL;
+ l->freelist.tail = NULL;
+ l->nr_partial = 0;
+ l->nr_slabs = 0;
+ INIT_LIST_HEAD(&l->partial);
+ spin_lock_init(&l->page_lock);
+
+#ifdef CONFIG_SMP
+ l->remote_free_check = 0;
+ spin_lock_init(&l->remote_free.lock);
+ l->remote_free.list.nr = 0;
+ l->remote_free.list.head = NULL;
+ l->remote_free.list.tail = NULL;
+#endif
+
+#ifdef CONFIG_SLQB_STATS
+ memset(l->stats, 0, sizeof(l->stats));
+#endif
+}
+
+static void init_kmem_cache_cpu(struct kmem_cache *s,
+ struct kmem_cache_cpu *c)
+{
+ init_kmem_cache_list(s, &c->list);
+
+ c->colour_next = 0;
+#ifdef CONFIG_SMP
+ c->rlist.nr = 0;
+ c->rlist.head = NULL;
+ c->rlist.tail = NULL;
+ c->remote_cache_list = NULL;
+#endif
+}
+
+#ifdef CONFIG_NUMA
+static void init_kmem_cache_node(struct kmem_cache *s,
+ struct kmem_cache_node *n)
+{
+ spin_lock_init(&n->list_lock);
+ init_kmem_cache_list(s, &n->list);
+}
+#endif
+
+/* Initial slabs. */
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmem_cache_cpus);
+#endif
+#ifdef CONFIG_NUMA
+/*
+ * XXX: really need a DEFINE_PER_NODE for per-node data, but this is better
+ * than a static array.
+ */
+static DEFINE_PER_CPU(struct kmem_cache_node, kmem_cache_nodes);
+#endif
+
+#ifdef CONFIG_SMP
+static struct kmem_cache kmem_cpu_cache;
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmem_cpu_cpus);
+#ifdef CONFIG_NUMA
+static DEFINE_PER_CPU(struct kmem_cache_node, kmem_cpu_nodes); /* XXX per-nid */
+#endif
+#endif
+
+#ifdef CONFIG_NUMA
+static struct kmem_cache kmem_node_cache;
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmem_node_cpus);
+#endif
+static DEFINE_PER_CPU(struct kmem_cache_node, kmem_node_nodes); /* XXX per-nid */
+#endif
+
+#ifdef CONFIG_SMP
+static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
+ int cpu)
+{
+ struct kmem_cache_cpu *c;
+ int node;
+
+ node = cpu_to_node(cpu);
+
+ c = kmem_cache_alloc_node(&kmem_cpu_cache, GFP_KERNEL, node);
+ if (!c)
+ return NULL;
+
+ init_kmem_cache_cpu(s, c);
+ return c;
+}
+
+static void free_kmem_cache_cpus(struct kmem_cache *s)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ struct kmem_cache_cpu *c;
+
+ c = s->cpu_slab[cpu];
+ if (c) {
+ kmem_cache_free(&kmem_cpu_cache, c);
+ s->cpu_slab[cpu] = NULL;
+ }
+ }
+}
+
+static int alloc_kmem_cache_cpus(struct kmem_cache *s)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ struct kmem_cache_cpu *c;
+
+ c = s->cpu_slab[cpu];
+ if (c)
+ continue;
+
+ c = alloc_kmem_cache_cpu(s, cpu);
+ if (!c) {
+ free_kmem_cache_cpus(s);
+ return 0;
+ }
+ s->cpu_slab[cpu] = c;
+ }
+ return 1;
+}
+
+#else
+static inline void free_kmem_cache_cpus(struct kmem_cache *s)
+{
+}
+
+static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
+{
+ init_kmem_cache_cpu(s, &s->cpu_slab);
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_NUMA
+static void free_kmem_cache_nodes(struct kmem_cache *s)
+{
+ int node;
+
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n;
+
+ n = s->node_slab[node];
+ if (n) {
+ kmem_cache_free(&kmem_node_cache, n);
+ s->node_slab[node] = NULL;
+ }
+ }
+}
+
+static int alloc_kmem_cache_nodes(struct kmem_cache *s)
+{
+ int node;
+
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n;
+
+ n = kmem_cache_alloc_node(&kmem_node_cache, GFP_KERNEL, node);
+ if (!n) {
+ free_kmem_cache_nodes(s);
+ return 0;
+ }
+ init_kmem_cache_node(s, n);
+ s->node_slab[node] = n;
+ }
+ return 1;
+}
+#else
+static void free_kmem_cache_nodes(struct kmem_cache *s)
+{
+}
+
+static int alloc_kmem_cache_nodes(struct kmem_cache *s)
+{
+ return 1;
+}
+#endif
+
+/*
+ * calculate_sizes() determines the order and the distribution of data within
+ * a slab object.
+ */
+static int calculate_sizes(struct kmem_cache *s)
+{
+ unsigned long flags = s->flags;
+ unsigned long size = s->objsize;
+ unsigned long align = s->align;
+
+ /*
+ * Determine if we can poison the object itself. If the user of
+ * the slab may touch the object after free or before allocation
+ * then we should never poison the object itself.
+ */
+ if (slab_poison(s) && !(flags & SLAB_DESTROY_BY_RCU) && !s->ctor)
+ s->flags |= __OBJECT_POISON;
+ else
+ s->flags &= ~__OBJECT_POISON;
+
+ /*
+ * Round up object size to the next word boundary. We can only
+ * place the free pointer at word boundaries and this determines
+ * the possible location of the free pointer.
+ */
+ size = ALIGN(size, sizeof(void *));
+
+#ifdef CONFIG_SLQB_DEBUG
+ /*
+ * If we are Redzoning then check if there is some space between the
+ * end of the object and the free pointer. If not then add an
+ * additional word to have some bytes to store Redzone information.
+ */
+ if ((flags & SLAB_RED_ZONE) && size == s->objsize)
+ size += sizeof(void *);
+#endif
+
+ /*
+ * With that we have determined the number of bytes in actual use
+ * by the object. This is the potential offset to the free pointer.
+ */
+ s->inuse = size;
+
+ if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || s->ctor)) {
+ /*
+ * Relocate free pointer after the object if it is not
+ * permitted to overwrite the first word of the object on
+ * kmem_cache_free.
+ *
+ * This is the case if we use RCU, have a constructor, or are
+ * poisoning the objects.
+ */
+ s->offset = size;
+ size += sizeof(void *);
+ }
+
+#ifdef CONFIG_SLQB_DEBUG
+ if (flags & SLAB_STORE_USER) {
+ /*
+ * Need to store information about allocs and frees after
+ * the object.
+ */
+ size += 2 * sizeof(struct track);
+ }
+
+ if (flags & SLAB_RED_ZONE) {
+ /*
+ * Add some empty padding so that we can catch
+ * overwrites from earlier objects rather than letting
+ * tracking information or the free pointer be
+ * corrupted if a user writes before the start
+ * of the object.
+ */
+ size += sizeof(void *);
+ }
+#endif
+
+ /*
+ * Determine the alignment based on various parameters that the
+ * user specified and the dynamic determination of cache line size
+ * on bootup.
+ */
+ align = calculate_alignment(flags, align, s->objsize);
+
+ /*
+ * SLQB stores one object immediately after another beginning from
+ * offset 0. In order to align the objects we have to simply size
+ * each object to conform to the alignment.
+ */
+ size = ALIGN(size, align);
+ s->size = size;
+ s->order = calculate_order(size);
+
+ if (s->order < 0)
+ return 0;
+
+ s->allocflags = 0;
+ if (s->order)
+ s->allocflags |= __GFP_COMP;
+
+ if (s->flags & SLAB_CACHE_DMA)
+ s->allocflags |= SLQB_DMA;
+
+ if (s->flags & SLAB_RECLAIM_ACCOUNT)
+ s->allocflags |= __GFP_RECLAIMABLE;
+
+ /*
+ * Determine the number of objects per slab
+ */
+ s->objects = (PAGE_SIZE << s->order) / size;
+
+ s->freebatch = max(4UL*PAGE_SIZE / size,
+ min(256UL, 64*PAGE_SIZE / size));
+ if (!s->freebatch)
+ s->freebatch = 1;
+ s->hiwater = s->freebatch << 2;
+
+ return !!s->objects;
+}
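+
+/*
+ * Worked example (illustrative, assuming 4KB pages, no debug overhead and
+ * slqb_min_objects <= 16): for a 256-byte object the order comes out at 0,
+ * so s->objects = 4096 / 256 = 16, s->freebatch =
+ * max(4 * 4096 / 256, min(256, 64 * 4096 / 256)) = max(64, 256) = 256 and
+ * s->hiwater = 256 << 2 = 1024, i.e. a CPU may hold up to 1024 free objects
+ * on its local freelist before a flush is triggered.
+ */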
+
+#ifdef CONFIG_SMP
+/*
+ * The per-cpu allocator can't be used because it itself uses the slab
+ * allocator, and it can't do per-node allocations.
+ */
+static void *kmem_cache_dyn_array_alloc(int ids)
+{
+ size_t size = sizeof(void *) * ids;
+
+ BUG_ON(!size);
+
+ if (unlikely(!slab_is_available())) {
+ static void *nextmem;
+ static size_t nextleft;
+ void *ret;
+
+ /*
+ * Special case for setting up initial caches. These will
+ * never get freed by definition so we can do it rather
+ * simply.
+ */
+ if (size > nextleft) {
+ nextmem = alloc_pages_exact(size, GFP_KERNEL);
+ if (!nextmem)
+ return NULL;
+ nextleft = roundup(size, PAGE_SIZE);
+ }
+
+ ret = nextmem;
+ nextleft -= size;
+ nextmem += size;
+ memset(ret, 0, size);
+ return ret;
+ } else {
+ return kzalloc(size, GFP_KERNEL);
+ }
+}
+
+static void kmem_cache_dyn_array_free(void *array)
+{
+ if (unlikely(!slab_is_available()))
+ return; /* error case without crashing here (will panic soon) */
+ kfree(array);
+}
+#endif
+
+static int kmem_cache_open(struct kmem_cache *s,
+ const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *), int alloc)
+{
+ unsigned int left_over;
+
+ memset(s, 0, sizeof(struct kmem_cache));
+ s->name = name;
+ s->ctor = ctor;
+ s->objsize = size;
+ s->align = align;
+ s->flags = kmem_cache_flags(size, flags, name, ctor);
+
+ if (!calculate_sizes(s))
+ goto error;
+
+ if (!slab_debug(s)) {
+ left_over = (PAGE_SIZE << s->order) - (s->objects * s->size);
+ s->colour_off = max(cache_line_size(), s->align);
+ s->colour_range = left_over;
+ } else {
+ s->colour_off = 0;
+ s->colour_range = 0;
+ }
+
+ /*
+ * Protect all alloc_kmem_cache_cpus/nodes allocations with slqb_lock
+ * to lock out hotplug, just in case (probably not strictly needed
+ * here).
+ */
+ down_write(&slqb_lock);
+#ifdef CONFIG_SMP
+ s->cpu_slab = kmem_cache_dyn_array_alloc(nr_cpu_ids);
+ if (!s->cpu_slab)
+ goto error_lock;
+# ifdef CONFIG_NUMA
+ s->node_slab = kmem_cache_dyn_array_alloc(nr_node_ids);
+ if (!s->node_slab)
+ goto error_cpu_array;
+# endif
+#endif
+
+ if (likely(alloc)) {
+ if (!alloc_kmem_cache_nodes(s))
+ goto error_node_array;
+
+ if (!alloc_kmem_cache_cpus(s))
+ goto error_nodes;
+ }
+
+ sysfs_slab_add(s);
+ list_add(&s->list, &slab_caches);
+ up_write(&slqb_lock);
+
+ return 1;
+
+error_nodes:
+ free_kmem_cache_nodes(s);
+error_node_array:
+#if defined(CONFIG_NUMA) && defined(CONFIG_SMP)
+ kmem_cache_dyn_array_free(s->node_slab);
+error_cpu_array:
+#endif
+#ifdef CONFIG_SMP
+ kmem_cache_dyn_array_free(s->cpu_slab);
+error_lock:
+#endif
+ up_write(&slqb_lock);
+error:
+ if (flags & SLAB_PANIC)
+ panic("%s: failed to create slab `%s'\n", __func__, name);
+ return 0;
+}
+
+/**
+ * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
+ * @s: the cache we're checking against
+ * @ptr: pointer to validate
+ *
+ * This verifies that the untrusted pointer looks sane;
+ * it is _not_ a guarantee that the pointer is actually
+ * part of the slab cache in question, but it at least
+ * validates that the pointer can be dereferenced and
+ * looks half-way sane.
+ *
+ * Currently only used for dentry validation.
+ */
+int kmem_ptr_validate(struct kmem_cache *s, const void *ptr)
+{
+ unsigned long addr = (unsigned long)ptr;
+ struct slqb_page *page;
+
+ if (unlikely(addr < PAGE_OFFSET))
+ goto out;
+ if (unlikely(addr > (unsigned long)high_memory - s->size))
+ goto out;
+ if (unlikely(!IS_ALIGNED(addr, s->align)))
+ goto out;
+ if (unlikely(!kern_addr_valid(addr)))
+ goto out;
+ if (unlikely(!kern_addr_valid(addr + s->size - 1)))
+ goto out;
+ if (unlikely(!pfn_valid(addr >> PAGE_SHIFT)))
+ goto out;
+ page = virt_to_head_slqb_page(ptr);
+ if (unlikely(!(page->flags & PG_SLQB_BIT)))
+ goto out;
+ if (unlikely(page->list->cache != s)) /* XXX: ouch, racy */
+ goto out;
+ return 1;
+out:
+ return 0;
+}
+EXPORT_SYMBOL(kmem_ptr_validate);
+
+/*
+ * Determine the size of a slab object
+ */
+unsigned int kmem_cache_size(struct kmem_cache *s)
+{
+ return s->objsize;
+}
+EXPORT_SYMBOL(kmem_cache_size);
+
+const char *kmem_cache_name(struct kmem_cache *s)
+{
+ return s->name;
+}
+EXPORT_SYMBOL(kmem_cache_name);
+
+/*
+ * Release all resources used by a slab cache. No more concurrency on the
+ * slab, so we can touch remote kmem_cache_cpu structures.
+ */
+void kmem_cache_destroy(struct kmem_cache *s)
+{
+#ifdef CONFIG_NUMA
+ int node;
+#endif
+ int cpu;
+
+ down_write(&slqb_lock);
+ list_del(&s->list);
+
+ local_irq_disable();
+#ifdef CONFIG_SMP
+ for_each_online_cpu(cpu) {
+ struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+ struct kmem_cache_list *l = &c->list;
+
+ flush_free_list_all(s, l);
+ flush_remote_free_cache(s, c);
+ }
+#endif
+
+ for_each_online_cpu(cpu) {
+ struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+ struct kmem_cache_list *l = &c->list;
+
+ claim_remote_free_list(s, l);
+ flush_free_list_all(s, l);
+
+ WARN_ON(l->freelist.nr);
+ WARN_ON(l->nr_slabs);
+ WARN_ON(l->nr_partial);
+ }
+
+ free_kmem_cache_cpus(s);
+
+#ifdef CONFIG_NUMA
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n;
+ struct kmem_cache_list *l;
+
+ n = s->node_slab[node];
+ if (!n)
+ continue;
+ l = &n->list;
+
+ claim_remote_free_list(s, l);
+ flush_free_list_all(s, l);
+
+ WARN_ON(l->freelist.nr);
+ WARN_ON(l->nr_slabs);
+ WARN_ON(l->nr_partial);
+ }
+
+ free_kmem_cache_nodes(s);
+#endif
+ local_irq_enable();
+
+ sysfs_slab_remove(s);
+ up_write(&slqb_lock);
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
+/********************************************************************
+ * Kmalloc subsystem
+ *******************************************************************/
+
+struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_SLQB_HIGH + 1] __cacheline_aligned;
+EXPORT_SYMBOL(kmalloc_caches);
+
+#ifdef CONFIG_ZONE_DMA
+struct kmem_cache kmalloc_caches_dma[KMALLOC_SHIFT_SLQB_HIGH + 1] __cacheline_aligned;
+EXPORT_SYMBOL(kmalloc_caches_dma);
+#endif
+
+#ifndef ARCH_KMALLOC_FLAGS
+#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
+#endif
+
+static struct kmem_cache *open_kmalloc_cache(struct kmem_cache *s,
+ const char *name, int size, gfp_t gfp_flags)
+{
+ unsigned int flags = ARCH_KMALLOC_FLAGS | SLAB_PANIC;
+
+ if (gfp_flags & SLQB_DMA)
+ flags |= SLAB_CACHE_DMA;
+
+ kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, flags, NULL, 1);
+
+ return s;
+}
+
+/*
+ * Conversion table for small slabs sizes / 8 to the index in the
+ * kmalloc array. This is necessary for slabs < 192 since we have non power
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+static s8 size_index[24] __cacheline_aligned = {
+ 3, /* 8 */
+ 4, /* 16 */
+ 5, /* 24 */
+ 5, /* 32 */
+ 6, /* 40 */
+ 6, /* 48 */
+ 6, /* 56 */
+ 6, /* 64 */
+#if L1_CACHE_BYTES < 64
+ 1, /* 72 */
+ 1, /* 80 */
+ 1, /* 88 */
+ 1, /* 96 */
+#else
+ 7,
+ 7,
+ 7,
+ 7,
+#endif
+ 7, /* 104 */
+ 7, /* 112 */
+ 7, /* 120 */
+ 7, /* 128 */
+#if L1_CACHE_BYTES < 128
+ 2, /* 136 */
+ 2, /* 144 */
+ 2, /* 152 */
+ 2, /* 160 */
+ 2, /* 168 */
+ 2, /* 176 */
+ 2, /* 184 */
+ 2 /* 192 */
+#else
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1
+#endif
+};
+
+static struct kmem_cache *get_slab(size_t size, gfp_t flags)
+{
+ int index;
+
+ if (unlikely(size <= KMALLOC_MIN_SIZE)) {
+ if (unlikely(!size))
+ return ZERO_SIZE_PTR;
+
+ index = KMALLOC_SHIFT_LOW;
+ goto got_index;
+ }
+
+#if L1_CACHE_BYTES >= 128
+ if (size <= 128) {
+#else
+ if (size <= 192) {
+#endif
+ index = size_index[(size - 1) / 8];
+ } else {
+ if (unlikely(size > 1UL << KMALLOC_SHIFT_SLQB_HIGH))
+ return NULL;
+
+ index = fls(size - 1);
+ }
+
+got_index:
+ if (unlikely((flags & SLQB_DMA)))
+ return &kmalloc_caches_dma[index];
+ else
+ return &kmalloc_caches[index];
+}
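+
+/*
+ * Example (illustrative, assuming L1_CACHE_BYTES == 64 and
+ * KMALLOC_MIN_SIZE == 8): kmalloc(100) maps via size_index[(100 - 1) / 8] ==
+ * size_index[12] == 7 to the kmalloc-128 cache, while kmalloc(300) falls
+ * through to fls(299) == 9, i.e. the kmalloc-512 cache.
+ */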
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ struct kmem_cache *s;
+
+ s = get_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
+ return s;
+
+ return __kmem_cache_alloc(s, flags, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ struct kmem_cache *s;
+
+ s = get_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
+ return s;
+
+ return kmem_cache_alloc_node(s, flags, node);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+#endif
+
+size_t ksize(const void *object)
+{
+ struct slqb_page *page;
+ struct kmem_cache *s;
+
+ BUG_ON(!object);
+ if (unlikely(object == ZERO_SIZE_PTR))
+ return 0;
+
+ page = virt_to_head_slqb_page(object);
+ BUG_ON(!(page->flags & PG_SLQB_BIT));
+
+ s = page->list->cache;
+
+ /*
+ * Debugging requires use of the padding between object
+ * and whatever may come after it.
+ */
+ if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+ return s->objsize;
+
+ /*
+ * If we have the need to store the freelist pointer
+ * back there or track user information then we can
+ * only use the space before that information.
+ */
+ if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+ return s->inuse;
+
+ /*
+ * Else we can use all the padding etc for the allocation
+ */
+ return s->size;
+}
+EXPORT_SYMBOL(ksize);
+
+void kfree(const void *object)
+{
+ struct kmem_cache *s;
+ struct slqb_page *page;
+
+ if (unlikely(ZERO_OR_NULL_PTR(object)))
+ return;
+
+ page = virt_to_head_slqb_page(object);
+ s = page->list->cache;
+
+ slab_free(s, page, (void *)object);
+}
+EXPORT_SYMBOL(kfree);
+
+static void kmem_cache_trim_percpu(void *arg)
+{
+ int cpu = smp_processor_id();
+ struct kmem_cache *s = arg;
+ struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+ struct kmem_cache_list *l = &c->list;
+
+ claim_remote_free_list(s, l);
+ flush_free_list(s, l);
+#ifdef CONFIG_SMP
+ flush_remote_free_cache(s, c);
+#endif
+}
+
+int kmem_cache_shrink(struct kmem_cache *s)
+{
+#ifdef CONFIG_NUMA
+ int node;
+#endif
+
+ on_each_cpu(kmem_cache_trim_percpu, s, 1);
+
+#ifdef CONFIG_NUMA
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n;
+ struct kmem_cache_list *l;
+
+ n = s->node_slab[node];
+ if (!n)
+ continue;
+ l = &n->list;
+
+ spin_lock_irq(&n->list_lock);
+ claim_remote_free_list(s, l);
+ flush_free_list(s, l);
+ spin_unlock_irq(&n->list_lock);
+ }
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(kmem_cache_shrink);
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+static void kmem_cache_reap_percpu(void *arg)
+{
+ int cpu = smp_processor_id();
+ struct kmem_cache *s;
+ long phase = (long)arg;
+
+ list_for_each_entry(s, &slab_caches, list) {
+ struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+ struct kmem_cache_list *l = &c->list;
+
+ if (phase == 0) {
+ flush_free_list_all(s, l);
+ flush_remote_free_cache(s, c);
+ }
+
+ if (phase == 1) {
+ claim_remote_free_list(s, l);
+ flush_free_list_all(s, l);
+ }
+ }
+}
+
+static void kmem_cache_reap(void)
+{
+ struct kmem_cache *s;
+ int node;
+
+ down_read(&slqb_lock);
+ on_each_cpu(kmem_cache_reap_percpu, (void *)0, 1);
+ on_each_cpu(kmem_cache_reap_percpu, (void *)1, 1);
+
+ list_for_each_entry(s, &slab_caches, list) {
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n;
+ struct kmem_cache_list *l;
+
+ n = s->node_slab[node];
+ if (!n)
+ continue;
+ l = &n->list;
+
+ spin_lock_irq(&n->list_lock);
+ claim_remote_free_list(s, l);
+ flush_free_list_all(s, l);
+ spin_unlock_irq(&n->list_lock);
+ }
+ }
+ up_read(&slqb_lock);
+}
+#endif
+
+static void cache_trim_worker(struct work_struct *w)
+{
+ struct delayed_work *work =
+ container_of(w, struct delayed_work, work);
+ struct kmem_cache *s;
+
+ if (!down_read_trylock(&slqb_lock))
+ goto out;
+
+ list_for_each_entry(s, &slab_caches, list) {
+#ifdef CONFIG_NUMA
+ int node = numa_node_id();
+ struct kmem_cache_node *n = s->node_slab[node];
+
+ if (n) {
+ struct kmem_cache_list *l = &n->list;
+
+ spin_lock_irq(&n->list_lock);
+ claim_remote_free_list(s, l);
+ flush_free_list(s, l);
+ spin_unlock_irq(&n->list_lock);
+ }
+#endif
+
+ local_irq_disable();
+ kmem_cache_trim_percpu(s);
+ local_irq_enable();
+ }
+
+ up_read(&slqb_lock);
+out:
+ schedule_delayed_work(work, round_jiffies_relative(3*HZ));
+}
+
+static DEFINE_PER_CPU(struct delayed_work, cache_trim_work);
+
+static void __cpuinit start_cpu_timer(int cpu)
+{
+ struct delayed_work *cache_trim_work = &per_cpu(cache_trim_work, cpu);
+
+ /*
+ * When this gets called from do_initcalls via cpucache_init(),
+ * init_workqueues() has already run, so keventd will be setup
+ * at that time.
+ */
+ if (keventd_up() && cache_trim_work->work.func == NULL) {
+ INIT_DELAYED_WORK(cache_trim_work, cache_trim_worker);
+ schedule_delayed_work_on(cpu, cache_trim_work,
+ __round_jiffies_relative(HZ, cpu));
+ }
+}
+
+static int __init cpucache_init(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ start_cpu_timer(cpu);
+
+ return 0;
+}
+device_initcall(cpucache_init);
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+static void slab_mem_going_offline_callback(void *arg)
+{
+ kmem_cache_reap();
+}
+
+static void slab_mem_offline_callback(void *arg)
+{
+ /* XXX: should release structures, see CPU offline comment */
+}
+
+static int slab_mem_going_online_callback(void *arg)
+{
+ struct kmem_cache *s;
+ struct kmem_cache_node *n;
+ struct memory_notify *marg = arg;
+ int nid = marg->status_change_nid;
+ int ret = 0;
+
+ /*
+ * If the node's memory is already available, then kmem_cache_node is
+ * already created. Nothing to do.
+ */
+ if (nid < 0)
+ return 0;
+
+ /*
+ * We are bringing a node online. No memory is available yet. We must
+ * allocate a kmem_cache_node structure in order to bring the node
+ * online.
+ */
+ down_write(&slqb_lock);
+ list_for_each_entry(s, &slab_caches, list) {
+ /*
+ * XXX: kmem_cache_alloc_node will fallback to other nodes
+ * since memory is not yet available from the node that
+ * is brought up.
+ */
+ if (s->node_slab[nid]) /* could be left over from last online */
+ continue;
+ n = kmem_cache_alloc(&kmem_node_cache, GFP_KERNEL);
+ if (!n) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ init_kmem_cache_node(s, n);
+ s->node_slab[nid] = n;
+ }
+out:
+ up_write(&slqb_lock);
+ return ret;
+}
+
+static int slab_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ int ret = 0;
+
+ switch (action) {
+ case MEM_GOING_ONLINE:
+ ret = slab_mem_going_online_callback(arg);
+ break;
+ case MEM_GOING_OFFLINE:
+ slab_mem_going_offline_callback(arg);
+ break;
+ case MEM_OFFLINE:
+ case MEM_CANCEL_ONLINE:
+ slab_mem_offline_callback(arg);
+ break;
+ case MEM_ONLINE:
+ case MEM_CANCEL_OFFLINE:
+ break;
+ }
+
+ ret = notifier_from_errno(ret);
+ return ret;
+}
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+/********************************************************************
+ * Basic setup of slabs
+ *******************************************************************/
+
+void __init kmem_cache_init(void)
+{
+ int i;
+ unsigned int flags = SLAB_HWCACHE_ALIGN|SLAB_PANIC;
+
+ /*
+ * All the ifdefs are rather ugly here, but it's just the setup code,
+ * so it doesn't have to be too readable :)
+ */
+ kmem_cache_open(&kmem_cache_cache, "kmem_cache",
+ sizeof(struct kmem_cache), 0, flags, NULL, 0);
+#ifdef CONFIG_SMP
+ kmem_cache_open(&kmem_cpu_cache, "kmem_cache_cpu",
+ sizeof(struct kmem_cache_cpu), 0, flags, NULL, 0);
+#endif
+#ifdef CONFIG_NUMA
+ kmem_cache_open(&kmem_node_cache, "kmem_cache_node",
+ sizeof(struct kmem_cache_node), 0, flags, NULL, 0);
+#endif
+
+#ifdef CONFIG_SMP
+ for_each_possible_cpu(i) {
+ struct kmem_cache_cpu *c;
+
+ c = &per_cpu(kmem_cache_cpus, i);
+ init_kmem_cache_cpu(&kmem_cache_cache, c);
+ kmem_cache_cache.cpu_slab[i] = c;
+
+ c = &per_cpu(kmem_cpu_cpus, i);
+ init_kmem_cache_cpu(&kmem_cpu_cache, c);
+ kmem_cpu_cache.cpu_slab[i] = c;
+
+#ifdef CONFIG_NUMA
+ c = &per_cpu(kmem_node_cpus, i);
+ init_kmem_cache_cpu(&kmem_node_cache, c);
+ kmem_node_cache.cpu_slab[i] = c;
+#endif
+ }
+#else
+ init_kmem_cache_cpu(&kmem_cache_cache, &kmem_cache_cache.cpu_slab);
+#endif
+
+#ifdef CONFIG_NUMA
+ for_each_node_state(i, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n;
+
+ n = &per_cpu(kmem_cache_nodes, i);
+ init_kmem_cache_node(&kmem_cache_cache, n);
+ kmem_cache_cache.node_slab[i] = n;
+#ifdef CONFIG_SMP
+ n = &per_cpu(kmem_cpu_nodes, i);
+ init_kmem_cache_node(&kmem_cpu_cache, n);
+ kmem_cpu_cache.node_slab[i] = n;
+#endif
+ n = &per_cpu(kmem_node_nodes, i);
+ init_kmem_cache_node(&kmem_node_cache, n);
+ kmem_node_cache.node_slab[i] = n;
+ }
+#endif
+
+ /* Caches that are not power-of-two sized */
+ if (L1_CACHE_BYTES < 64 && KMALLOC_MIN_SIZE <= 64) {
+ open_kmalloc_cache(&kmalloc_caches[1],
+ "kmalloc-96", 96, GFP_KERNEL);
+#ifdef CONFIG_ZONE_DMA
+ open_kmalloc_cache(&kmalloc_caches_dma[1],
+ "kmalloc_dma-96", 96, GFP_KERNEL|SLQB_DMA);
+#endif
+ }
+ if (L1_CACHE_BYTES < 128 && KMALLOC_MIN_SIZE <= 128) {
+ open_kmalloc_cache(&kmalloc_caches[2],
+ "kmalloc-192", 192, GFP_KERNEL);
+#ifdef CONFIG_ZONE_DMA
+ open_kmalloc_cache(&kmalloc_caches_dma[2],
+ "kmalloc_dma-192", 192, GFP_KERNEL|SLQB_DMA);
+#endif
+ }
+
+ for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_SLQB_HIGH; i++) {
+ open_kmalloc_cache(&kmalloc_caches[i],
+ "kmalloc", 1 << i, GFP_KERNEL);
+#ifdef CONFIG_ZONE_DMA
+ open_kmalloc_cache(&kmalloc_caches_dma[i],
+ "kmalloc_dma", 1 << i, GFP_KERNEL|SLQB_DMA);
+#endif
+ }
+
+ /*
+ * Patch up the size_index table if we have strange large alignment
+ * requirements for the kmalloc array. This is only the case for
+ * MIPS, it seems. The standard arches will not generate any code here.
+ *
+ * Largest permitted alignment is 256 bytes due to the way we
+ * handle the index determination for the smaller caches.
+ *
+ * Make sure that nothing crazy happens if someone starts tinkering
+ * around with ARCH_KMALLOC_MINALIGN
+ */
+ BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+ (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+
+ for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
+ size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+
+ /* Provide the correct kmalloc names now that the caches are up */
+ for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_SLQB_HIGH; i++) {
+ kmalloc_caches[i].name =
+ kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+#ifdef CONFIG_ZONE_DMA
+ kmalloc_caches_dma[i].name =
+ kasprintf(GFP_KERNEL, "kmalloc_dma-%d", 1 << i);
+#endif
+ }
+
+#ifdef CONFIG_SMP
+ register_cpu_notifier(&slab_notifier);
+#endif
+#ifdef CONFIG_NUMA
+ hotplug_memory_notifier(slab_memory_callback, 1);
+#endif
+ /*
+ * smp_init() has not yet been called, so no worries about memory
+ * ordering with __slab_is_available.
+ */
+ __slab_is_available = 1;
+}
+
+void __init kmem_cache_init_late(void)
+{
+}
+
+/*
+ * Some basic slab creation sanity checks
+ */
+static int kmem_cache_create_ok(const char *name, size_t size,
+ size_t align, unsigned long flags)
+{
+ struct kmem_cache *tmp;
+
+ /*
+ * Sanity checks... these are all serious usage bugs.
+ */
+ if (!name || in_interrupt() || (size < sizeof(void *))) {
+ printk(KERN_ERR "kmem_cache_create(): early error in slab %s\n",
+ name);
+ dump_stack();
+
+ return 0;
+ }
+
+ down_read(&slqb_lock);
+
+ list_for_each_entry(tmp, &slab_caches, list) {
+ char x;
+ int res;
+
+ /*
+ * This happens when the module gets unloaded and doesn't
+ * destroy its slab cache and no-one else reuses the vmalloc
+ * area of the module. Print a warning.
+ */
+ res = probe_kernel_address(tmp->name, x);
+ if (res) {
+ printk(KERN_ERR
+ "SLAB: cache with size %d has lost its name\n",
+ tmp->size);
+ continue;
+ }
+
+ if (!strcmp(tmp->name, name)) {
+ printk(KERN_ERR
+ "SLAB: duplicate cache %s\n", name);
+ dump_stack();
+ up_read(&slqb_lock);
+
+ return 0;
+ }
+ }
+
+ up_read(&slqb_lock);
+
+ WARN_ON(strchr(name, ' ')); /* It confuses parsers */
+ if (flags & SLAB_DESTROY_BY_RCU)
+ WARN_ON(flags & SLAB_POISON);
+
+ return 1;
+}
+
+struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *))
+{
+ struct kmem_cache *s;
+
+ if (!kmem_cache_create_ok(name, size, align, flags))
+ goto err;
+
+ s = kmem_cache_alloc(&kmem_cache_cache, GFP_KERNEL);
+ if (!s)
+ goto err;
+
+ if (kmem_cache_open(s, name, size, align, flags, ctor, 1))
+ return s;
+
+ kmem_cache_free(&kmem_cache_cache, s);
+
+err:
+ if (flags & SLAB_PANIC)
+ panic("%s: failed to create slab `%s'\n", __func__, name);
+
+ return NULL;
+}
+EXPORT_SYMBOL(kmem_cache_create);
+
+#ifdef CONFIG_SMP
+/*
+ * Use the cpu notifier to ensure that the cpu slabs are flushed when
+ * necessary.
+ */
+static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+ struct kmem_cache *s;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ down_write(&slqb_lock);
+ list_for_each_entry(s, &slab_caches, list) {
+ if (s->cpu_slab[cpu]) /* could be left over from last online */
+ continue;
+ s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu);
+ if (!s->cpu_slab[cpu]) {
+ up_write(&slqb_lock);
+ return NOTIFY_BAD;
+ }
+ }
+ up_write(&slqb_lock);
+ break;
+
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ start_cpu_timer(cpu);
+ break;
+
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ cancel_rearming_delayed_work(&per_cpu(cache_trim_work, cpu));
+ per_cpu(cache_trim_work, cpu).work.func = NULL;
+ break;
+
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ /*
+ * XXX: Freeing here doesn't work because objects can still be
+ * on this CPU's list. The periodic timer needs to check if a CPU
+ * is offline and then try to clean up from there. Same for node
+ * offline.
+ */
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata slab_notifier = {
+ .notifier_call = slab_cpuup_callback
+};
+
+#endif
+
+#ifdef CONFIG_SLQB_DEBUG
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
+{
+ struct kmem_cache *s;
+ int node = -1;
+
+ s = get_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
+ return s;
+
+#ifdef CONFIG_NUMA
+ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
+ node = alternate_nid(s, flags, node);
+#endif
+ return slab_alloc(s, flags, node, caller);
+}
+
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+ unsigned long caller)
+{
+ struct kmem_cache *s;
+
+ s = get_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
+ return s;
+
+ return slab_alloc(s, flags, node, caller);
+}
+#endif
+
+#if defined(CONFIG_SLQB_SYSFS) || defined(CONFIG_SLABINFO)
+struct stats_gather {
+ struct kmem_cache *s;
+ spinlock_t lock;
+ unsigned long nr_slabs;
+ unsigned long nr_partial;
+ unsigned long nr_inuse;
+ unsigned long nr_objects;
+
+#ifdef CONFIG_SLQB_STATS
+ unsigned long stats[NR_SLQB_STAT_ITEMS];
+#endif
+};
+
+static void __gather_stats(void *arg)
+{
+ unsigned long nr_slabs;
+ unsigned long nr_partial;
+ unsigned long nr_inuse;
+ struct stats_gather *gather = arg;
+ int cpu = smp_processor_id();
+ struct kmem_cache *s = gather->s;
+ struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+ struct kmem_cache_list *l = &c->list;
+ struct slqb_page *page;
+#ifdef CONFIG_SLQB_STATS
+ int i;
+#endif
+
+ spin_lock(&l->page_lock);
+ nr_slabs = l->nr_slabs;
+ nr_partial = l->nr_partial;
+ nr_inuse = (nr_slabs - nr_partial) * s->objects;
+
+ list_for_each_entry(page, &l->partial, lru) {
+ nr_inuse += page->inuse;
+ }
+ spin_unlock(&l->page_lock);
+
+ spin_lock(&gather->lock);
+ gather->nr_slabs += nr_slabs;
+ gather->nr_partial += nr_partial;
+ gather->nr_inuse += nr_inuse;
+#ifdef CONFIG_SLQB_STATS
+ for (i = 0; i < NR_SLQB_STAT_ITEMS; i++)
+ gather->stats[i] += l->stats[i];
+#endif
+ spin_unlock(&gather->lock);
+}
+
+/* must be called with slqb_lock held */
+static void gather_stats_locked(struct kmem_cache *s,
+ struct stats_gather *stats)
+{
+#ifdef CONFIG_NUMA
+ int node;
+#endif
+
+ memset(stats, 0, sizeof(struct stats_gather));
+ stats->s = s;
+ spin_lock_init(&stats->lock);
+
+ on_each_cpu(__gather_stats, stats, 1);
+
+#ifdef CONFIG_NUMA
+ for_each_online_node(node) {
+ struct kmem_cache_node *n = s->node_slab[node];
+ struct kmem_cache_list *l = &n->list;
+ struct slqb_page *page;
+ unsigned long flags;
+#ifdef CONFIG_SLQB_STATS
+ int i;
+#endif
+
+ spin_lock_irqsave(&n->list_lock, flags);
+#ifdef CONFIG_SLQB_STATS
+ for (i = 0; i < NR_SLQB_STAT_ITEMS; i++)
+ stats->stats[i] += l->stats[i];
+#endif
+ stats->nr_slabs += l->nr_slabs;
+ stats->nr_partial += l->nr_partial;
+ stats->nr_inuse += (l->nr_slabs - l->nr_partial) * s->objects;
+
+ list_for_each_entry(page, &l->partial, lru) {
+ stats->nr_inuse += page->inuse;
+ }
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ }
+#endif
+
+ stats->nr_objects = stats->nr_slabs * s->objects;
+}
+
+#ifdef CONFIG_SLQB_SYSFS
+static void gather_stats(struct kmem_cache *s, struct stats_gather *stats)
+{
+ down_read(&slqb_lock); /* hold off hotplug */
+ gather_stats_locked(s, stats);
+ up_read(&slqb_lock);
+}
+#endif
+#endif
+
+/*
+ * The /proc/slabinfo ABI
+ */
+#ifdef CONFIG_SLABINFO
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+ size_t count, loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+static void print_slabinfo_header(struct seq_file *m)
+{
+ seq_puts(m, "slabinfo - version: 2.1\n");
+ seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+ "<objperslab> <pagesperslab>");
+ seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+ seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+ seq_putc(m, '\n');
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+
+ down_read(&slqb_lock);
+ if (!n)
+ print_slabinfo_header(m);
+
+ return seq_list_start(&slab_caches, *pos);
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &slab_caches, pos);
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+ up_read(&slqb_lock);
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+ struct stats_gather stats;
+ struct kmem_cache *s;
+
+ s = list_entry(p, struct kmem_cache, list);
+
+ gather_stats_locked(s, &stats);
+
+ seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, stats.nr_inuse,
+ stats.nr_objects, s->size, s->objects, (1 << s->order));
+ seq_printf(m, " : tunables %4u %4u %4u", slab_hiwater(s),
+ slab_freebatch(s), 0);
+ seq_printf(m, " : slabdata %6lu %6lu %6lu", stats.nr_slabs,
+ stats.nr_slabs, 0UL);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static const struct seq_operations slabinfo_op = {
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = s_show,
+};
+
+static int slabinfo_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &slabinfo_op);
+}
+
+static const struct file_operations proc_slabinfo_operations = {
+ .open = slabinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init slab_proc_init(void)
+{
+ proc_create("slabinfo", S_IWUSR|S_IRUGO, NULL,
+ &proc_slabinfo_operations);
+ return 0;
+}
+module_init(slab_proc_init);
+#endif /* CONFIG_SLABINFO */
+
+#ifdef CONFIG_SLQB_SYSFS
+/*
+ * sysfs API
+ */
+#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
+#define to_slab(n) container_of(n, struct kmem_cache, kobj)
+
+struct slab_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kmem_cache *s, char *buf);
+ ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
+};
+
+#define SLAB_ATTR_RO(_name) \
+ static struct slab_attribute _name##_attr = __ATTR_RO(_name)
+
+#define SLAB_ATTR(_name) \
+ static struct slab_attribute _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", s->size);
+}
+SLAB_ATTR_RO(slab_size);
+
+static ssize_t align_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", s->align);
+}
+SLAB_ATTR_RO(align);
+
+static ssize_t object_size_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", s->objsize);
+}
+SLAB_ATTR_RO(object_size);
+
+static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", s->objects);
+}
+SLAB_ATTR_RO(objs_per_slab);
+
+static ssize_t order_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", s->order);
+}
+SLAB_ATTR_RO(order);
+
+static ssize_t ctor_show(struct kmem_cache *s, char *buf)
+{
+ if (s->ctor) {
+ int n = sprint_symbol(buf, (unsigned long)s->ctor);
+
+ return n + sprintf(buf + n, "\n");
+ }
+ return 0;
+}
+SLAB_ATTR_RO(ctor);
+
+static ssize_t slabs_show(struct kmem_cache *s, char *buf)
+{
+ struct stats_gather stats;
+
+ gather_stats(s, &stats);
+
+ return sprintf(buf, "%lu\n", stats.nr_slabs);
+}
+SLAB_ATTR_RO(slabs);
+
+static ssize_t objects_show(struct kmem_cache *s, char *buf)
+{
+ struct stats_gather stats;
+
+ gather_stats(s, &stats);
+
+ return sprintf(buf, "%lu\n", stats.nr_inuse);
+}
+SLAB_ATTR_RO(objects);
+
+static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
+{
+ struct stats_gather stats;
+
+ gather_stats(s, &stats);
+
+ return sprintf(buf, "%lu\n", stats.nr_objects);
+}
+SLAB_ATTR_RO(total_objects);
+
+static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
+}
+SLAB_ATTR_RO(reclaim_account);
+
+static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
+}
+SLAB_ATTR_RO(hwcache_align);
+
+#ifdef CONFIG_ZONE_DMA
+static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
+}
+SLAB_ATTR_RO(cache_dma);
+#endif
+
+static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+}
+SLAB_ATTR_RO(destroy_by_rcu);
+
+static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
+}
+SLAB_ATTR_RO(red_zone);
+
+static ssize_t poison_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
+}
+SLAB_ATTR_RO(poison);
+
+static ssize_t store_user_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
+}
+SLAB_ATTR_RO(store_user);
+
+static ssize_t hiwater_store(struct kmem_cache *s,
+ const char *buf, size_t length)
+{
+ long hiwater;
+ int err;
+
+ err = strict_strtol(buf, 10, &hiwater);
+ if (err)
+ return err;
+
+ if (hiwater < 0)
+ return -EINVAL;
+
+ s->hiwater = hiwater;
+
+ return length;
+}
+
+static ssize_t hiwater_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", slab_hiwater(s));
+}
+SLAB_ATTR(hiwater);
+
+static ssize_t freebatch_store(struct kmem_cache *s,
+ const char *buf, size_t length)
+{
+ long freebatch;
+ int err;
+
+ err = strict_strtol(buf, 10, &freebatch);
+ if (err)
+ return err;
+
+ if (freebatch <= 0 || freebatch - 1 > s->hiwater)
+ return -EINVAL;
+
+ s->freebatch = freebatch;
+
+ return length;
+}
+
+static ssize_t freebatch_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", slab_freebatch(s));
+}
+SLAB_ATTR(freebatch);
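+
+/*
+ * Usage sketch (assuming the default kset location under /sys/kernel): the
+ * two writable attributes can be tuned at run time, for example
+ *
+ *   echo 4096 > /sys/kernel/slab/kmalloc-256/hiwater
+ *   echo 512 > /sys/kernel/slab/kmalloc-256/freebatch
+ *
+ * subject to the check above that freebatch - 1 must not exceed hiwater.
+ */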
+
+#ifdef CONFIG_SLQB_STATS
+static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
+{
+ struct stats_gather stats;
+ int len;
+#ifdef CONFIG_SMP
+ int cpu;
+#endif
+
+ gather_stats(s, &stats);
+
+ len = sprintf(buf, "%lu", stats.stats[si]);
+
+#ifdef CONFIG_SMP
+ for_each_online_cpu(cpu) {
+ struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+ struct kmem_cache_list *l = &c->list;
+
+ if (len < PAGE_SIZE - 20)
+ len += sprintf(buf+len, " C%d=%lu", cpu, l->stats[si]);
+ }
+#endif
+ return len + sprintf(buf + len, "\n");
+}
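+
+/*
+ * Example output (illustrative): reading one of the statistics files below,
+ * e.g. "free", yields the cache-wide total followed by a per-CPU breakdown,
+ * such as "12845 C0=6301 C1=6544" on a two-CPU machine.
+ */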
+
+#define STAT_ATTR(si, text) \
+static ssize_t text##_show(struct kmem_cache *s, char *buf) \
+{ \
+ return show_stat(s, buf, si); \
+} \
+SLAB_ATTR_RO(text); \
+
+STAT_ATTR(ALLOC, alloc);
+STAT_ATTR(ALLOC_SLAB_FILL, alloc_slab_fill);
+STAT_ATTR(ALLOC_SLAB_NEW, alloc_slab_new);
+STAT_ATTR(FREE, free);
+STAT_ATTR(FREE_REMOTE, free_remote);
+STAT_ATTR(FLUSH_FREE_LIST, flush_free_list);
+STAT_ATTR(FLUSH_FREE_LIST_OBJECTS, flush_free_list_objects);
+STAT_ATTR(FLUSH_FREE_LIST_REMOTE, flush_free_list_remote);
+STAT_ATTR(FLUSH_SLAB_PARTIAL, flush_slab_partial);
+STAT_ATTR(FLUSH_SLAB_FREE, flush_slab_free);
+STAT_ATTR(FLUSH_RFREE_LIST, flush_rfree_list);
+STAT_ATTR(FLUSH_RFREE_LIST_OBJECTS, flush_rfree_list_objects);
+STAT_ATTR(CLAIM_REMOTE_LIST, claim_remote_list);
+STAT_ATTR(CLAIM_REMOTE_LIST_OBJECTS, claim_remote_list_objects);
+#endif
+
+static struct attribute *slab_attrs[] = {
+ &slab_size_attr.attr,
+ &object_size_attr.attr,
+ &objs_per_slab_attr.attr,
+ &order_attr.attr,
+ &objects_attr.attr,
+ &total_objects_attr.attr,
+ &slabs_attr.attr,
+ &ctor_attr.attr,
+ &align_attr.attr,
+ &hwcache_align_attr.attr,
+ &reclaim_account_attr.attr,
+ &destroy_by_rcu_attr.attr,
+ &red_zone_attr.attr,
+ &poison_attr.attr,
+ &store_user_attr.attr,
+ &hiwater_attr.attr,
+ &freebatch_attr.attr,
+#ifdef CONFIG_ZONE_DMA
+ &cache_dma_attr.attr,
+#endif
+#ifdef CONFIG_SLQB_STATS
+ &alloc_attr.attr,
+ &alloc_slab_fill_attr.attr,
+ &alloc_slab_new_attr.attr,
+ &free_attr.attr,
+ &free_remote_attr.attr,
+ &flush_free_list_attr.attr,
+ &flush_free_list_objects_attr.attr,
+ &flush_free_list_remote_attr.attr,
+ &flush_slab_partial_attr.attr,
+ &flush_slab_free_attr.attr,
+ &flush_rfree_list_attr.attr,
+ &flush_rfree_list_objects_attr.attr,
+ &claim_remote_list_attr.attr,
+ &claim_remote_list_objects_attr.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group slab_attr_group = {
+ .attrs = slab_attrs,
+};
+
+static ssize_t slab_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct slab_attribute *attribute;
+ struct kmem_cache *s;
+ int err;
+
+ attribute = to_slab_attr(attr);
+ s = to_slab(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ err = attribute->show(s, buf);
+
+ return err;
+}
+
+static ssize_t slab_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t len)
+{
+ struct slab_attribute *attribute;
+ struct kmem_cache *s;
+ int err;
+
+ attribute = to_slab_attr(attr);
+ s = to_slab(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ err = attribute->store(s, buf, len);
+
+ return err;
+}
+
+static void kmem_cache_release(struct kobject *kobj)
+{
+ struct kmem_cache *s = to_slab(kobj);
+
+ kmem_cache_free(&kmem_cache_cache, s);
+}
+
+static struct sysfs_ops slab_sysfs_ops = {
+ .show = slab_attr_show,
+ .store = slab_attr_store,
+};
+
+static struct kobj_type slab_ktype = {
+ .sysfs_ops = &slab_sysfs_ops,
+ .release = kmem_cache_release
+};
+
+static int uevent_filter(struct kset *kset, struct kobject *kobj)
+{
+ struct kobj_type *ktype = get_ktype(kobj);
+
+ if (ktype == &slab_ktype)
+ return 1;
+ return 0;
+}
+
+static struct kset_uevent_ops slab_uevent_ops = {
+ .filter = uevent_filter,
+};
+
+static struct kset *slab_kset;
+
+static int sysfs_available __read_mostly;
+
+static int sysfs_slab_add(struct kmem_cache *s)
+{
+ int err;
+
+ if (!sysfs_available)
+ return 0;
+
+ s->kobj.kset = slab_kset;
+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, s->name);
+ if (err) {
+ kobject_put(&s->kobj);
+ return err;
+ }
+
+ err = sysfs_create_group(&s->kobj, &slab_attr_group);
+ if (err)
+ return err;
+
+ kobject_uevent(&s->kobj, KOBJ_ADD);
+
+ return 0;
+}
+
+static void sysfs_slab_remove(struct kmem_cache *s)
+{
+ kobject_uevent(&s->kobj, KOBJ_REMOVE);
+ kobject_del(&s->kobj);
+ kobject_put(&s->kobj);
+}
+
+static int __init slab_sysfs_init(void)
+{
+ struct kmem_cache *s;
+ int err;
+
+ slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
+ if (!slab_kset) {
+ printk(KERN_ERR "Cannot register slab subsystem.\n");
+ return -ENOSYS;
+ }
+
+ down_write(&slqb_lock);
+
+ sysfs_available = 1;
+
+ list_for_each_entry(s, &slab_caches, list) {
+ err = sysfs_slab_add(s);
+ if (err)
+ printk(KERN_ERR "SLQB: Unable to add boot slab %s"
+ " to sysfs\n", s->name);
+ }
+
+ up_write(&slqb_lock);
+
+ return 0;
+}
+device_initcall(slab_sysfs_init);
+
+#endif
diff --git a/mm/slub.c b/mm/slub.c
index 819f056b39c6..c5c5d39ba410 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2092,8 +2092,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
*/
#define NR_KMEM_CACHE_CPU 100
-static DEFINE_PER_CPU(struct kmem_cache_cpu,
- kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
+static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
+ kmem_cache_cpu);
static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
@@ -2595,6 +2595,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
*/
void kmem_cache_destroy(struct kmem_cache *s)
{
+ if (s->flags & SLAB_DESTROY_BY_RCU)
+ rcu_barrier();
down_write(&slub_lock);
s->refcount--;
if (!s->refcount) {
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 42cd38eba79f..5ae6b8b78c80 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -34,6 +34,7 @@ static const struct address_space_operations swap_aops = {
};
static struct backing_dev_info swap_backing_dev_info = {
+ .name = "swap",
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
.unplug_io_fn = swap_unplug_io_fn,
};
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 54155268dfca..27558aa0b10a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1715,7 +1715,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
*/
if (total_scanned > sc->swap_cluster_max +
sc->swap_cluster_max / 2) {
- wakeup_pdflush(laptop_mode ? 0 : total_scanned);
+ wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
sc->may_writepage = 1;
}
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index a2a1814c7a8d..8c2588e4edc0 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -735,12 +735,14 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
if (!*p)
continue;
token = match_token(p, tokens, args);
- r = match_int(&args[0], &option);
- if (r < 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
- ret = r;
- continue;
+ if (token != Opt_err) {
+ r = match_int(&args[0], &option);
+ if (r < 0) {
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
+ ret = r;
+ continue;
+ }
}
switch (token) {
case Opt_port:
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b18676870d55..a9f7afb6ee35 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -374,6 +374,7 @@ static void hidp_process_hid_control(struct hidp_session *session,
/* Kill session thread */
atomic_inc(&session->terminate);
+ hidp_schedule(session);
}
}
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index bd0a4c1bced0..7ce1a24735c8 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -50,7 +50,9 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
-#define VERSION "2.13"
+#define VERSION "2.14"
+
+static int enable_ertm = 0;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };
@@ -715,12 +717,16 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
pi->imtu = l2cap_pi(parent)->imtu;
pi->omtu = l2cap_pi(parent)->omtu;
+ pi->mode = l2cap_pi(parent)->mode;
+ pi->fcs = l2cap_pi(parent)->fcs;
pi->sec_level = l2cap_pi(parent)->sec_level;
pi->role_switch = l2cap_pi(parent)->role_switch;
pi->force_reliable = l2cap_pi(parent)->force_reliable;
} else {
pi->imtu = L2CAP_DEFAULT_MTU;
pi->omtu = 0;
+ pi->mode = L2CAP_MODE_BASIC;
+ pi->fcs = L2CAP_FCS_CRC16;
pi->sec_level = BT_SECURITY_LOW;
pi->role_switch = 0;
pi->force_reliable = 0;
@@ -956,6 +962,18 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
goto done;
}
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ if (enable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
switch (sk->sk_state) {
case BT_CONNECT:
case BT_CONNECT2:
@@ -1007,6 +1025,18 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
goto done;
}
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ if (enable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
if (!l2cap_pi(sk)->psm) {
bdaddr_t *src = &bt_sk(sk)->src;
u16 psm;
@@ -1257,7 +1287,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = L2CAP_MODE_BASIC;
+ opts.mode = l2cap_pi(sk)->mode;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -1265,8 +1295,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
- l2cap_pi(sk)->imtu = opts.imtu;
- l2cap_pi(sk)->omtu = opts.omtu;
+ l2cap_pi(sk)->imtu = opts.imtu;
+ l2cap_pi(sk)->omtu = opts.omtu;
+ l2cap_pi(sk)->mode = opts.mode;
break;
case L2CAP_LM:
@@ -1379,7 +1410,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = L2CAP_MODE_BASIC;
+ opts.mode = l2cap_pi(sk)->mode;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -1712,12 +1743,29 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
void *ptr = req->data;
BT_DBG("sk %p", sk);
- if (pi->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+ switch (pi->mode) {
+ case L2CAP_MODE_BASIC:
+ if (pi->imtu != L2CAP_DEFAULT_MTU)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+ break;
+
+ case L2CAP_MODE_ERTM:
+ rfc.mode = L2CAP_MODE_ERTM;
+ rfc.txwin_size = L2CAP_DEFAULT_RX_WINDOW;
+ rfc.max_transmit = L2CAP_DEFAULT_MAX_RECEIVE;
+ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_RX_APDU);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+ break;
+ }
/* FIXME: Need actual value of the flush timeout */
//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
@@ -1797,7 +1845,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
rfc.mode = L2CAP_MODE_BASIC;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc);
}
}
@@ -2205,10 +2253,13 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
if (type == L2CAP_IT_FEAT_MASK) {
u8 buf[8];
+ u32 feat_mask = l2cap_feat_mask;
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
- put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
+ if (enable_ertm)
+ feat_mask |= L2CAP_FEAT_ERTM;
+ put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else if (type == L2CAP_IT_FIXED_CHAN) {
@@ -2828,6 +2879,9 @@ EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);
+module_param(enable_ertm, bool, 0644);
+MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 51ae0c3e470a..13c27f17192c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -359,20 +359,9 @@ static void sco_sock_kill(struct sock *sk)
sock_put(sk);
}
-/* Close socket.
- * Must be called on unlocked socket.
- */
-static void sco_sock_close(struct sock *sk)
+static void __sco_sock_close(struct sock *sk)
{
- struct sco_conn *conn;
-
- sco_sock_clear_timer(sk);
-
- lock_sock(sk);
-
- conn = sco_pi(sk)->conn;
-
- BT_DBG("sk %p state %d conn %p socket %p", sk, sk->sk_state, conn, sk->sk_socket);
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
@@ -390,9 +379,15 @@ static void sco_sock_close(struct sock *sk)
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
+}
+/* Must be called on unlocked socket. */
+static void sco_sock_close(struct sock *sk)
+{
+ sco_sock_clear_timer(sk);
+ lock_sock(sk);
+ __sco_sock_close(sk);
release_sock(sk);
-
sco_sock_kill(sk);
}
@@ -748,6 +743,30 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
return err;
}
+static int sco_sock_shutdown(struct socket *sock, int how)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (!sk->sk_shutdown) {
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ sco_sock_clear_timer(sk);
+ __sco_sock_close(sk);
+
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+ err = bt_sock_wait_state(sk, BT_CLOSED,
+ sk->sk_lingertime);
+ }
+ release_sock(sk);
+ return err;
+}
+
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -969,7 +988,7 @@ static const struct proto_ops sco_sock_ops = {
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
- .shutdown = sock_no_shutdown,
+ .shutdown = sco_sock_shutdown,
.setsockopt = sco_sock_setsockopt,
.getsockopt = sco_sock_getsockopt
};
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 9aac5213105a..e1241c76239a 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -93,7 +93,7 @@ static void __exit br_deinit(void)
unregister_pernet_subsys(&br_net_ops);
- synchronize_net();
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
br_netfilter_fini();
#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
diff --git a/net/core/dev.c b/net/core/dev.c
index 60b572812278..70c27e0c7c32 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2823,9 +2823,11 @@ static void net_rx_action(struct softirq_action *h)
* move the instance around on the list at-will.
*/
if (unlikely(work == weight)) {
- if (unlikely(napi_disable_pending(n)))
- __napi_complete(n);
- else
+ if (unlikely(napi_disable_pending(n))) {
+ local_irq_enable();
+ napi_complete(n);
+ local_irq_disable();
+ } else
list_move_tail(&n->poll_list, list);
}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index d351b8db0df5..77d40289653c 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2413,6 +2413,8 @@ static void __exit decnet_exit(void)
proc_net_remove(&init_net, "decnet");
proto_unregister(&dn_proto);
+
+ rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */
}
module_exit(decnet_exit);
#endif
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 105ad10876af..27eda9fdf3c2 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -276,6 +276,9 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
else
return NULL;
+ if (!dev)
+ return NULL;
+
if (dev->type != ARPHRD_IEEE802154) {
dev_put(dev);
return NULL;
@@ -521,3 +524,6 @@ static void __exit ieee802154_nl_exit(void)
}
module_exit(ieee802154_nl_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ieee 802.15.4 configuration interface");
+
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8a3881e28aca..c29d75d8f1b1 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -801,11 +801,8 @@ static int arp_process(struct sk_buff *skb)
* cache.
*/
- /*
- * Special case: IPv4 duplicate address detection packet (RFC2131)
- * and Gratuitous ARP/ARP Announce. (RFC3927, Section 2.4)
- */
- if (sip == 0 || tip == sip) {
+ /* Special case: IPv4 duplicate address detection packet (RFC2131) */
+ if (sip == 0) {
if (arp->ar_op == htons(ARPOP_REQUEST) &&
inet_addr_type(net, tip) == RTN_LOCAL &&
!arp_ignore(in_dev, sip, tip))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 012cf5a68581..00a54b246dfe 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1021,6 +1021,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
(struct node *)tn, wasfull);
tp = node_parent((struct node *) tn);
+ if (!tp)
+ rcu_assign_pointer(t->trie, (struct node *)tn);
+
tnode_free_flush();
if (!tp)
break;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 490ce20faf38..db46b4b5b2b9 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -440,6 +440,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
/* Remove any debris in the socket control block */
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ /* Must drop socket now because of tproxy. */
+ skb_orphan(skb);
+
return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL,
ip_rcv_finish);
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 155c008626c8..09172a65d9b6 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -191,7 +191,8 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
ct, ctinfo);
/* Tell TCP window tracking about seq change */
nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
- ct, CTINFO2DIR(ctinfo));
+ ct, CTINFO2DIR(ctinfo),
+ (int)rep_len - (int)match_len);
nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
}
@@ -377,6 +378,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
struct tcphdr *tcph;
int dir;
__be32 newseq, newack;
+ s16 seqoff, ackoff;
struct nf_conn_nat *nat = nfct_nat(ct);
struct nf_nat_seq *this_way, *other_way;
@@ -390,15 +392,18 @@ nf_nat_seq_adjust(struct sk_buff *skb,
tcph = (void *)skb->data + ip_hdrlen(skb);
if (after(ntohl(tcph->seq), this_way->correction_pos))
- newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
+ seqoff = this_way->offset_after;
else
- newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
+ seqoff = this_way->offset_before;
if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
other_way->correction_pos))
- newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
+ ackoff = other_way->offset_after;
else
- newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
+ ackoff = other_way->offset_before;
+
+ newseq = htonl(ntohl(tcph->seq) + seqoff);
+ newack = htonl(ntohl(tcph->ack_seq) - ackoff);
inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
@@ -413,7 +418,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
return 0;
- nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir);
+ nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
return 1;
}
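
For context on the arithmetic above: the seq/ack offsets are chosen by comparing sequence numbers with the kernel's after() helper, which works modulo 2^32, and the selected signed offset is then applied to an unsigned 32-bit sequence number. A small userspace sketch of both points (the after() macro is re-created here purely for illustration; it is not taken from this patch):

#include <stdint.h>
#include <stdio.h>

/* same idea as the kernel's after(): true if a is later than b, modulo 2^32 */
#define after(a, b) ((int32_t)((b) - (a)) < 0)

int main(void)
{
        uint32_t correction_pos = 0xfffffff0u;  /* NAT rewrite happened just before wrap */
        uint32_t seq = 0x00000010u;             /* this packet's seq has already wrapped */
        int16_t seqoff = -64;                   /* offset_before/offset_after analogue */
        uint32_t newseq = seq + seqoff;         /* wraps back, exactly as intended */

        printf("after(seq, correction_pos) = %d\n", after(seq, correction_pos)); /* 1 */
        printf("newseq = 0x%08x\n", (unsigned)newseq);                           /* 0xffffffd0 */
        return 0;
}
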
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index cd2b97f1b6e1..a6e0e077ac33 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -37,12 +37,13 @@ __initcall(init_syncookies);
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+ ipv4_cookie_scratch);
static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
u32 count, int c)
{
- __u32 *tmp = __get_cpu_var(cookie_scratch);
+ __u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
tmp[0] = (__force u32)saddr;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 17b89c523f9d..7870a535dac6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -903,13 +903,17 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
iov++;
while (seglen > 0) {
- int copy;
+ int copy = 0;
+ int max = size_goal;
skb = tcp_write_queue_tail(sk);
+ if (tcp_send_head(sk)) {
+ if (skb->ip_summed == CHECKSUM_NONE)
+ max = mss_now;
+ copy = max - skb->len;
+ }
- if (!tcp_send_head(sk) ||
- (copy = size_goal - skb->len) <= 0) {
-
+ if (copy <= 0) {
new_segment:
/* Allocate new segment. If the interface is SG,
* allocate skb fitting to single page.
@@ -930,6 +934,7 @@ new_segment:
skb_entail(sk, skb);
copy = size_goal;
+ max = size_goal;
}
/* Try to append data to the end of skb. */
@@ -1028,7 +1033,7 @@ new_segment:
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
- if (skb->len < size_goal || (flags & MSG_OOB))
+ if (skb->len < max || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 43bbba7926ee..f8d67ccc64f3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -128,7 +128,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
goto kill_with_rst;
/* Dup ACK? */
- if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
+ if (!th->ack ||
+ !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 416fc4c2e7eb..5bdf08d312d9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -725,7 +725,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
- if (skb->len <= mss_now || !sk_can_gso(sk)) {
+ if (skb->len <= mss_now || !sk_can_gso(sk) ||
+ skb->ip_summed == CHECKSUM_NONE) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 60d918c96a4f..0071ee6f441f 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -136,7 +136,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
case IPPROTO_TCP:
case IPPROTO_SCTP:
case IPPROTO_DCCP:
- if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
+ if (xprth + 4 < skb->data ||
+ pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ports = (__be16 *)xprth;
fl->fl_ip_sport = ports[!!reverse];
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8c1e86afbbf5..43b3c9f89c12 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1916,8 +1916,32 @@ ok:
update_lft = 1;
else if (stored_lft <= MIN_VALID_LIFETIME) {
/* valid_lft <= stored_lft is always true */
- /* XXX: IPsec */
- update_lft = 0;
+ /*
+ * RFC 4862 Section 5.5.3e:
+ * "Note that the preferred lifetime of
+ * the corresponding address is always
+ * reset to the Preferred Lifetime in
+ * the received Prefix Information
+ * option, regardless of whether the
+ * valid lifetime is also reset or
+ * ignored."
+ *
+ * So if the preferred lifetime in
+ * this advertisement is different
+ * than what we have stored, but the
+ * valid lifetime is invalid, just
+ * reset prefered_lft.
+ *
+ * We must set the valid lifetime
+ * to the stored lifetime since we'll
+ * be updating the timestamp below,
+ * else we'll set it back to the
+ * minimum.
+ */
+ if (prefered_lft != ifp->prefered_lft) {
+ valid_lft = stored_lft;
+ update_lft = 1;
+ }
} else {
valid_lft = MIN_VALID_LIFETIME;
if (valid_lft < prefered_lft)
@@ -3085,7 +3109,7 @@ restart:
spin_unlock(&ifp->lock);
continue;
} else if (age >= ifp->prefered_lft) {
- /* jiffies - ifp->tsamp > age >= ifp->prefered_lft */
+ /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
int deprecate = 0;
if (!(ifp->flags&IFA_F_DEPRECATED)) {
@@ -3362,7 +3386,10 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
valid = ifa->valid_lft;
if (preferred != INFINITY_LIFE_TIME) {
long tval = (jiffies - ifa->tstamp)/HZ;
- preferred -= tval;
+ if (preferred > tval)
+ preferred -= tval;
+ else
+ preferred = 0;
if (valid != INFINITY_LIFE_TIME)
valid -= tval;
}
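
The last hunk above exists because preferred is an unsigned 32-bit lifetime: once more time has elapsed than the lifetime allows, the old unconditional subtraction wrapped around to a huge bogus value instead of reporting zero. A minimal, purely illustrative userspace demo of the failure mode and the clamp:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t preferred = 100;       /* seconds of preferred lifetime left */
        uint32_t tval = 250;            /* seconds elapsed since ifa->tstamp */

        /* old behaviour: unconditional subtraction wraps to a huge lifetime */
        printf("unclamped: %u\n", preferred - tval);

        /* patched behaviour: clamp to zero once the lifetime has expired */
        uint32_t clamped = (preferred > tval) ? preferred - tval : 0;
        printf("clamped:   %u\n", clamped);
        return 0;
}
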
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 85b3d0036afd..caa0278d30a9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1284,6 +1284,8 @@ static void __exit inet6_exit(void)
proto_unregister(&udplitev6_prot);
proto_unregister(&udpv6_prot);
proto_unregister(&tcpv6_prot);
+
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
module_exit(inet6_exit);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index c3a07d75b5f5..6d6a4277c677 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -139,6 +139,9 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
rcu_read_unlock();
+ /* Must drop socket now because of tproxy. */
+ skb_orphan(skb);
+
return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL,
ip6_rcv_finish);
err:
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8c2513982b61..6b6ae913b5d4 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -74,12 +74,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
return child;
}
-static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+ ipv6_cookie_scratch);
static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
__be16 sport, __be16 dport, u32 count, int c)
{
- __u32 *tmp = __get_cpu_var(cookie_scratch);
+ __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
/*
* we have 320 bits of information to hash, copy in the remaining
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index b4b16a43f277..3a3c677bc0f2 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -157,7 +157,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);
- while (pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
+ while (nh + offset + 1 < skb->data ||
+ pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
nh = skb_network_header(skb);
exthdr = (struct ipv6_opt_hdr *)(nh + offset);
@@ -177,7 +178,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
case IPPROTO_TCP:
case IPPROTO_SCTP:
case IPPROTO_DCCP:
- if (!onlyproto && pskb_may_pull(skb, nh + offset + 4 - skb->data)) {
+ if (!onlyproto && (nh + offset + 4 < skb->data ||
+ pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
__be16 *ports = (__be16 *)exthdr;
fl->fl_ip_sport = ports[!!reverse];
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index fc712e60705d..11cf45bce38a 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -494,7 +494,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
* should it be using the interface and enqueuing
* frames at this very time on another CPU.
*/
- synchronize_rcu();
+ rcu_barrier(); /* Wait for RX path and call_rcu()'s */
skb_queue_purge(&sdata->u.mesh.skb_queue);
}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index afde8f991646..2032dfe25ca8 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -617,8 +617,10 @@ err1:
void nf_conntrack_expect_fini(struct net *net)
{
exp_proc_remove(net);
- if (net_eq(net, &init_net))
+ if (net_eq(net, &init_net)) {
+ rcu_barrier(); /* Wait for call_rcu() before destroy */
kmem_cache_destroy(nf_ct_expect_cachep);
+ }
nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
nf_ct_expect_hsize);
}
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 4b2c769d555f..fef95be334bd 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -186,6 +186,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
update_alloc_size(type);
mutex_unlock(&nf_ct_ext_type_mutex);
- synchronize_rcu();
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 33fc0a443f3d..97a82ba75376 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -720,8 +720,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
/* Caller must linearize skb at tcp header. */
void nf_conntrack_tcp_update(const struct sk_buff *skb,
unsigned int dataoff,
- struct nf_conn *ct,
- int dir)
+ struct nf_conn *ct, int dir,
+ s16 offset)
{
const struct tcphdr *tcph = (const void *)skb->data + dataoff;
const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir];
@@ -734,7 +734,7 @@ void nf_conntrack_tcp_update(const struct sk_buff *skb,
/*
* We have to worry for the ack in the reply packet only...
*/
- if (after(end, ct->proto.tcp.seen[dir].td_end))
+ if (ct->proto.tcp.seen[dir].td_end + offset == end)
ct->proto.tcp.seen[dir].td_end = end;
ct->proto.tcp.last_end = end;
spin_unlock_bh(&ct->lock);
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 0b7139f3dd78..fc581800698e 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -129,7 +129,7 @@ conntrack_addrcmp(const union nf_inet_addr *kaddr,
static inline bool
conntrack_mt_origsrc(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
@@ -138,7 +138,7 @@ conntrack_mt_origsrc(const struct nf_conn *ct,
static inline bool
conntrack_mt_origdst(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,
@@ -147,7 +147,7 @@ conntrack_mt_origdst(const struct nf_conn *ct,
static inline bool
conntrack_mt_replsrc(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
@@ -156,7 +156,7 @@ conntrack_mt_replsrc(const struct nf_conn *ct,
static inline bool
conntrack_mt_repldst(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
@@ -164,7 +164,7 @@ conntrack_mt_repldst(const struct nf_conn *ct,
}
static inline bool
-ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
+ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
const struct nf_conn *ct)
{
const struct nf_conntrack_tuple *tuple;
@@ -204,7 +204,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
static bool
conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
- const struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+ const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int statebit;
@@ -278,6 +278,16 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
+static bool
+conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
+{
+ const struct xt_conntrack_mtinfo2 *const *info = par->matchinfo;
+ struct xt_match_param newpar = *par;
+
+ newpar.matchinfo = *info;
+ return conntrack_mt(skb, &newpar);
+}
+
static bool conntrack_mt_check(const struct xt_mtchk_param *par)
{
if (nf_ct_l3proto_try_module_get(par->family) < 0) {
@@ -288,11 +298,45 @@ static bool conntrack_mt_check(const struct xt_mtchk_param *par)
return true;
}
+static bool conntrack_mt_check_v1(const struct xt_mtchk_param *par)
+{
+ struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+ struct xt_conntrack_mtinfo2 *up;
+ int ret = conntrack_mt_check(par);
+
+ if (ret < 0)
+ return ret;
+
+ up = kmalloc(sizeof(*up), GFP_KERNEL);
+ if (up == NULL) {
+ nf_ct_l3proto_module_put(par->family);
+ return -ENOMEM;
+ }
+
+ /*
+ * The strategy here is to minimize the overhead of v1 matching,
+ * by prebuilding a v2 struct and putting the pointer into the
+ * v1 dataspace.
+ */
+ memcpy(up, info, offsetof(typeof(*info), state_mask));
+ up->state_mask = info->state_mask;
+ up->status_mask = info->status_mask;
+ *(void **)info = up;
+ return true;
+}
+
static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_l3proto_module_put(par->family);
}
+static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par)
+{
+ struct xt_conntrack_mtinfo2 **info = par->matchinfo;
+ kfree(*info);
+ conntrack_mt_destroy(par);
+}
+
#ifdef CONFIG_COMPAT
struct compat_xt_conntrack_info
{
@@ -363,6 +407,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
.revision = 1,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo1),
+ .match = conntrack_mt_v1,
+ .checkentry = conntrack_mt_check_v1,
+ .destroy = conntrack_mt_destroy_v1,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "conntrack",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .matchsize = sizeof(struct xt_conntrack_mtinfo2),
.match = conntrack_mt,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 80a322d77909..b0d6ddd82a9d 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -69,10 +69,27 @@ static struct phonet_device *__phonet_get(struct net_device *dev)
return NULL;
}
-static void __phonet_device_free(struct phonet_device *pnd)
+static void phonet_device_destroy(struct net_device *dev)
{
- list_del(&pnd->list);
- kfree(pnd);
+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
+ struct phonet_device *pnd;
+
+ ASSERT_RTNL();
+
+ spin_lock_bh(&pndevs->lock);
+ pnd = __phonet_get(dev);
+ if (pnd)
+ list_del(&pnd->list);
+ spin_unlock_bh(&pndevs->lock);
+
+ if (pnd) {
+ u8 addr;
+
+ for (addr = find_first_bit(pnd->addrs, 64); addr < 64;
+ addr = find_next_bit(pnd->addrs, 64, 1+addr))
+ phonet_address_notify(RTM_DELADDR, dev, addr);
+ kfree(pnd);
+ }
}
struct net_device *phonet_device_get(struct net *net)
@@ -126,8 +143,10 @@ int phonet_address_del(struct net_device *dev, u8 addr)
pnd = __phonet_get(dev);
if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs))
err = -EADDRNOTAVAIL;
- else if (bitmap_empty(pnd->addrs, 64))
- __phonet_device_free(pnd);
+ else if (bitmap_empty(pnd->addrs, 64)) {
+ list_del(&pnd->list);
+ kfree(pnd);
+ }
spin_unlock_bh(&pndevs->lock);
return err;
}
@@ -181,18 +200,8 @@ static int phonet_device_notify(struct notifier_block *me, unsigned long what,
{
struct net_device *dev = arg;
- if (what == NETDEV_UNREGISTER) {
- struct phonet_device_list *pndevs;
- struct phonet_device *pnd;
-
- /* Destroy phonet-specific device data */
- pndevs = phonet_device_list(dev_net(dev));
- spin_lock_bh(&pndevs->lock);
- pnd = __phonet_get(dev);
- if (pnd)
- __phonet_device_free(pnd);
- spin_unlock_bh(&pndevs->lock);
- }
+ if (what == NETDEV_UNREGISTER)
+ phonet_device_destroy(dev);
return 0;
}
@@ -218,11 +227,12 @@ static int phonet_init_net(struct net *net)
static void phonet_exit_net(struct net *net)
{
struct phonet_net *pnn = net_generic(net, phonet_net_id);
- struct phonet_device *pnd, *n;
-
- list_for_each_entry_safe(pnd, n, &pnn->pndevs.list, list)
- __phonet_device_free(pnd);
+ struct net_device *dev;
+ rtnl_lock();
+ for_each_netdev(net, dev)
+ phonet_device_destroy(dev);
+ rtnl_unlock();
kfree(pnn);
}
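
phonet_device_destroy() above walks the per-device address bitmap with find_first_bit()/find_next_bit() and emits one RTM_DELADDR notification per configured address. The traversal is equivalent to the plain loop below (userspace sketch; the printf merely stands in for phonet_address_notify()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addrs = (1ULL << 4) | (1ULL << 20) | (1ULL << 63);

        /* same traversal as the find_first_bit()/find_next_bit() loop above */
        for (unsigned int addr = 0; addr < 64; addr++)
                if (addrs & (1ULL << addr))
                        printf("would notify RTM_DELADDR for address %u\n", addr);
        return 0;
}
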
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index cec4e5951681..f8b4cee434c2 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -32,7 +32,7 @@
static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
u32 pid, u32 seq, int event);
-static void rtmsg_notify(int event, struct net_device *dev, u8 addr)
+void phonet_address_notify(int event, struct net_device *dev, u8 addr)
{
struct sk_buff *skb;
int err = -ENOBUFS;
@@ -94,7 +94,7 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
else
err = phonet_address_del(dev, pnaddr);
if (!err)
- rtmsg_notify(nlh->nlmsg_type, dev, pnaddr);
+ phonet_address_notify(nlh->nlmsg_type, dev, pnaddr);
return err;
}
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 02e3e3d50d4a..301ae51ae409 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -37,7 +37,7 @@
#include "rds.h"
#include "ib.h"
-DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats) ____cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
static char *rds_ib_stat_names[] = {
"ib_connect_raced",
diff --git a/net/rds/iw_stats.c b/net/rds/iw_stats.c
index ccc7e8f0bf0e..fafea3cc92d7 100644
--- a/net/rds/iw_stats.c
+++ b/net/rds/iw_stats.c
@@ -37,7 +37,7 @@
#include "rds.h"
#include "iw.h"
-DEFINE_PER_CPU(struct rds_iw_statistics, rds_iw_stats) ____cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_iw_statistics, rds_iw_stats);
static char *rds_iw_stat_names[] = {
"iw_connect_raced",
diff --git a/net/rds/page.c b/net/rds/page.c
index c460743a89ad..de7bb84bcd78 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -39,7 +39,7 @@ struct rds_page_remainder {
unsigned long r_offset;
};
-DEFINE_PER_CPU(struct rds_page_remainder, rds_page_remainders) ____cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);
/*
* returns 0 on success or -errno on failure.
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b76411444515..b94c21190566 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -407,7 +407,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
}
dst = dst_clone(tp->dst);
skb_dst_set(nskb, dst);
- if (dst)
+ if (!dst)
goto no_route;
/* Build the SCTP header. */
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 843629f55763..adaa81982f74 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -66,6 +66,7 @@ cleanup_sunrpc(void)
#ifdef CONFIG_PROC_FS
rpc_proc_exit();
#endif
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
MODULE_LICENSE("GPL");
module_init(init_sunrpc);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 5151f9f6c573..0cf5e8c27a10 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -730,12 +730,12 @@ static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
goto err;
mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
- if (!mr)
+ if (IS_ERR(mr))
goto err_free_frmr;
pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
RPCSVC_MAXPAGES);
- if (!pl)
+ if (IS_ERR(pl))
goto err_free_mr;
frmr->mr = mr;
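
The two checks changed above matter because ib_alloc_fast_reg_mr() and ib_alloc_fast_reg_page_list() report failure with ERR_PTR-encoded pointers rather than NULL, so a "!ptr" test never fires. A userspace re-creation of the kernel's ERR_PTR/IS_ERR convention shows why (the macros below are copied in spirit, not verbatim):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* userspace re-creation of the kernel convention: errors are encoded as
 * pointers in the topmost 4095 values of the address space, so a failing
 * allocator returns a non-NULL pointer and "!ptr" never catches it. */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *alloc_that_fails(void)
{
        return ERR_PTR(-ENOMEM);        /* like ib_alloc_fast_reg_mr() on failure */
}

int main(void)
{
        void *mr = alloc_that_fails();

        printf("!mr        -> %d  (misses the error)\n", mr == NULL);
        printf("IS_ERR(mr) -> %d  (catches it)\n", (int)IS_ERR(mr));
        return 0;
}
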
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index d31ccb487730..faf54c6bf96b 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -292,8 +292,8 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
- .name = "cbc(cast128)",
- .compat = "cast128",
+ .name = "cbc(cast5)",
+ .compat = "cast5",
.uinfo = {
.encr = {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5f1f86565f16..f2f7c638083e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -668,22 +668,10 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
- x->id.proto != proto)
+ x->id.proto != proto ||
+ xfrm_addr_cmp(&x->id.daddr, daddr, family))
continue;
- switch (family) {
- case AF_INET:
- if (x->id.daddr.a4 != daddr->a4)
- continue;
- break;
- case AF_INET6:
- if (!ipv6_addr_equal((struct in6_addr *)daddr,
- (struct in6_addr *)
- x->id.daddr.a6))
- continue;
- break;
- }
-
xfrm_state_hold(x);
return x;
}
@@ -699,26 +687,11 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_addre
hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
if (x->props.family != family ||
- x->id.proto != proto)
+ x->id.proto != proto ||
+ xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
+ xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
- switch (family) {
- case AF_INET:
- if (x->id.daddr.a4 != daddr->a4 ||
- x->props.saddr.a4 != saddr->a4)
- continue;
- break;
- case AF_INET6:
- if (!ipv6_addr_equal((struct in6_addr *)daddr,
- (struct in6_addr *)
- x->id.daddr.a6) ||
- !ipv6_addr_equal((struct in6_addr *)saddr,
- (struct in6_addr *)
- x->props.saddr.a6))
- continue;
- break;
- }
-
xfrm_state_hold(x);
return x;
}
@@ -1001,25 +974,11 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
x->props.family != family ||
x->km.state != XFRM_STATE_ACQ ||
x->id.spi != 0 ||
- x->id.proto != proto)
+ x->id.proto != proto ||
+ xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
+ xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
- switch (family) {
- case AF_INET:
- if (x->id.daddr.a4 != daddr->a4 ||
- x->props.saddr.a4 != saddr->a4)
- continue;
- break;
- case AF_INET6:
- if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
- (struct in6_addr *)daddr) ||
- !ipv6_addr_equal((struct in6_addr *)
- x->props.saddr.a6,
- (struct in6_addr *)saddr))
- continue;
- break;
- }
-
xfrm_state_hold(x);
return x;
}
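
All three lookup loops above now delegate the per-family address comparison to xfrm_addr_cmp(), which treats an IPv4 address as one 32-bit word and an IPv6 address as a 128-bit block, returning nonzero on mismatch. A hedged userspace approximation of that shape (the union layout and names below are stand-ins, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the kernel's xfrm_address_t union */
typedef union {
        uint32_t a4;
        uint32_t a6[4];
} xfrm_addr;

enum { FAM_INET = 2, FAM_INET6 = 10 };  /* AF_INET / AF_INET6 stand-ins */

/* 0 on match, nonzero on mismatch -- the shape the lookup loops rely on */
static int addr_cmp(const xfrm_addr *a, const xfrm_addr *b, int family)
{
        if (family == FAM_INET)
                return a->a4 != b->a4;
        return memcmp(a->a6, b->a6, sizeof(a->a6)) != 0;
}

int main(void)
{
        xfrm_addr x = { .a4 = 0x0100007f }, y = { .a4 = 0x0100007f };

        printf("v4 mismatch? %d\n", addr_cmp(&x, &y, FAM_INET));        /* 0 */
        x.a4 ^= 1;
        printf("v4 mismatch? %d\n", addr_cmp(&x, &y, FAM_INET));        /* 1 */
        return 0;
}
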
diff --git a/scripts/.gitignore b/scripts/.gitignore
index b939fbd01195..52cab46ae35a 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -7,3 +7,4 @@ pnmtologo
bin2c
unifdef
binoffset
+ihex2fw
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index 72c15205bb2b..f9c1e5408318 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -16,8 +16,7 @@
* tells make when to remake a file.
*
* To use this list as-is however has the drawback that virtually
- * every file in the kernel includes <linux/config.h> which then again
- * includes <linux/autoconf.h>
+ * every file in the kernel includes <linux/autoconf.h>.
*
* If the user re-runs make *config, linux/autoconf.h will be
* regenerated. make notices that and will rebuild every file which
diff --git a/scripts/dtc/.gitignore b/scripts/dtc/.gitignore
new file mode 100644
index 000000000000..095acb49a374
--- /dev/null
+++ b/scripts/dtc/.gitignore
@@ -0,0 +1,5 @@
+dtc
+dtc-lexer.lex.c
+dtc-parser.tab.c
+dtc-parser.tab.h
+
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index ed591e9b7d1d..b52d340d759d 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1426,6 +1426,8 @@ sub dump_struct($$) {
# strip comments:
$members =~ s/\/\*.*?\*\///gos;
$nested =~ s/\/\*.*?\*\///gos;
+ # strip kmemcheck_bitfield_{begin,end}.*;
+ $members =~ s/kmemcheck_bitfield_.*?;//gos;
create_parameterlist($members, ';', $file);
check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
@@ -1468,8 +1470,6 @@ sub dump_enum($$) {
}
}
- # strip kmemcheck_bitfield_{begin,end}.*;
- $members =~ s/kmemcheck_bitfield_.*?;//gos;
output_declaration($declaration_name,
'enum',
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index aadc5223dcdb..ecf9c7dc1825 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -334,8 +334,6 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
deps_drivers/net/dummy.o := \
drivers/net/dummy.c \
$(wildcard include/config/net/fastroute.h) \
- include/linux/config.h \
- $(wildcard include/config/h.h) \
include/linux/module.h \
Sum all files in the same dir or subdirs.
diff --git a/scripts/module-common.lds b/scripts/module-common.lds
new file mode 100644
index 000000000000..47a1f9ae0ede
--- /dev/null
+++ b/scripts/module-common.lds
@@ -0,0 +1,8 @@
+/*
+ * Common module linker script, always used when linking a module.
+ * Archs are free to supply their own linker scripts. ld will
+ * combine them automatically.
+ */
+SECTIONS {
+ /DISCARD/ : { *(.discard) }
+}
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 01c2d13dd020..b19f1f4962e3 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -16,6 +16,8 @@ create_package() {
local pname="$1" pdir="$2"
cp debian/copyright "$pdir/usr/share/doc/$pname/"
+ cp debian/changelog "$pdir/usr/share/doc/$pname/changelog.Debian"
+ gzip -9 "$pdir/usr/share/doc/$pname/changelog.Debian"
# Fix ownership and permissions
chown -R root:root "$pdir"
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
index 64f5ddb09ea6..5c113123ed9f 100644
--- a/scripts/pnmtologo.c
+++ b/scripts/pnmtologo.c
@@ -237,7 +237,7 @@ static void write_header(void)
fprintf(out, " * Linux logo %s\n", logoname);
fputs(" */\n\n", out);
fputs("#include <linux/linux_logo.h>\n\n", out);
- fprintf(out, "static const unsigned char %s_data[] __initconst = {\n",
+ fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
logoname);
}
@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
fputs("\n};\n\n", out);
/* write logo clut */
- fprintf(out, "static const unsigned char %s_clut[] __initconst = {\n",
+ fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
logoname);
write_hex_cnt = 0;
for (i = 0; i < logo_clutsize; i++) {
diff --git a/security/capability.c b/security/capability.c
index 21b6cead6a8e..f218dd361647 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -863,7 +863,7 @@ struct security_operations default_security_ops = {
void security_fixup_ops(struct security_operations *ops)
{
- set_to_cap_if_null(ops, ptrace_may_access);
+ set_to_cap_if_null(ops, ptrace_access_check);
set_to_cap_if_null(ops, ptrace_traceme);
set_to_cap_if_null(ops, capget);
set_to_cap_if_null(ops, capset);
diff --git a/security/commoncap.c b/security/commoncap.c
index 48b7e0228fa3..aa97704564d4 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -101,7 +101,7 @@ int cap_settime(struct timespec *ts, struct timezone *tz)
}
/**
- * cap_ptrace_may_access - Determine whether the current process may access
+ * cap_ptrace_access_check - Determine whether the current process may access
* another
* @child: The process to be accessed
* @mode: The mode of attachment.
@@ -109,7 +109,7 @@ int cap_settime(struct timespec *ts, struct timezone *tz)
* Determine whether a process may access another, returning 0 if permission
* granted, -ve if denied.
*/
-int cap_ptrace_may_access(struct task_struct *child, unsigned int mode)
+int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
int ret = 0;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 6f611874d10e..101c512564ec 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -238,7 +238,34 @@ out:
}
/*
- * ima_opens_get - increment file counts
+ * ima_counts_put - decrement file counts
+ *
+ * File counts are incremented in ima_path_check. On file open
+ * error, such as ETXTBSY, decrement the counts to prevent
+ * unnecessary imbalance messages.
+ */
+void ima_counts_put(struct path *path, int mask)
+{
+ struct inode *inode = path->dentry->d_inode;
+ struct ima_iint_cache *iint;
+
+ if (!ima_initialized || !S_ISREG(inode->i_mode))
+ return;
+ iint = ima_iint_find_insert_get(inode);
+ if (!iint)
+ return;
+
+ mutex_lock(&iint->mutex);
+ iint->opencount--;
+ if ((mask & MAY_WRITE) || (mask == 0))
+ iint->writecount--;
+ else if (mask & (MAY_READ | MAY_EXEC))
+ iint->readcount--;
+ mutex_unlock(&iint->mutex);
+}
+
+/*
+ * ima_counts_get - increment file counts
*
* - for IPC shm and shmat file.
* - for nfsd exported files.
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 7ec94314ac0c..a0880e9c8e05 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -134,7 +134,8 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
out:
mutex_unlock(&ima_extend_list_mutex);
- integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, entry->template_name,
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
+ entry->template.file_name,
op, audit_cause, result, audit_info);
return result;
}
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 769f9bdfd2b3..39793c774f33 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -120,6 +120,7 @@ static int proc_keys_open(struct inode *inode, struct file *file)
}
static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
+ __acquires(key_serial_lock)
{
struct rb_node *_p;
loff_t pos = *_pos;
@@ -144,6 +145,7 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
}
static void proc_keys_stop(struct seq_file *p, void *v)
+ __releases(key_serial_lock)
{
spin_unlock(&key_serial_lock);
}
@@ -257,6 +259,7 @@ static int proc_key_users_open(struct inode *inode, struct file *file)
}
static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
+ __acquires(key_user_lock)
{
struct rb_node *_p;
loff_t pos = *_pos;
@@ -281,6 +284,7 @@ static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
}
static void proc_key_users_stop(struct seq_file *p, void *v)
+ __releases(key_user_lock)
{
spin_unlock(&key_user_lock);
}
diff --git a/security/security.c b/security/security.c
index dc7674fbfc7a..4501c5e1f988 100644
--- a/security/security.c
+++ b/security/security.c
@@ -124,9 +124,9 @@ int register_security(struct security_operations *ops)
/* Security operations */
-int security_ptrace_may_access(struct task_struct *child, unsigned int mode)
+int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
- return security_ops->ptrace_may_access(child, mode);
+ return security_ops->ptrace_access_check(child, mode);
}
int security_ptrace_traceme(struct task_struct *parent)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index b2ab60859832..236aaa2ea86d 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -137,7 +137,7 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
* @tclass: target security class
* @av: access vector
*/
-void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
+static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
{
const char **common_pts = NULL;
u32 common_base = 0;
@@ -970,3 +970,9 @@ u32 avc_policy_seqno(void)
{
return avc_cache.latest_notif;
}
+
+void avc_disable(void)
+{
+ if (avc_node_cachep)
+ kmem_cache_destroy(avc_node_cachep);
+}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 15c2a08a66f1..2081055f6783 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1854,12 +1854,12 @@ static inline u32 open_file_to_av(struct file *file)
/* Hook functions begin here. */
-static int selinux_ptrace_may_access(struct task_struct *child,
+static int selinux_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
int rc;
- rc = cap_ptrace_may_access(child, mode);
+ rc = cap_ptrace_access_check(child, mode);
if (rc)
return rc;
@@ -2938,11 +2938,6 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
const struct cred *cred = current_cred();
struct inode *inode = file->f_path.dentry->d_inode;
- if (!mask) {
- /* No permission to check. Existence test. */
- return 0;
- }
-
/* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
mask |= MAY_APPEND;
@@ -2953,10 +2948,20 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
static int selinux_file_permission(struct file *file, int mask)
{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct file_security_struct *fsec = file->f_security;
+ struct inode_security_struct *isec = inode->i_security;
+ u32 sid = current_sid();
+
if (!mask)
/* No permission to check. Existence test. */
return 0;
+ if (sid == fsec->sid && fsec->isid == isec->sid &&
+ fsec->pseqno == avc_policy_seqno())
+ /* No change since dentry_open check. */
+ return 0;
+
return selinux_revalidate_file_permission(file, mask);
}
@@ -5310,7 +5315,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
static struct security_operations selinux_ops = {
.name = "selinux",
- .ptrace_may_access = selinux_ptrace_may_access,
+ .ptrace_access_check = selinux_ptrace_access_check,
.ptrace_traceme = selinux_ptrace_traceme,
.capget = selinux_capget,
.capset = selinux_capset,
@@ -5678,6 +5683,9 @@ int selinux_disable(void)
selinux_disabled = 1;
selinux_enabled = 0;
+ /* Try to destroy the avc node cache */
+ avc_disable();
+
/* Reset security_ops to the secondary module, dummy or capability. */
security_ops = secondary_ops;
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index d12ff1a9c0aa..ae4c3a0e2c1a 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -127,13 +127,13 @@ int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
u32 events, u32 ssid, u32 tsid,
u16 tclass, u32 perms);
-/* Shows permission in human readable form */
-void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av);
-
/* Exported to selinuxfs */
int avc_get_hash_stats(char *page);
extern unsigned int avc_cache_threshold;
+/* Attempt to free avc node cache */
+void avc_disable(void);
+
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
#endif
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 500e6f78e115..ff17820d35ec 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -22,6 +22,11 @@
*
* Added validation of kernel classes and permissions
*
+ * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * Added support for bounds domain and audit messages on masked permissions
+ *
+ * Copyright (C) 2008, 2009 NEC Corporation
* Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
* Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC
@@ -279,6 +284,95 @@ mls_ops:
}
/*
+ * security_dump_masked_av - dumps masked permissions during
+ * security_compute_av due to RBAC, MLS/Constraint and Type bounds.
+ */
+static int dump_masked_av_helper(void *k, void *d, void *args)
+{
+ struct perm_datum *pdatum = d;
+ char **permission_names = args;
+
+ BUG_ON(pdatum->value < 1 || pdatum->value > 32);
+
+ permission_names[pdatum->value - 1] = (char *)k;
+
+ return 0;
+}
+
+static void security_dump_masked_av(struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ u32 permissions,
+ const char *reason)
+{
+ struct common_datum *common_dat;
+ struct class_datum *tclass_dat;
+ struct audit_buffer *ab;
+ char *tclass_name;
+ char *scontext_name = NULL;
+ char *tcontext_name = NULL;
+ char *permission_names[32];
+ int index, length;
+ bool need_comma = false;
+
+ if (!permissions)
+ return;
+
+ tclass_name = policydb.p_class_val_to_name[tclass - 1];
+ tclass_dat = policydb.class_val_to_struct[tclass - 1];
+ common_dat = tclass_dat->comdatum;
+
+ /* init permission_names */
+ if (common_dat &&
+ hashtab_map(common_dat->permissions.table,
+ dump_masked_av_helper, permission_names) < 0)
+ goto out;
+
+ if (hashtab_map(tclass_dat->permissions.table,
+ dump_masked_av_helper, permission_names) < 0)
+ goto out;
+
+ /* get scontext/tcontext in text form */
+ if (context_struct_to_string(scontext,
+ &scontext_name, &length) < 0)
+ goto out;
+
+ if (context_struct_to_string(tcontext,
+ &tcontext_name, &length) < 0)
+ goto out;
+
+ /* audit a message */
+ ab = audit_log_start(current->audit_context,
+ GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ if (!ab)
+ goto out;
+
+ audit_log_format(ab, "op=security_compute_av reason=%s "
+ "scontext=%s tcontext=%s tclass=%s perms=",
+ reason, scontext_name, tcontext_name, tclass_name);
+
+ for (index = 0; index < 32; index++) {
+ u32 mask = (1 << index);
+
+ if ((mask & permissions) == 0)
+ continue;
+
+ audit_log_format(ab, "%s%s",
+ need_comma ? "," : "",
+ permission_names[index]
+ ? permission_names[index] : "????");
+ need_comma = true;
+ }
+ audit_log_end(ab);
+out:
+ /* release scontext/tcontext */
+ kfree(tcontext_name);
+ kfree(scontext_name);
+
+ return;
+}
+
+/*
* security_boundary_permission - drops violated permissions
* on boundary constraint.
*/
@@ -347,28 +441,12 @@ static void type_attribute_bounds_av(struct context *scontext,
}
if (masked) {
- struct audit_buffer *ab;
- char *stype_name
- = policydb.p_type_val_to_name[source->value - 1];
- char *ttype_name
- = policydb.p_type_val_to_name[target->value - 1];
- char *tclass_name
- = policydb.p_class_val_to_name[tclass - 1];
-
/* mask violated permissions */
avd->allowed &= ~masked;
- /* notice to userspace via audit message */
- ab = audit_log_start(current->audit_context,
- GFP_ATOMIC, AUDIT_SELINUX_ERR);
- if (!ab)
- return;
-
- audit_log_format(ab, "av boundary violation: "
- "source=%s target=%s tclass=%s",
- stype_name, ttype_name, tclass_name);
- avc_dump_av(ab, tclass, masked);
- audit_log_end(ab);
+ /* audit masked permissions */
+ security_dump_masked_av(scontext, tcontext,
+ tclass, masked, "bounds");
}
}
@@ -480,7 +558,7 @@ static int context_struct_compute_av(struct context *scontext,
if ((constraint->permissions & (avd->allowed)) &&
!constraint_expr_eval(scontext, tcontext, NULL,
constraint->expr)) {
- avd->allowed = (avd->allowed) & ~(constraint->permissions);
+ avd->allowed &= ~(constraint->permissions);
}
constraint = constraint->next;
}
@@ -499,8 +577,8 @@ static int context_struct_compute_av(struct context *scontext,
break;
}
if (!ra)
- avd->allowed = (avd->allowed) & ~(PROCESS__TRANSITION |
- PROCESS__DYNTRANSITION);
+ avd->allowed &= ~(PROCESS__TRANSITION |
+ PROCESS__DYNTRANSITION);
}
/*
@@ -687,6 +765,26 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
}
index = type->bounds;
}
+
+ if (rc) {
+ char *old_name = NULL;
+ char *new_name = NULL;
+ int length;
+
+ if (!context_struct_to_string(old_context,
+ &old_name, &length) &&
+ !context_struct_to_string(new_context,
+ &new_name, &length)) {
+ audit_log(current->audit_context,
+ GFP_ATOMIC, AUDIT_SELINUX_ERR,
+ "op=security_bounded_transition "
+ "result=denied "
+ "oldcontext=%s newcontext=%s",
+ old_name, new_name);
+ }
+ kfree(new_name);
+ kfree(old_name);
+ }
out:
read_unlock(&policy_rwlock);
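
The security_dump_masked_av() helper added in this file walks a 32-bit permission mask and audits the symbolic name of every set bit, comma-separated, falling back to "????" for unnamed bits. Its core loop reduces to the standalone sketch below (names here are placeholders, not real SELinux permissions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* placeholder permission names; real ones come from the policydb */
        const char *permission_names[32] = { "read", "write", "execute" };
        uint32_t masked = (1u << 0) | (1u << 2) | (1u << 7);    /* bit 7 unnamed */
        bool need_comma = false;

        for (int index = 0; index < 32; index++) {
                if (!(masked & (1u << index)))
                        continue;
                printf("%s%s", need_comma ? "," : "",
                       permission_names[index] ? permission_names[index] : "????");
                need_comma = true;
        }
        putchar('\n');          /* prints: read,execute,???? */
        return 0;
}
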
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 0023182078c7..1c9bdbcbe3d2 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -91,7 +91,7 @@ struct inode_smack *new_inode_smack(char *smack)
*/
/**
- * smack_ptrace_may_access - Smack approval on PTRACE_ATTACH
+ * smack_ptrace_access_check - Smack approval on PTRACE_ATTACH
* @ctp: child task pointer
* @mode: ptrace attachment mode
*
@@ -99,13 +99,13 @@ struct inode_smack *new_inode_smack(char *smack)
*
* Do the capability checks, and require read and write.
*/
-static int smack_ptrace_may_access(struct task_struct *ctp, unsigned int mode)
+static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
{
int rc;
struct smk_audit_info ad;
char *sp, *tsp;
- rc = cap_ptrace_may_access(ctp, mode);
+ rc = cap_ptrace_access_check(ctp, mode);
if (rc != 0)
return rc;
@@ -3032,7 +3032,7 @@ static void smack_release_secctx(char *secdata, u32 seclen)
struct security_operations smack_ops = {
.name = "smack",
- .ptrace_may_access = smack_ptrace_may_access,
+ .ptrace_access_check = smack_ptrace_access_check,
.ptrace_traceme = smack_ptrace_traceme,
.syslog = smack_syslog,
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index fdd1f4b8c448..3c8bd8ee0b95 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1285,6 +1285,36 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
}
/**
+ * tomoyo_delete_domain - Delete a domain.
+ *
+ * @domainname: The name of domain.
+ *
+ * Returns 0.
+ */
+static int tomoyo_delete_domain(char *domainname)
+{
+ struct tomoyo_domain_info *domain;
+ struct tomoyo_path_info name;
+
+ name.name = domainname;
+ tomoyo_fill_path_info(&name);
+ down_write(&tomoyo_domain_list_lock);
+ /* Is there an active domain? */
+ list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ /* Never delete tomoyo_kernel_domain */
+ if (domain == &tomoyo_kernel_domain)
+ continue;
+ if (domain->is_deleted ||
+ tomoyo_pathcmp(domain->domainname, &name))
+ continue;
+ domain->is_deleted = true;
+ break;
+ }
+ up_write(&tomoyo_domain_list_lock);
+ return 0;
+}
+
+/**
* tomoyo_write_domain_policy - Write domain policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 6d6ba09af457..31df541911f7 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -339,8 +339,6 @@ const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain);
const char *tomoyo_get_msg(const bool is_enforce);
/* Convert single path operation to operation name. */
const char *tomoyo_sp2keyword(const u8 operation);
-/* Delete a domain. */
-int tomoyo_delete_domain(char *data);
/* Create "alias" entry in exception policy. */
int tomoyo_write_alias_policy(char *data, const bool is_delete);
/*
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 1d8b16960576..fcf52accce2b 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -717,38 +717,6 @@ int tomoyo_write_alias_policy(char *data, const bool is_delete)
return tomoyo_update_alias_entry(data, cp, is_delete);
}
-/* Domain create/delete handler. */
-
-/**
- * tomoyo_delete_domain - Delete a domain.
- *
- * @domainname: The name of domain.
- *
- * Returns 0.
- */
-int tomoyo_delete_domain(char *domainname)
-{
- struct tomoyo_domain_info *domain;
- struct tomoyo_path_info name;
-
- name.name = domainname;
- tomoyo_fill_path_info(&name);
- down_write(&tomoyo_domain_list_lock);
- /* Is there an active domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
- /* Never delete tomoyo_kernel_domain */
- if (domain == &tomoyo_kernel_domain)
- continue;
- if (domain->is_deleted ||
- tomoyo_pathcmp(domain->domainname, &name))
- continue;
- domain->is_deleted = true;
- break;
- }
- up_write(&tomoyo_domain_list_lock);
- return 0;
-}
-
/**
* tomoyo_find_or_assign_new_domain - Create a domain.
*
@@ -818,13 +786,11 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
/**
* tomoyo_find_next_domain - Find a domain.
*
- * @bprm: Pointer to "struct linux_binprm".
- * @next_domain: Pointer to pointer to "struct tomoyo_domain_info".
+ * @bprm: Pointer to "struct linux_binprm".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_find_next_domain(struct linux_binprm *bprm,
- struct tomoyo_domain_info **next_domain)
+int tomoyo_find_next_domain(struct linux_binprm *bprm)
{
/*
* This function assumes that the size of buffer returned by
@@ -946,9 +912,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm,
tomoyo_set_domain_flag(old_domain, false,
TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED);
out:
+ if (!domain)
+ domain = old_domain;
+ bprm->cred->security = domain;
tomoyo_free(real_program_name);
tomoyo_free(symlink_program_name);
- *next_domain = domain ? domain : old_domain;
tomoyo_free(tmp);
return retval;
}
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 3194d09fe0f4..35a13e7915e4 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -61,14 +61,8 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
* Execute permission is checked against pathname passed to do_execve()
* using current domain.
*/
- if (!domain) {
- struct tomoyo_domain_info *next_domain = NULL;
- int retval = tomoyo_find_next_domain(bprm, &next_domain);
-
- if (!retval)
- bprm->cred->security = next_domain;
- return retval;
- }
+ if (!domain)
+ return tomoyo_find_next_domain(bprm);
/*
* Read permission is checked against interpreters using next domain.
* '1' is the result of open_to_namei_flags(O_RDONLY).
diff --git a/security/tomoyo/tomoyo.h b/security/tomoyo/tomoyo.h
index 0fd588a629cf..cd6ba0bf7069 100644
--- a/security/tomoyo/tomoyo.h
+++ b/security/tomoyo/tomoyo.h
@@ -31,8 +31,7 @@ int tomoyo_check_2path_perm(struct tomoyo_domain_info *domain,
struct path *path2);
int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
struct file *filp);
-int tomoyo_find_next_domain(struct linux_binprm *bprm,
- struct tomoyo_domain_info **next_domain);
+int tomoyo_find_next_domain(struct linux_binprm *bprm);
/* Index numbers for Access Controls. */
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 257624bd1997..3b9b550109cb 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -353,7 +353,8 @@ static void master_free(struct snd_kcontrol *kcontrol)
*
* The optional argument @tlv can be used to specify the TLV information
* for dB scale of the master control. It should be a single element
- * with #SNDRV_CTL_TLVT_DB_SCALE type, and should be the max 0dB.
+ * with #SNDRV_CTL_TLVT_DB_SCALE, #SNDRV_CTL_TLVT_DB_MINMAX or
+ * #SNDRV_CTL_TLVT_DB_MINMAX_MUTE type, and should be the max 0dB.
*/
struct snd_kcontrol *snd_ctl_make_virtual_master(char *name,
const unsigned int *tlv)
@@ -384,7 +385,10 @@ struct snd_kcontrol *snd_ctl_make_virtual_master(char *name,
kctl->private_free = master_free;
/* additional (constant) TLV read */
- if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
+ if (tlv &&
+ (tlv[0] == SNDRV_CTL_TLVT_DB_SCALE ||
+ tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX ||
+ tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX_MUTE)) {
kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
memcpy(master->tlv, tlv, sizeof(master->tlv));
kctl->tlv.p = master->tlv;
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index de83608719ea..33e63faf6aa1 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -1,5 +1,5 @@
/*
- * Driver for C-Media's CMI8330 soundcards.
+ * Driver for C-Media's CMI8330 and CMI8329 soundcards.
* Copyright (c) by George Talusan <gstalusan@uwaterloo.ca>
* http://www.undergrad.math.uwaterloo.ca/~gstalusa
*
@@ -35,7 +35,7 @@
*
* This card has two mixers and two PCM devices. I've cheesed it such
* that recording and playback can be done through the same device.
- * The driver "magically" routes the capturing to the CMI8330 codec,
+ * The driver "magically" routes the capturing to the AD1848 codec,
* and playback to the SB16 codec. This allows for full-duplex mode
* to some extent.
* The utilities in alsa-utils are aware of both devices, so passing
@@ -64,7 +64,7 @@
/*
*/
MODULE_AUTHOR("George Talusan <gstalusan@uwaterloo.ca>");
-MODULE_DESCRIPTION("C-Media CMI8330");
+MODULE_DESCRIPTION("C-Media CMI8330/CMI8329");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8330,isapnp:{CMI0001,@@@0001,@X@0001}}}");
@@ -86,38 +86,38 @@ static long mpuport[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static int mpuirq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "Index value for CMI8330 soundcard.");
+MODULE_PARM_DESC(index, "Index value for CMI8330/CMI8329 soundcard.");
module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string for CMI8330 soundcard.");
+MODULE_PARM_DESC(id, "ID string for CMI8330/CMI8329 soundcard.");
module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "Enable CMI8330 soundcard.");
+MODULE_PARM_DESC(enable, "Enable CMI8330/CMI8329 soundcard.");
#ifdef CONFIG_PNP
module_param_array(isapnp, bool, NULL, 0444);
MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard.");
#endif
module_param_array(sbport, long, NULL, 0444);
-MODULE_PARM_DESC(sbport, "Port # for CMI8330 SB driver.");
+MODULE_PARM_DESC(sbport, "Port # for CMI8330/CMI8329 SB driver.");
module_param_array(sbirq, int, NULL, 0444);
-MODULE_PARM_DESC(sbirq, "IRQ # for CMI8330 SB driver.");
+MODULE_PARM_DESC(sbirq, "IRQ # for CMI8330/CMI8329 SB driver.");
module_param_array(sbdma8, int, NULL, 0444);
-MODULE_PARM_DESC(sbdma8, "DMA8 for CMI8330 SB driver.");
+MODULE_PARM_DESC(sbdma8, "DMA8 for CMI8330/CMI8329 SB driver.");
module_param_array(sbdma16, int, NULL, 0444);
-MODULE_PARM_DESC(sbdma16, "DMA16 for CMI8330 SB driver.");
+MODULE_PARM_DESC(sbdma16, "DMA16 for CMI8330/CMI8329 SB driver.");
module_param_array(wssport, long, NULL, 0444);
-MODULE_PARM_DESC(wssport, "Port # for CMI8330 WSS driver.");
+MODULE_PARM_DESC(wssport, "Port # for CMI8330/CMI8329 WSS driver.");
module_param_array(wssirq, int, NULL, 0444);
-MODULE_PARM_DESC(wssirq, "IRQ # for CMI8330 WSS driver.");
+MODULE_PARM_DESC(wssirq, "IRQ # for CMI8330/CMI8329 WSS driver.");
module_param_array(wssdma, int, NULL, 0444);
-MODULE_PARM_DESC(wssdma, "DMA for CMI8330 WSS driver.");
+MODULE_PARM_DESC(wssdma, "DMA for CMI8330/CMI8329 WSS driver.");
module_param_array(fmport, long, NULL, 0444);
-MODULE_PARM_DESC(fmport, "FM port # for CMI8330 driver.");
+MODULE_PARM_DESC(fmport, "FM port # for CMI8330/CMI8329 driver.");
module_param_array(mpuport, long, NULL, 0444);
-MODULE_PARM_DESC(mpuport, "MPU-401 port # for CMI8330 driver.");
+MODULE_PARM_DESC(mpuport, "MPU-401 port # for CMI8330/CMI8329 driver.");
module_param_array(mpuirq, int, NULL, 0444);
-MODULE_PARM_DESC(mpuirq, "IRQ # for CMI8330 MPU-401 port.");
+MODULE_PARM_DESC(mpuirq, "IRQ # for CMI8330/CMI8329 MPU-401 port.");
#ifdef CONFIG_PNP
static int isa_registered;
static int pnp_registered;
@@ -156,6 +156,11 @@ static unsigned char snd_cmi8330_image[((CMI8330_CDINGAIN)-16) + 1] =
typedef int (*snd_pcm_open_callback_t)(struct snd_pcm_substream *);
+enum card_type {
+ CMI8330,
+ CMI8329
+};
+
struct snd_cmi8330 {
#ifdef CONFIG_PNP
struct pnp_dev *cap;
@@ -172,11 +177,14 @@ struct snd_cmi8330 {
snd_pcm_open_callback_t open;
void *private_data; /* sb or wss */
} streams[2];
+
+ enum card_type type;
};
#ifdef CONFIG_PNP
static struct pnp_card_device_id snd_cmi8330_pnpids[] = {
+ { .id = "CMI0001", .devs = { { "@X@0001" }, { "@@@0001" }, { "@H@0001" }, { "A@@0001" } } },
{ .id = "CMI0001", .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } } },
{ .id = "" }
};
@@ -304,7 +312,7 @@ static int __devinit snd_cmi8330_mixer(struct snd_card *card, struct snd_cmi8330
unsigned int idx;
int err;
- strcpy(card->mixername, "CMI8330/C3D");
+ strcpy(card->mixername, (acard->type == CMI8329) ? "CMI8329" : "CMI8330/C3D");
for (idx = 0; idx < ARRAY_SIZE(snd_cmi8330_controls); idx++) {
err = snd_ctl_add(card,
@@ -329,6 +337,9 @@ static int __devinit snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard,
struct pnp_dev *pdev;
int err;
+ /* CMI8329 has a device with ID A@@0001, CMI8330 does not */
+ acard->type = (id->devs[3].id[0]) ? CMI8329 : CMI8330;
+
acard->cap = pnp_request_card_device(card, id->devs[0].id, NULL);
if (acard->cap == NULL)
return -EBUSY;
@@ -338,41 +349,43 @@ static int __devinit snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard,
return -EBUSY;
acard->mpu = pnp_request_card_device(card, id->devs[2].id, NULL);
- if (acard->play == NULL)
+ if (acard->mpu == NULL)
return -EBUSY;
pdev = acard->cap;
err = pnp_activate_dev(pdev);
if (err < 0) {
- snd_printk(KERN_ERR "CMI8330/C3D PnP configure failure\n");
+ snd_printk(KERN_ERR "AD1848 PnP configure failure\n");
return -EBUSY;
}
wssport[dev] = pnp_port_start(pdev, 0);
wssdma[dev] = pnp_dma(pdev, 0);
wssirq[dev] = pnp_irq(pdev, 0);
- fmport[dev] = pnp_port_start(pdev, 1);
+ if (acard->type == CMI8330)
+ fmport[dev] = pnp_port_start(pdev, 1);
/* allocate SB16 resources */
pdev = acard->play;
err = pnp_activate_dev(pdev);
if (err < 0) {
- snd_printk(KERN_ERR "CMI8330/C3D (SB16) PnP configure failure\n");
+ snd_printk(KERN_ERR "SB16 PnP configure failure\n");
return -EBUSY;
}
sbport[dev] = pnp_port_start(pdev, 0);
sbdma8[dev] = pnp_dma(pdev, 0);
sbdma16[dev] = pnp_dma(pdev, 1);
sbirq[dev] = pnp_irq(pdev, 0);
+ if (acard->type == CMI8329)
+ fmport[dev] = pnp_port_start(pdev, 1);
/* allocate MPU-401 resources */
pdev = acard->mpu;
err = pnp_activate_dev(pdev);
if (err < 0) {
- snd_printk(KERN_ERR
- "CMI8330/C3D (MPU-401) PnP configure failure\n");
+ snd_printk(KERN_ERR "MPU-401 PnP configure failure\n");
return -EBUSY;
}
mpuport[dev] = pnp_port_start(pdev, 0);
@@ -430,9 +443,9 @@ static int __devinit snd_cmi8330_pcm(struct snd_card *card, struct snd_cmi8330 *
snd_cmi8330_capture_open
};
- if ((err = snd_pcm_new(card, "CMI8330", 0, 1, 1, &pcm)) < 0)
+ if ((err = snd_pcm_new(card, (chip->type == CMI8329) ? "CMI8329" : "CMI8330", 0, 1, 1, &pcm)) < 0)
return err;
- strcpy(pcm->name, "CMI8330");
+ strcpy(pcm->name, (chip->type == CMI8329) ? "CMI8329" : "CMI8330");
pcm->private_data = chip;
/* SB16 */
@@ -527,11 +540,11 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
wssdma[dev], -1,
WSS_HW_DETECT, 0, &acard->wss);
if (err < 0) {
- snd_printk(KERN_ERR PFX "(CMI8330) device busy??\n");
+ snd_printk(KERN_ERR PFX "AD1848 device busy??\n");
return err;
}
if (acard->wss->hardware != WSS_HW_CMI8330) {
- snd_printk(KERN_ERR PFX "(CMI8330) not found during probe\n");
+ snd_printk(KERN_ERR PFX "AD1848 not found during probe\n");
return -ENODEV;
}
@@ -541,11 +554,11 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
sbdma8[dev],
sbdma16[dev],
SB_HW_AUTO, &acard->sb)) < 0) {
- snd_printk(KERN_ERR PFX "(SB16) device busy??\n");
+ snd_printk(KERN_ERR PFX "SB16 device busy??\n");
return err;
}
if (acard->sb->hardware != SB_HW_16) {
- snd_printk(KERN_ERR PFX "(SB16) not found during probe\n");
+ snd_printk(KERN_ERR PFX "SB16 not found during probe\n");
return err;
}
@@ -585,8 +598,8 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
mpuport[dev]);
}
- strcpy(card->driver, "CMI8330/C3D");
- strcpy(card->shortname, "C-Media CMI8330/C3D");
+ strcpy(card->driver, (acard->type == CMI8329) ? "CMI8329" : "CMI8330/C3D");
+ strcpy(card->shortname, (acard->type == CMI8329) ? "C-Media CMI8329" : "C-Media CMI8330/C3D");
sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d",
card->shortname,
acard->wss->port,
diff --git a/sound/oss/kahlua.c b/sound/oss/kahlua.c
index c180598f1710..89466b056be7 100644
--- a/sound/oss/kahlua.c
+++ b/sound/oss/kahlua.c
@@ -199,7 +199,7 @@ MODULE_LICENSE("GPL");
*/
static struct pci_device_id id_tbl[] = {
- { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO), 0 },
{ }
};
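Note: this and the PCI ID table conversions in the sound/pci drivers below are mechanical. For reference, PCI_VDEVICE(), as defined in include/linux/pci.h, supplies the vendor/device pair plus the PCI_ANY_ID and class wildcards, so only driver_data is left for the caller to spell out (sketch of the macro, not part of this patch):

	#define PCI_VDEVICE(vendor, device)		\
		PCI_VENDOR_ID_##vendor, (device),	\
		PCI_ANY_ID, PCI_ANY_ID, 0, 0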
diff --git a/sound/oss/mpu401.c b/sound/oss/mpu401.c
index 6c0a770ed054..1b2316f35b1f 100644
--- a/sound/oss/mpu401.c
+++ b/sound/oss/mpu401.c
@@ -926,31 +926,21 @@ static struct midi_operations mpu401_midi_operations[MAX_MIDI_DEV];
static void mpu401_chk_version(int n, struct mpu_config *devc)
{
int tmp;
- unsigned long flags;
devc->version = devc->revision = 0;
- spin_lock_irqsave(&devc->lock,flags);
- if ((tmp = mpu_cmd(n, 0xAC, 0)) < 0)
- {
- spin_unlock_irqrestore(&devc->lock,flags);
+ tmp = mpu_cmd(n, 0xAC, 0);
+ if (tmp < 0)
return;
- }
if ((tmp & 0xf0) > 0x20) /* Why it's larger than 2.x ??? */
- {
- spin_unlock_irqrestore(&devc->lock,flags);
return;
- }
devc->version = tmp;
- if ((tmp = mpu_cmd(n, 0xAD, 0)) < 0)
- {
+ if ((tmp = mpu_cmd(n, 0xAD, 0)) < 0) {
devc->version = 0;
- spin_unlock_irqrestore(&devc->lock,flags);
return;
}
devc->revision = tmp;
- spin_unlock_irqrestore(&devc->lock,flags);
}
int attach_mpu401(struct address_info *hw_config, struct module *owner)
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 71515ddb4593..d6752dff2a44 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -287,10 +287,10 @@ struct atiixp {
/*
*/
static struct pci_device_id snd_atiixp_ids[] = {
- { 0x1002, 0x4341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB200 */
- { 0x1002, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB300 */
- { 0x1002, 0x4370, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB400 */
- { 0x1002, 0x4382, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB600 */
+ { PCI_VDEVICE(ATI, 0x4341), 0 }, /* SB200 */
+ { PCI_VDEVICE(ATI, 0x4361), 0 }, /* SB300 */
+ { PCI_VDEVICE(ATI, 0x4370), 0 }, /* SB400 */
+ { PCI_VDEVICE(ATI, 0x4382), 0 }, /* SB600 */
{ 0, }
};
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index c3136cccc559..e7e147bf8eb2 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -262,8 +262,8 @@ struct atiixp_modem {
/*
*/
static struct pci_device_id snd_atiixp_ids[] = {
- { 0x1002, 0x434d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB200 */
- { 0x1002, 0x4378, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB400 */
+ { PCI_VDEVICE(ATI, 0x434d), 0 }, /* SB200 */
+ { PCI_VDEVICE(ATI, 0x4378), 0 }, /* SB400 */
{ 0, }
};
diff --git a/sound/pci/au88x0/au8810.c b/sound/pci/au88x0/au8810.c
index fce22c7af0ea..c0e8c6b295cb 100644
--- a/sound/pci/au88x0/au8810.c
+++ b/sound/pci/au88x0/au8810.c
@@ -1,8 +1,7 @@
#include "au8810.h"
#include "au88x0.h"
static struct pci_device_id snd_vortex_ids[] = {
- {PCI_VENDOR_ID_AUREAL, PCI_DEVICE_ID_AUREAL_ADVANTAGE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1,},
+ {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_ADVANTAGE), 1,},
{0,}
};
diff --git a/sound/pci/au88x0/au8820.c b/sound/pci/au88x0/au8820.c
index d1fbcce07257..a6527330df58 100644
--- a/sound/pci/au88x0/au8820.c
+++ b/sound/pci/au88x0/au8820.c
@@ -1,8 +1,7 @@
#include "au8820.h"
#include "au88x0.h"
static struct pci_device_id snd_vortex_ids[] = {
- {PCI_VENDOR_ID_AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
+ {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_1), 0,},
{0,}
};
diff --git a/sound/pci/au88x0/au8830.c b/sound/pci/au88x0/au8830.c
index d4f2717c14fb..6c702ad4352a 100644
--- a/sound/pci/au88x0/au8830.c
+++ b/sound/pci/au88x0/au8830.c
@@ -1,8 +1,7 @@
#include "au8830.h"
#include "au88x0.h"
static struct pci_device_id snd_vortex_ids[] = {
- {PCI_VENDOR_ID_AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
+ {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_2), 0,},
{0,}
};
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 57b992a5c057..f24bf1ecb36d 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1876,7 +1876,7 @@ static int snd_ca0106_resume(struct pci_dev *pci)
// PCI IDs
static struct pci_device_id snd_ca0106_ids[] = {
- { 0x1102, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Audigy LS or Live 24bit */
+ { PCI_VDEVICE(CREATIVE, 0x0007), 0 }, /* Audigy LS or Live 24bit */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_ca0106_ids);
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 449fe02f666e..ddcd4a9fd7e6 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -2797,11 +2797,11 @@ static inline void snd_cmipci_proc_init(struct cmipci *cm) {}
static struct pci_device_id snd_cmipci_ids[] = {
- {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_AL, PCI_DEVICE_ID_CMEDIA_CM8738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A), 0},
+ {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B), 0},
+ {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738), 0},
+ {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B), 0},
+ {PCI_VDEVICE(AL, PCI_DEVICE_ID_CMEDIA_CM8738), 0},
{0,},
};
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index f6286f84a221..e2e0359bb056 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -495,7 +495,7 @@ struct cs4281 {
static irqreturn_t snd_cs4281_interrupt(int irq, void *dev_id);
static struct pci_device_id snd_cs4281_ids[] = {
- { 0x1013, 0x6005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4281 */
+ { PCI_VDEVICE(CIRRUS, 0x6005), 0, }, /* CS4281 */
{ 0, }
};
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c
index c9b3e3d48cbc..033aec430117 100644
--- a/sound/pci/cs46xx/cs46xx.c
+++ b/sound/pci/cs46xx/cs46xx.c
@@ -65,9 +65,9 @@ module_param_array(mmap_valid, bool, NULL, 0444);
MODULE_PARM_DESC(mmap_valid, "Support OSS mmap.");
static struct pci_device_id snd_cs46xx_ids[] = {
- { 0x1013, 0x6001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4280 */
- { 0x1013, 0x6003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4612 */
- { 0x1013, 0x6004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4615 */
+ { PCI_VDEVICE(CIRRUS, 0x6001), 0, }, /* CS4280 */
+ { PCI_VDEVICE(CIRRUS, 0x6003), 0, }, /* CS4612 */
+ { PCI_VDEVICE(CIRRUS, 0x6004), 0, }, /* CS4615 */
{ 0, }
};
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index c7f3b994101c..168af67d938e 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -77,9 +77,9 @@ MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
* Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400
*/
static struct pci_device_id snd_emu10k1_ids[] = {
- { 0x1102, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* EMU10K1 */
- { 0x1102, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, /* Audigy */
- { 0x1102, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, /* Audigy 2 Value SB0400 */
+ { PCI_VDEVICE(CREATIVE, 0x0002), 0 }, /* EMU10K1 */
+ { PCI_VDEVICE(CREATIVE, 0x0004), 1 }, /* Audigy */
+ { PCI_VDEVICE(CREATIVE, 0x0008), 1 }, /* Audigy 2 Value SB0400 */
{ 0, }
};
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 4d3ad793e98f..36e08bd2b3cc 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -1607,7 +1607,7 @@ static void __devexit snd_emu10k1x_remove(struct pci_dev *pci)
// PCI IDs
static struct pci_device_id snd_emu10k1x_ids[] = {
- { 0x1102, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Dell OEM version (EMU10K1) */
+ { PCI_VDEVICE(CREATIVE, 0x0006), 0 }, /* Dell OEM version (EMU10K1) */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_emu10k1x_ids);
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 18f4d1e98c46..2b82c5c723e1 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -445,12 +445,12 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id);
static struct pci_device_id snd_audiopci_ids[] = {
#ifdef CHIP1370
- { 0x1274, 0x5000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1370 */
+ { PCI_VDEVICE(ENSONIQ, 0x5000), 0, }, /* ES1370 */
#endif
#ifdef CHIP1371
- { 0x1274, 0x1371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1371 */
- { 0x1274, 0x5880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1373 - CT5880 */
- { 0x1102, 0x8938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* Ectiva EV1938 */
+ { PCI_VDEVICE(ENSONIQ, 0x1371), 0, }, /* ES1371 */
+ { PCI_VDEVICE(ENSONIQ, 0x5880), 0, }, /* ES1373 - CT5880 */
+ { PCI_VDEVICE(ECTIVA, 0x8938), 0, }, /* Ectiva EV1938 */
#endif
{ 0, }
};
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index fbd2ac09aa34..820318ee62c1 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -244,7 +244,7 @@ struct es1938 {
static irqreturn_t snd_es1938_interrupt(int irq, void *dev_id);
static struct pci_device_id snd_es1938_ids[] = {
- { 0x125d, 0x1969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* Solo-1 */
+ { PCI_VDEVICE(ESS, 0x1969), 0, }, /* Solo-1 */
{ 0, }
};
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 04438f1d682d..b8a77f9b0827 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -46,6 +46,20 @@ config SND_HDA_INPUT_JACK
Say Y here to enable the jack plugging notification via
input layer.
+config SND_HDA_PATCH_LOADER
+ bool "Support initialization patch loading for HD-audio"
+ depends on EXPERIMENTAL
+ select FW_LOADER
+ select SND_HDA_HWDEP
+ select SND_HDA_RECONFIG
+ help
+ Say Y here to allow the HD-audio driver to load a pseudo
+ firmware file ("patch") for overriding the BIOS setup at
+ startup. The "patch" file can be specified via the patch module
+ option, such as patch=hda-init.
+
+ This option turns on hwdep and reconfig features automatically.
+
config SND_HDA_CODEC_REALTEK
bool "Build Realtek HD-audio codec support"
default y
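Note: the new patch option is a per-card array parameter of snd-hda-intel. A minimal usage sketch (the file name is only an example; request_firmware() loads it from the usual firmware search path such as /lib/firmware):

	# /etc/modprobe.d/alsa.conf
	options snd-hda-intel patch=hda-init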
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 29272f2e95a0..08fe6592ad44 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -24,6 +24,7 @@
#include <linux/workqueue.h>
#include <sound/core.h>
#include "hda_beep.h"
+#include "hda_local.h"
enum {
DIGBEEP_HZ_STEP = 46875, /* 46.875 Hz */
@@ -115,6 +116,9 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
struct hda_beep *beep;
int err;
+ if (!snd_hda_get_bool_hint(codec, "beep"))
+ return 0; /* disabled explicitly */
+
beep = kzalloc(sizeof(*beep), GFP_KERNEL);
if (beep == NULL)
return -ENOMEM;
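Note: with the check added above, the digital beep device can be suppressed per codec through the hint interface provided by SND_HDA_RECONFIG. An illustrative sketch, assuming the codec's hwdep device is hwC0D0 and that the bool-hint parser treats anything other than yes/true/1 as false:

	echo "beep = no" > /sys/class/sound/hwC0D0/hints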
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 462e2cedaa6a..94d848e98716 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -885,7 +885,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
* Returns 0 if successful, or a negative error code.
*/
int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr,
- int do_init, struct hda_codec **codecp)
+ struct hda_codec **codecp)
{
struct hda_codec *codec;
char component[31];
@@ -978,11 +978,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr
codec->afg ? codec->afg : codec->mfg,
AC_PWRST_D0);
- if (do_init) {
- err = snd_hda_codec_configure(codec);
- if (err < 0)
- goto error;
- }
snd_hda_codec_proc_new(codec);
snd_hda_create_hwdep(codec);
@@ -1036,6 +1031,7 @@ int snd_hda_codec_configure(struct hda_codec *codec)
err = init_unsol_queue(codec->bus);
return err;
}
+EXPORT_SYMBOL_HDA(snd_hda_codec_configure);
/**
* snd_hda_codec_setup_stream - set up the codec for streaming
@@ -2567,7 +2563,7 @@ unsigned int snd_hda_calc_stream_format(unsigned int rate,
case 20:
case 24:
case 32:
- if (maxbps >= 32)
+ if (maxbps >= 32 || format == SNDRV_PCM_FORMAT_FLOAT_LE)
val |= 0x40;
else if (maxbps >= 24)
val |= 0x30;
@@ -2694,11 +2690,12 @@ static int snd_hda_query_supported_pcm(struct hda_codec *codec, hda_nid_t nid,
bps = 20;
}
}
- else if (streams == AC_SUPFMT_FLOAT32) {
- /* should be exclusive */
+ if (streams & AC_SUPFMT_FLOAT32) {
formats |= SNDRV_PCM_FMTBIT_FLOAT_LE;
- bps = 32;
- } else if (streams == AC_SUPFMT_AC3) {
+ if (!bps)
+ bps = 32;
+ }
+ if (streams == AC_SUPFMT_AC3) {
/* should be exclusive */
/* temporary hack: we have still no proper support
* for the direct AC3 stream...
@@ -3470,10 +3467,16 @@ int snd_hda_multi_out_analog_open(struct hda_codec *codec,
}
mutex_lock(&codec->spdif_mutex);
if (mout->share_spdif) {
- runtime->hw.rates &= mout->spdif_rates;
- runtime->hw.formats &= mout->spdif_formats;
- if (mout->spdif_maxbps < hinfo->maxbps)
- hinfo->maxbps = mout->spdif_maxbps;
+ if ((runtime->hw.rates & mout->spdif_rates) &&
+ (runtime->hw.formats & mout->spdif_formats)) {
+ runtime->hw.rates &= mout->spdif_rates;
+ runtime->hw.formats &= mout->spdif_formats;
+ if (mout->spdif_maxbps < hinfo->maxbps)
+ hinfo->maxbps = mout->spdif_maxbps;
+ } else {
+ mout->share_spdif = 0;
+ /* FIXME: need notify? */
+ }
}
mutex_unlock(&codec->spdif_mutex);
}
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index cad79efaabc9..72c997592eed 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -830,7 +830,8 @@ enum {
int snd_hda_bus_new(struct snd_card *card, const struct hda_bus_template *temp,
struct hda_bus **busp);
int snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr,
- int do_init, struct hda_codec **codecp);
+ struct hda_codec **codecp);
+int snd_hda_codec_configure(struct hda_codec *codec);
/*
* low level functions
@@ -938,6 +939,13 @@ static inline void snd_hda_power_down(struct hda_codec *codec) {}
#define snd_hda_codec_needs_resume(codec) 1
#endif
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+/*
+ * patch firmware
+ */
+int snd_hda_load_patch(struct hda_bus *bus, const char *patch);
+#endif
+
/*
* Codec modularization
*/
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 6812fbe80fa4..cc24e6721d74 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -24,6 +24,7 @@
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
+#include <linux/firmware.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -312,12 +313,8 @@ static ssize_t init_verbs_show(struct device *dev,
return len;
}
-static ssize_t init_verbs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int parse_init_verbs(struct hda_codec *codec, const char *buf)
{
- struct snd_hwdep *hwdep = dev_get_drvdata(dev);
- struct hda_codec *codec = hwdep->private_data;
struct hda_verb *v;
int nid, verb, param;
@@ -331,6 +328,18 @@ static ssize_t init_verbs_store(struct device *dev,
v->nid = nid;
v->verb = verb;
v->param = param;
+ return 0;
+}
+
+static ssize_t init_verbs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct snd_hwdep *hwdep = dev_get_drvdata(dev);
+ struct hda_codec *codec = hwdep->private_data;
+ int err = parse_init_verbs(codec, buf);
+ if (err < 0)
+ return err;
return count;
}
@@ -376,19 +385,15 @@ static void remove_trail_spaces(char *str)
#define MAX_HINTS 1024
-static ssize_t hints_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int parse_hints(struct hda_codec *codec, const char *buf)
{
- struct snd_hwdep *hwdep = dev_get_drvdata(dev);
- struct hda_codec *codec = hwdep->private_data;
char *key, *val;
struct hda_hint *hint;
while (isspace(*buf))
buf++;
if (!*buf || *buf == '#' || *buf == '\n')
- return count;
+ return 0;
if (*buf == '=')
return -EINVAL;
key = kstrndup_noeol(buf, 1024);
@@ -411,7 +416,7 @@ static ssize_t hints_store(struct device *dev,
kfree(hint->key);
hint->key = key;
hint->val = val;
- return count;
+ return 0;
}
/* allocate a new hint entry */
if (codec->hints.used >= MAX_HINTS)
@@ -424,6 +429,18 @@ static ssize_t hints_store(struct device *dev,
}
hint->key = key;
hint->val = val;
+ return 0;
+}
+
+static ssize_t hints_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct snd_hwdep *hwdep = dev_get_drvdata(dev);
+ struct hda_codec *codec = hwdep->private_data;
+ int err = parse_hints(codec, buf);
+ if (err < 0)
+ return err;
return count;
}
@@ -469,20 +486,24 @@ static ssize_t driver_pin_configs_show(struct device *dev,
#define MAX_PIN_CONFIGS 32
-static ssize_t user_pin_configs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int parse_user_pin_configs(struct hda_codec *codec, const char *buf)
{
- struct snd_hwdep *hwdep = dev_get_drvdata(dev);
- struct hda_codec *codec = hwdep->private_data;
int nid, cfg;
- int err;
if (sscanf(buf, "%i %i", &nid, &cfg) != 2)
return -EINVAL;
if (!nid)
return -EINVAL;
- err = snd_hda_add_pincfg(codec, &codec->user_pins, nid, cfg);
+ return snd_hda_add_pincfg(codec, &codec->user_pins, nid, cfg);
+}
+
+static ssize_t user_pin_configs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct snd_hwdep *hwdep = dev_get_drvdata(dev);
+ struct hda_codec *codec = hwdep->private_data;
+ int err = parse_user_pin_configs(codec, buf);
if (err < 0)
return err;
return count;
@@ -553,3 +574,180 @@ int snd_hda_get_bool_hint(struct hda_codec *codec, const char *key)
EXPORT_SYMBOL_HDA(snd_hda_get_bool_hint);
#endif /* CONFIG_SND_HDA_RECONFIG */
+
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+
+/* parser mode */
+enum {
+ LINE_MODE_NONE,
+ LINE_MODE_CODEC,
+ LINE_MODE_MODEL,
+ LINE_MODE_PINCFG,
+ LINE_MODE_VERB,
+ LINE_MODE_HINT,
+ NUM_LINE_MODES,
+};
+
+static inline int strmatch(const char *a, const char *b)
+{
+ return strnicmp(a, b, strlen(b)) == 0;
+}
+
+/* parse the contents after the line "[codec]"
+ * accept only the line with three numbers, and assign the current codec
+ */
+static void parse_codec_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ unsigned int vendorid, subid, caddr;
+ struct hda_codec *codec;
+
+ *codecp = NULL;
+ if (sscanf(buf, "%i %i %i", &vendorid, &subid, &caddr) == 3) {
+ list_for_each_entry(codec, &bus->codec_list, list) {
+ if (codec->addr == caddr) {
+ *codecp = codec;
+ break;
+ }
+ }
+ }
+}
+
+/* parse the contents after the other command tags, [pincfg], [verb],
+ * [hint] and [model]
+ * just pass to the sysfs helper (only when any codec was specified)
+ */
+static void parse_pincfg_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ if (!*codecp)
+ return;
+ parse_user_pin_configs(*codecp, buf);
+}
+
+static void parse_verb_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ if (!*codecp)
+ return;
+ parse_init_verbs(*codecp, buf);
+}
+
+static void parse_hint_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ if (!*codecp)
+ return;
+ parse_hints(*codecp, buf);
+}
+
+static void parse_model_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ if (!*codecp)
+ return;
+ kfree((*codecp)->modelname);
+ (*codecp)->modelname = kstrdup(buf, GFP_KERNEL);
+}
+
+struct hda_patch_item {
+ const char *tag;
+ void (*parser)(char *buf, struct hda_bus *bus, struct hda_codec **retc);
+};
+
+static struct hda_patch_item patch_items[NUM_LINE_MODES] = {
+ [LINE_MODE_CODEC] = { "[codec]", parse_codec_mode },
+ [LINE_MODE_MODEL] = { "[model]", parse_model_mode },
+ [LINE_MODE_VERB] = { "[verb]", parse_verb_mode },
+ [LINE_MODE_PINCFG] = { "[pincfg]", parse_pincfg_mode },
+ [LINE_MODE_HINT] = { "[hint]", parse_hint_mode },
+};
+
+/* check the line starting with '[' -- change the parser mode accordingly */
+static int parse_line_mode(char *buf, struct hda_bus *bus)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(patch_items); i++) {
+ if (!patch_items[i].tag)
+ continue;
+ if (strmatch(buf, patch_items[i].tag))
+ return i;
+ }
+ return LINE_MODE_NONE;
+}
+
+/* copy one line from the buffer in fw, and update the fields in fw
+ * return zero if it reaches the end of the buffer, or non-zero
+ * if successfully copied a line
+ *
+ * the spaces at the beginning and the end of the line are stripped
+ */
+static int get_line_from_fw(char *buf, int size, struct firmware *fw)
+{
+ int len;
+ const char *p = fw->data;
+ while (isspace(*p) && fw->size) {
+ p++;
+ fw->size--;
+ }
+ if (!fw->size)
+ return 0;
+ if (size < fw->size)
+ size = fw->size;
+
+ for (len = 0; len < fw->size; len++) {
+ if (!*p)
+ break;
+ if (*p == '\n') {
+ p++;
+ len++;
+ break;
+ }
+ if (len < size)
+ *buf++ = *p++;
+ }
+ *buf = 0;
+ fw->size -= len;
+ fw->data = p;
+ remove_trail_spaces(buf);
+ return 1;
+}
+
+/*
+ * load a "patch" firmware file and parse it
+ */
+int snd_hda_load_patch(struct hda_bus *bus, const char *patch)
+{
+ int err;
+ const struct firmware *fw;
+ struct firmware tmp;
+ char buf[128];
+ struct hda_codec *codec;
+ int line_mode;
+ struct device *dev = bus->card->dev;
+
+ if (snd_BUG_ON(!dev))
+ return -ENODEV;
+ err = request_firmware(&fw, patch, dev);
+ if (err < 0) {
+ printk(KERN_ERR "hda-codec: Cannot load the patch '%s'\n",
+ patch);
+ return err;
+ }
+
+ tmp = *fw;
+ line_mode = LINE_MODE_NONE;
+ codec = NULL;
+ while (get_line_from_fw(buf, sizeof(buf) - 1, &tmp)) {
+ if (!*buf || *buf == '#' || *buf == '\n')
+ continue;
+ if (*buf == '[')
+ line_mode = parse_line_mode(buf, bus);
+ else if (patch_items[line_mode].parser)
+ patch_items[line_mode].parser(buf, bus, &codec);
+ }
+ release_firmware(fw);
+ return 0;
+}
+EXPORT_SYMBOL_HDA(snd_hda_load_patch);
+#endif /* CONFIG_SND_HDA_PATCH_LOADER */
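A sketch of the line-oriented "patch" file that the parser above accepts; every value here is a placeholder. The [codec] line carries the vendor id, subsystem id and codec address, and the remaining sections are handed to the same parsers used by the sysfs reconfig interface:

	[codec]
	0x10ec0262 0x104d1300 2

	[model]
	auto

	[pincfg]
	0x12 0x411111f0

	[verb]
	0x20 0x500 0x03

	[hint]
	beep = no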
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4e9ea7080270..394ca553cd29 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -61,6 +61,9 @@ static int probe_mask[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1};
static int probe_only[SNDRV_CARDS];
static int single_cmd;
static int enable_msi;
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+static char *patch[SNDRV_CARDS];
+#endif
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
@@ -84,6 +87,10 @@ MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs "
"(for debugging only).");
module_param(enable_msi, int, 0444);
MODULE_PARM_DESC(enable_msi, "Enable Message Signaled Interrupt (MSI)");
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+module_param_array(patch, charp, NULL, 0444);
+MODULE_PARM_DESC(patch, "Patch file for Intel HD audio interface.");
+#endif
#ifdef CONFIG_SND_HDA_POWER_SAVE
static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
@@ -1286,8 +1293,7 @@ static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] __devinitdata = {
[AZX_DRIVER_TERA] = 1,
};
-static int __devinit azx_codec_create(struct azx *chip, const char *model,
- int no_init)
+static int __devinit azx_codec_create(struct azx *chip, const char *model)
{
struct hda_bus_template bus_temp;
int c, codecs, err;
@@ -1346,7 +1352,7 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model,
for (c = 0; c < max_slots; c++) {
if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
struct hda_codec *codec;
- err = snd_hda_codec_new(chip->bus, c, !no_init, &codec);
+ err = snd_hda_codec_new(chip->bus, c, &codec);
if (err < 0)
continue;
codecs++;
@@ -1356,7 +1362,16 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model,
snd_printk(KERN_ERR SFX "no codecs initialized\n");
return -ENXIO;
}
+ return 0;
+}
+/* configure each codec instance */
+static int __devinit azx_codec_configure(struct azx *chip)
+{
+ struct hda_codec *codec;
+ list_for_each_entry(codec, &chip->bus->codec_list, list) {
+ snd_hda_codec_configure(codec);
+ }
return 0;
}
@@ -1454,6 +1469,18 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
mutex_unlock(&chip->open_mutex);
return err;
}
+ snd_pcm_limit_hw_rates(runtime);
+ /* sanity check */
+ if (snd_BUG_ON(!runtime->hw.channels_min) ||
+ snd_BUG_ON(!runtime->hw.channels_max) ||
+ snd_BUG_ON(!runtime->hw.formats) ||
+ snd_BUG_ON(!runtime->hw.rates)) {
+ azx_release_device(azx_dev);
+ hinfo->ops.close(hinfo, apcm->codec, substream);
+ snd_hda_power_down(apcm->codec);
+ mutex_unlock(&chip->open_mutex);
+ return -EINVAL;
+ }
spin_lock_irqsave(&chip->reg_lock, flags);
azx_dev->substream = substream;
azx_dev->running = 0;
@@ -1462,7 +1489,6 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
runtime->private_data = azx_dev;
snd_pcm_set_sync(substream);
mutex_unlock(&chip->open_mutex);
-
return 0;
}
@@ -2460,15 +2486,32 @@ static int __devinit azx_probe(struct pci_dev *pci,
return err;
}
+ /* set this here since it's referred in snd_hda_load_patch() */
+ snd_card_set_dev(card, &pci->dev);
+
err = azx_create(card, pci, dev, pci_id->driver_data, &chip);
if (err < 0)
goto out_free;
card->private_data = chip;
/* create codec instances */
- err = azx_codec_create(chip, model[dev], probe_only[dev]);
+ err = azx_codec_create(chip, model[dev]);
if (err < 0)
goto out_free;
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+ if (patch[dev]) {
+ snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n",
+ patch[dev]);
+ err = snd_hda_load_patch(chip->bus, patch[dev]);
+ if (err < 0)
+ goto out_free;
+ }
+#endif
+ if (!probe_only[dev]) {
+ err = azx_codec_configure(chip);
+ if (err < 0)
+ goto out_free;
+ }
/* create PCM streams */
err = snd_hda_build_pcms(chip->bus);
@@ -2480,8 +2523,6 @@ static int __devinit azx_probe(struct pci_dev *pci,
if (err < 0)
goto out_free;
- snd_card_set_dev(card, &pci->dev);
-
err = snd_card_register(card);
if (err < 0)
goto out_free;
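Note: with the probe sequence reordered above, codec configuration is now a separate step, so a card can be brought up unconfigured and configured later from user space. An illustrative flow, assuming CONFIG_SND_HDA_RECONFIG is enabled and the codec's hwdep device is hwC0D0 (the NID/pin-config values are placeholders):

	modprobe snd-hda-intel probe_only=1
	echo "0x12 0x411111f0" > /sys/class/sound/hwC0D0/user_pin_configs
	echo 1 > /sys/class/sound/hwC0D0/reconfig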
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 83349013b4df..75aa3785212f 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -99,7 +99,6 @@ struct snd_kcontrol *snd_hda_find_mixer_ctl(struct hda_codec *codec,
int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
unsigned int *tlv, const char **slaves);
int snd_hda_codec_reset(struct hda_codec *codec);
-int snd_hda_codec_configure(struct hda_codec *codec);
/* amp value bits */
#define HDA_AMP_MUTE 0x80
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 84cc49ca9148..be7d25fa7f35 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -72,6 +72,7 @@ struct ad198x_spec {
hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
unsigned int jack_present :1;
+ unsigned int inv_jack_detect:1;
#ifdef CONFIG_SND_HDA_POWER_SAVE
struct hda_loopback_check loopback;
@@ -669,39 +670,13 @@ static struct hda_input_mux ad1986a_automic_capture_source = {
},
};
-static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
+static struct snd_kcontrol_new ad1986a_laptop_master_mixers[] = {
HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "External Amplifier",
- .info = ad198x_eapd_info,
- .get = ad198x_eapd_get,
- .put = ad198x_eapd_put,
- .private_value = 0x1b | (1 << 8), /* port-D, inversed */
- },
{ } /* end */
};
-static struct snd_kcontrol_new ad1986a_samsung_mixers[] = {
- HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
- HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
+static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
@@ -727,6 +702,12 @@ static struct snd_kcontrol_new ad1986a_samsung_mixers[] = {
{ } /* end */
};
+static struct snd_kcontrol_new ad1986a_laptop_intmic_mixers[] = {
+ HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
+ { } /* end */
+};
+
/* re-connect the mic boost input according to the jack sensing */
static void ad1986a_automic(struct hda_codec *codec)
{
@@ -776,8 +757,9 @@ static void ad1986a_hp_automute(struct hda_codec *codec)
unsigned int present;
present = snd_hda_codec_read(codec, 0x1a, 0, AC_VERB_GET_PIN_SENSE, 0);
- /* Lenovo N100 seems to report the reversed bit for HP jack-sensing */
- spec->jack_present = !(present & 0x80000000);
+ spec->jack_present = !!(present & 0x80000000);
+ if (spec->inv_jack_detect)
+ spec->jack_present = !spec->jack_present;
ad1986a_update_hp(codec);
}
@@ -816,7 +798,7 @@ static int ad1986a_hp_master_sw_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new ad1986a_laptop_automute_mixers[] = {
+static struct snd_kcontrol_new ad1986a_automute_master_mixers[] = {
HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -826,33 +808,10 @@ static struct snd_kcontrol_new ad1986a_laptop_automute_mixers[] = {
.put = ad1986a_hp_master_sw_put,
.private_value = HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT),
},
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "External Amplifier",
- .info = ad198x_eapd_info,
- .get = ad198x_eapd_get,
- .put = ad198x_eapd_put,
- .private_value = 0x1b | (1 << 8), /* port-D, inversed */
- },
{ } /* end */
};
+
/*
* initialization verbs
*/
@@ -981,6 +940,27 @@ static struct hda_verb ad1986a_hp_init_verbs[] = {
{}
};
+static void ad1986a_samsung_p50_unsol_event(struct hda_codec *codec,
+ unsigned int res)
+{
+ switch (res >> 26) {
+ case AD1986A_HP_EVENT:
+ ad1986a_hp_automute(codec);
+ break;
+ case AD1986A_MIC_EVENT:
+ ad1986a_automic(codec);
+ break;
+ }
+}
+
+static int ad1986a_samsung_p50_init(struct hda_codec *codec)
+{
+ ad198x_init(codec);
+ ad1986a_hp_automute(codec);
+ ad1986a_automic(codec);
+ return 0;
+}
+
/* models */
enum {
@@ -991,6 +971,7 @@ enum {
AD1986A_LAPTOP_AUTOMUTE,
AD1986A_ULTRA,
AD1986A_SAMSUNG,
+ AD1986A_SAMSUNG_P50,
AD1986A_MODELS
};
@@ -1002,6 +983,7 @@ static const char *ad1986a_models[AD1986A_MODELS] = {
[AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute",
[AD1986A_ULTRA] = "ultra",
[AD1986A_SAMSUNG] = "samsung",
+ [AD1986A_SAMSUNG_P50] = "samsung-p50",
};
static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
@@ -1024,6 +1006,7 @@ static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD),
SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
+ SND_PCI_QUIRK(0x144d, 0xc024, "Samsung P50", AD1986A_SAMSUNG_P50),
SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA),
SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_SAMSUNG),
SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK),
@@ -1111,7 +1094,10 @@ static int patch_ad1986a(struct hda_codec *codec)
spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
break;
case AD1986A_LAPTOP_EAPD:
- spec->mixers[0] = ad1986a_laptop_eapd_mixers;
+ spec->num_mixers = 3;
+ spec->mixers[0] = ad1986a_laptop_master_mixers;
+ spec->mixers[1] = ad1986a_laptop_eapd_mixers;
+ spec->mixers[2] = ad1986a_laptop_intmic_mixers;
spec->num_init_verbs = 2;
spec->init_verbs[1] = ad1986a_eapd_init_verbs;
spec->multiout.max_channels = 2;
@@ -1122,7 +1108,9 @@ static int patch_ad1986a(struct hda_codec *codec)
spec->input_mux = &ad1986a_laptop_eapd_capture_source;
break;
case AD1986A_SAMSUNG:
- spec->mixers[0] = ad1986a_samsung_mixers;
+ spec->num_mixers = 2;
+ spec->mixers[0] = ad1986a_laptop_master_mixers;
+ spec->mixers[1] = ad1986a_laptop_eapd_mixers;
spec->num_init_verbs = 3;
spec->init_verbs[1] = ad1986a_eapd_init_verbs;
spec->init_verbs[2] = ad1986a_automic_verbs;
@@ -1135,8 +1123,28 @@ static int patch_ad1986a(struct hda_codec *codec)
codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
codec->patch_ops.init = ad1986a_automic_init;
break;
+ case AD1986A_SAMSUNG_P50:
+ spec->num_mixers = 2;
+ spec->mixers[0] = ad1986a_automute_master_mixers;
+ spec->mixers[1] = ad1986a_laptop_eapd_mixers;
+ spec->num_init_verbs = 4;
+ spec->init_verbs[1] = ad1986a_eapd_init_verbs;
+ spec->init_verbs[2] = ad1986a_automic_verbs;
+ spec->init_verbs[3] = ad1986a_hp_init_verbs;
+ spec->multiout.max_channels = 2;
+ spec->multiout.num_dacs = 1;
+ spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
+ if (!is_jack_available(codec, 0x25))
+ spec->multiout.dig_out_nid = 0;
+ spec->input_mux = &ad1986a_automic_capture_source;
+ codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
+ codec->patch_ops.init = ad1986a_samsung_p50_init;
+ break;
case AD1986A_LAPTOP_AUTOMUTE:
- spec->mixers[0] = ad1986a_laptop_automute_mixers;
+ spec->num_mixers = 3;
+ spec->mixers[0] = ad1986a_automute_master_mixers;
+ spec->mixers[1] = ad1986a_laptop_eapd_mixers;
+ spec->mixers[2] = ad1986a_laptop_intmic_mixers;
spec->num_init_verbs = 3;
spec->init_verbs[1] = ad1986a_eapd_init_verbs;
spec->init_verbs[2] = ad1986a_hp_init_verbs;
@@ -1148,6 +1156,10 @@ static int patch_ad1986a(struct hda_codec *codec)
spec->input_mux = &ad1986a_laptop_eapd_capture_source;
codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
codec->patch_ops.init = ad1986a_hp_init;
+ /* Lenovo N100 seems to report the reversed bit
+ * for HP jack-sensing
+ */
+ spec->inv_jack_detect = 1;
break;
case AD1986A_ULTRA:
spec->mixers[0] = ad1986a_laptop_eapd_mixers;
@@ -3734,9 +3746,30 @@ static struct snd_kcontrol_new ad1884a_laptop_mixers[] = {
{ } /* end */
};
+static int ad1884a_mobile_master_sw_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ int ret = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
+ int mute = (!ucontrol->value.integer.value[0] &&
+ !ucontrol->value.integer.value[1]);
+ /* toggle GPIO1 according to the mute state */
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+ mute ? 0x02 : 0x0);
+ return ret;
+}
+
static struct snd_kcontrol_new ad1884a_mobile_mixers[] = {
HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),
+ /*HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Master Playback Switch",
+ .info = snd_hda_mixer_amp_switch_info,
+ .get = snd_hda_mixer_amp_switch_get,
+ .put = ad1884a_mobile_master_sw_put,
+ .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
+ },
HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Capture Volume", 0x14, 0x0, HDA_INPUT),
@@ -3857,6 +3890,10 @@ static struct hda_verb ad1884a_mobile_verbs[] = {
/* unsolicited event for pin-sense */
{0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
{0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
+ /* allow to touch GPIO1 (for mute control) */
+ {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
+ {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
+ {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
{ } /* end */
};
@@ -3966,6 +4003,7 @@ static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x3037, "HP 2230s", AD1884A_LAPTOP),
SND_PCI_QUIRK(0x103c, 0x3056, "HP", AD1884A_MOBILE),
SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x3070, "HP", AD1884A_MOBILE),
+ SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30d0, "HP laptop", AD1884A_LAPTOP),
SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30e0, "HP laptop", AD1884A_LAPTOP),
SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3600, "HP laptop", AD1884A_LAPTOP),
SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD),
diff --git a/sound/pci/hda/patch_ca0110.c b/sound/pci/hda/patch_ca0110.c
index 392d108c3558..019ca7cb56d7 100644
--- a/sound/pci/hda/patch_ca0110.c
+++ b/sound/pci/hda/patch_ca0110.c
@@ -510,7 +510,7 @@ static int ca0110_parse_auto_config(struct hda_codec *codec)
}
-int patch_ca0110(struct hda_codec *codec)
+static int patch_ca0110(struct hda_codec *codec)
{
struct ca0110_spec *spec;
int err;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 334533197425..de72b72f58f4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -208,12 +208,6 @@ enum {
ALC885_MBP3,
ALC885_MB5,
ALC885_IMAC24,
- ALC882_AUTO,
- ALC882_MODEL_LAST,
-};
-
-/* ALC883 models */
-enum {
ALC883_3ST_2ch_DIG,
ALC883_3ST_6ch_DIG,
ALC883_3ST_6ch,
@@ -246,8 +240,8 @@ enum {
ALC889A_MB31,
ALC1200_ASUS_P5Q,
ALC883_SONY_VAIO_TT,
- ALC883_AUTO,
- ALC883_MODEL_LAST,
+ ALC882_AUTO,
+ ALC882_MODEL_LAST,
};
/* for GPIO Poll */
@@ -320,6 +314,8 @@ struct alc_spec {
struct snd_array kctls;
struct hda_input_mux private_imux[3];
hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
+ hda_nid_t private_adc_nids[AUTO_CFG_MAX_OUTS];
+ hda_nid_t private_capsrc_nids[AUTO_CFG_MAX_OUTS];
/* hooks */
void (*init_hook)(struct hda_codec *codec);
@@ -945,12 +941,13 @@ static void alc_fix_pll_init(struct hda_codec *codec, hda_nid_t nid,
static void alc_automute_pin(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- unsigned int present;
+ unsigned int present, pincap;
unsigned int nid = spec->autocfg.hp_pins[0];
int i;
- /* need to execute and sync at first */
- snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
+ pincap = snd_hda_query_pin_caps(codec, nid);
+ if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */
+ snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
present = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_PIN_SENSE, 0);
spec->jack_present = (present & AC_PINSENSE_PRESENCE) != 0;
@@ -1392,7 +1389,7 @@ static struct hda_verb alc888_fujitsu_xa3530_verbs[] = {
static void alc_automute_amp(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- unsigned int val, mute;
+ unsigned int val, mute, pincap;
hda_nid_t nid;
int i;
@@ -1401,6 +1398,10 @@ static void alc_automute_amp(struct hda_codec *codec)
nid = spec->autocfg.hp_pins[i];
if (!nid)
break;
+ pincap = snd_hda_query_pin_caps(codec, nid);
+ if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */
+ snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_SET_PIN_SENSE, 0);
val = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_PIN_SENSE, 0);
if (val & AC_PINSENSE_PRESENCE) {
@@ -1471,6 +1472,10 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = {
static struct hda_verb alc888_acer_aspire_6530g_verbs[] = {
/* Bias voltage on for external mic port */
{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80},
+/* Front Mic: set to PIN_IN (empty by default) */
+ {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+/* Unselect Front Mic by default in input mixer 3 */
+ {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0xb)},
/* Enable unsolicited event for HP jack */
{0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
/* Enable speaker output */
@@ -1560,18 +1565,22 @@ static struct hda_input_mux alc888_2_capture_sources[2] = {
static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = {
/* Internal mic only available on one ADC */
{
- .num_items = 3,
+ .num_items = 5,
.items = {
{ "Ext Mic", 0x0 },
+ { "Line In", 0x2 },
{ "CD", 0x4 },
+ { "Input Mix", 0xa },
{ "Int Mic", 0xb },
},
},
{
- .num_items = 2,
+ .num_items = 4,
.items = {
{ "Ext Mic", 0x0 },
+ { "Line In", 0x2 },
{ "CD", 0x4 },
+ { "Input Mix", 0xa },
},
}
};
@@ -1639,6 +1648,17 @@ static void alc888_acer_aspire_4930g_init_hook(struct hda_codec *codec)
alc_automute_amp(codec);
}
+static void alc888_acer_aspire_6530g_init_hook(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ spec->autocfg.hp_pins[0] = 0x15;
+ spec->autocfg.speaker_pins[0] = 0x14;
+ spec->autocfg.speaker_pins[1] = 0x16;
+ spec->autocfg.speaker_pins[2] = 0x17;
+ alc_automute_amp(codec);
+}
+
static void alc889_acer_aspire_8930g_init_hook(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -6271,7 +6291,7 @@ static int patch_alc260(struct hda_codec *codec)
/*
- * ALC882 support
+ * ALC882/883/885/888/889 support
*
* ALC882 is almost identical with ALC880 but has cleaner and more flexible
* configuration. Each pin widget can choose any input DACs and a mixer.
@@ -6283,22 +6303,35 @@ static int patch_alc260(struct hda_codec *codec)
*/
#define ALC882_DIGOUT_NID 0x06
#define ALC882_DIGIN_NID 0x0a
+#define ALC883_DIGOUT_NID ALC882_DIGOUT_NID
+#define ALC883_DIGIN_NID ALC882_DIGIN_NID
+#define ALC1200_DIGOUT_NID 0x10
+
static struct hda_channel_mode alc882_ch_modes[1] = {
{ 8, NULL }
};
+/* DACs */
static hda_nid_t alc882_dac_nids[4] = {
/* front, rear, clfe, rear_surr */
0x02, 0x03, 0x04, 0x05
};
+#define alc883_dac_nids alc882_dac_nids
-/* identical with ALC880 */
+/* ADCs */
#define alc882_adc_nids alc880_adc_nids
#define alc882_adc_nids_alt alc880_adc_nids_alt
+#define alc883_adc_nids alc882_adc_nids_alt
+static hda_nid_t alc883_adc_nids_alt[1] = { 0x08 };
+static hda_nid_t alc883_adc_nids_rev[2] = { 0x09, 0x08 };
+#define alc889_adc_nids alc880_adc_nids
static hda_nid_t alc882_capsrc_nids[3] = { 0x24, 0x23, 0x22 };
static hda_nid_t alc882_capsrc_nids_alt[2] = { 0x23, 0x22 };
+#define alc883_capsrc_nids alc882_capsrc_nids_alt
+static hda_nid_t alc883_capsrc_nids_rev[2] = { 0x22, 0x23 };
+#define alc889_capsrc_nids alc882_capsrc_nids
/* input MUX */
/* FIXME: should be a matrix-type input source selection */
@@ -6313,6 +6346,8 @@ static struct hda_input_mux alc882_capture_source = {
},
};
+#define alc883_capture_source alc882_capture_source
+
static struct hda_input_mux mb5_capture_source = {
.num_items = 3,
.items = {
@@ -6322,6 +6357,77 @@ static struct hda_input_mux mb5_capture_source = {
},
};
+static struct hda_input_mux alc883_3stack_6ch_intel = {
+ .num_items = 4,
+ .items = {
+ { "Mic", 0x1 },
+ { "Front Mic", 0x0 },
+ { "Line", 0x2 },
+ { "CD", 0x4 },
+ },
+};
+
+static struct hda_input_mux alc883_lenovo_101e_capture_source = {
+ .num_items = 2,
+ .items = {
+ { "Mic", 0x1 },
+ { "Line", 0x2 },
+ },
+};
+
+static struct hda_input_mux alc883_lenovo_nb0763_capture_source = {
+ .num_items = 4,
+ .items = {
+ { "Mic", 0x0 },
+ { "iMic", 0x1 },
+ { "Line", 0x2 },
+ { "CD", 0x4 },
+ },
+};
+
+static struct hda_input_mux alc883_fujitsu_pi2515_capture_source = {
+ .num_items = 2,
+ .items = {
+ { "Mic", 0x0 },
+ { "Int Mic", 0x1 },
+ },
+};
+
+static struct hda_input_mux alc883_lenovo_sky_capture_source = {
+ .num_items = 3,
+ .items = {
+ { "Mic", 0x0 },
+ { "Front Mic", 0x1 },
+ { "Line", 0x4 },
+ },
+};
+
+static struct hda_input_mux alc883_asus_eee1601_capture_source = {
+ .num_items = 2,
+ .items = {
+ { "Mic", 0x0 },
+ { "Line", 0x2 },
+ },
+};
+
+static struct hda_input_mux alc889A_mb31_capture_source = {
+ .num_items = 2,
+ .items = {
+ { "Mic", 0x0 },
+ /* Front Mic (0x01) unused */
+ { "Line", 0x2 },
+ /* Line 2 (0x03) unused */
+ /* CD (0x04) unused? */
+ },
+};
+
+/*
+ * 2ch mode
+ */
+static struct hda_channel_mode alc883_3ST_2ch_modes[1] = {
+ { 2, NULL }
+};
+
/*
* 2ch mode
*/
@@ -6334,6 +6440,18 @@ static struct hda_verb alc882_3ST_ch2_init[] = {
};
/*
+ * 4ch mode
+ */
+static struct hda_verb alc882_3ST_ch4_init[] = {
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+/*
* 6ch mode
*/
static struct hda_verb alc882_3ST_ch6_init[] = {
@@ -6346,11 +6464,14 @@ static struct hda_verb alc882_3ST_ch6_init[] = {
{ } /* end */
};
-static struct hda_channel_mode alc882_3ST_6ch_modes[2] = {
+static struct hda_channel_mode alc882_3ST_6ch_modes[3] = {
{ 2, alc882_3ST_ch2_init },
+ { 4, alc882_3ST_ch4_init },
{ 6, alc882_3ST_ch6_init },
};
+#define alc883_3ST_6ch_modes alc882_3ST_6ch_modes
+
/*
* 6ch mode
*/
@@ -6438,6 +6559,143 @@ static struct hda_channel_mode alc885_mb5_6ch_modes[2] = {
{ 6, alc885_mb5_ch6_init },
};
+
+/*
+ * 2ch mode
+ */
+static struct hda_verb alc883_4ST_ch2_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { } /* end */
+};
+
+/*
+ * 4ch mode
+ */
+static struct hda_verb alc883_4ST_ch4_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+/*
+ * 6ch mode
+ */
+static struct hda_verb alc883_4ST_ch6_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+/*
+ * 8ch mode
+ */
+static struct hda_verb alc883_4ST_ch8_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+static struct hda_channel_mode alc883_4ST_8ch_modes[4] = {
+ { 2, alc883_4ST_ch2_init },
+ { 4, alc883_4ST_ch4_init },
+ { 6, alc883_4ST_ch6_init },
+ { 8, alc883_4ST_ch8_init },
+};
+
+
+/*
+ * 2ch mode
+ */
+static struct hda_verb alc883_3ST_ch2_intel_init[] = {
+ { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { } /* end */
+};
+
+/*
+ * 4ch mode
+ */
+static struct hda_verb alc883_3ST_ch4_intel_init[] = {
+ { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+/*
+ * 6ch mode
+ */
+static struct hda_verb alc883_3ST_ch6_intel_init[] = {
+ { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x19, AC_VERB_SET_CONNECT_SEL, 0x02 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+static struct hda_channel_mode alc883_3ST_6ch_intel_modes[3] = {
+ { 2, alc883_3ST_ch2_intel_init },
+ { 4, alc883_3ST_ch4_intel_init },
+ { 6, alc883_3ST_ch6_intel_init },
+};
+
+/*
+ * 6ch mode
+ */
+static struct hda_verb alc883_sixstack_ch6_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x00 },
+ { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { } /* end */
+};
+
+/*
+ * 8ch mode
+ */
+static struct hda_verb alc883_sixstack_ch8_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { } /* end */
+};
+
+static struct hda_channel_mode alc883_sixstack_modes[2] = {
+ { 6, alc883_sixstack_ch6_init },
+ { 8, alc883_sixstack_ch8_init },
+};
+
+
/* Pin assignment: Front=0x14, Rear=0x15, CLFE=0x16, Side=0x17
* Mic=0x18, Front Mic=0x19, Line-In=0x1a, HP=0x1b
*/
@@ -6573,7 +6831,7 @@ static struct snd_kcontrol_new alc882_chmode_mixer[] = {
{ } /* end */
};
-static struct hda_verb alc882_init_verbs[] = {
+static struct hda_verb alc882_base_init_verbs[] = {
/* Front mixer: unmute input/output amp left and right (volume = 0) */
{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
@@ -6591,6 +6849,13 @@ static struct hda_verb alc882_init_verbs[] = {
{0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ /* mute analog input loopbacks */
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
+
/* Front Pin: output 0 (0x0c) */
{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
@@ -6625,11 +6890,6 @@ static struct hda_verb alc882_init_verbs[] = {
/* FIXME: use matrix-type input source selection */
/* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
- /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
/* Input mixer2 */
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
@@ -6640,9 +6900,6 @@ static struct hda_verb alc882_init_verbs[] = {
{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- /* ADC1: mute amp left and right */
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
/* ADC2: mute amp left and right */
{0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -6653,6 +6910,18 @@ static struct hda_verb alc882_init_verbs[] = {
{ }
};
+static struct hda_verb alc882_adc1_init_verbs[] = {
+ /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
+ /* ADC1: mute amp left and right */
+ {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
+ { }
+};
+
static struct hda_verb alc882_eapd_verbs[] = {
/* change to EAPD mode */
{0x20, AC_VERB_SET_COEF_INDEX, 0x07},
@@ -6660,6 +6929,8 @@ static struct hda_verb alc882_eapd_verbs[] = {
{ }
};
+#define alc883_init_verbs alc882_base_init_verbs
+
/* Mac Pro test */
static struct snd_kcontrol_new alc882_macpro_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
@@ -7010,12 +7281,10 @@ static void alc885_imac24_init_hook(struct hda_codec *codec)
/*
* generic initialization of ADC, input mixers and output mixers
*/
-static struct hda_verb alc882_auto_init_verbs[] = {
+static struct hda_verb alc883_auto_init_verbs[] = {
/*
* Unmute ADC0-2 and set the default input to mic-in
*/
- {0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
{0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -7056,11 +7325,6 @@ static struct hda_verb alc882_auto_init_verbs[] = {
/* FIXME: use matrix-type input source selection */
/* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
- /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x03 << 8))},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x02 << 8))},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x04 << 8))},
/* Input mixer2 */
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))},
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x03 << 8))},
@@ -7075,819 +7339,6 @@ static struct hda_verb alc882_auto_init_verbs[] = {
{ }
};
-#ifdef CONFIG_SND_HDA_POWER_SAVE
-#define alc882_loopbacks alc880_loopbacks
-#endif
-
-/* pcm configuration: identical with ALC880 */
-#define alc882_pcm_analog_playback alc880_pcm_analog_playback
-#define alc882_pcm_analog_capture alc880_pcm_analog_capture
-#define alc882_pcm_digital_playback alc880_pcm_digital_playback
-#define alc882_pcm_digital_capture alc880_pcm_digital_capture
-
-/*
- * configuration and preset
- */
-static const char *alc882_models[ALC882_MODEL_LAST] = {
- [ALC882_3ST_DIG] = "3stack-dig",
- [ALC882_6ST_DIG] = "6stack-dig",
- [ALC882_ARIMA] = "arima",
- [ALC882_W2JC] = "w2jc",
- [ALC882_TARGA] = "targa",
- [ALC882_ASUS_A7J] = "asus-a7j",
- [ALC882_ASUS_A7M] = "asus-a7m",
- [ALC885_MACPRO] = "macpro",
- [ALC885_MB5] = "mb5",
- [ALC885_MBP3] = "mbp3",
- [ALC885_IMAC24] = "imac24",
- [ALC882_AUTO] = "auto",
-};
-
-static struct snd_pci_quirk alc882_cfg_tbl[] = {
- SND_PCI_QUIRK(0x1019, 0x6668, "ECS", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x1043, 0x060d, "Asus A7J", ALC882_ASUS_A7J),
- SND_PCI_QUIRK(0x1043, 0x1243, "Asus A7J", ALC882_ASUS_A7J),
- SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_ASUS_A7M),
- SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_W2JC),
- SND_PCI_QUIRK(0x1043, 0x817f, "Asus P5LD2", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x1043, 0x81d8, "Asus P5WD", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */
- SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x161f, 0x2054, "Arima W820", ALC882_ARIMA),
- {}
-};
-
-static struct alc_config_preset alc882_presets[] = {
- [ALC882_3ST_DIG] = {
- .mixers = { alc882_base_mixer },
- .init_verbs = { alc882_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
- .channel_mode = alc882_ch_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- },
- [ALC882_6ST_DIG] = {
- .mixers = { alc882_base_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
- .channel_mode = alc882_sixstack_modes,
- .input_mux = &alc882_capture_source,
- },
- [ALC882_ARIMA] = {
- .mixers = { alc882_base_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc882_eapd_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
- .channel_mode = alc882_sixstack_modes,
- .input_mux = &alc882_capture_source,
- },
- [ALC882_W2JC] = {
- .mixers = { alc882_w2jc_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc882_eapd_verbs,
- alc880_gpio1_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
- .channel_mode = alc880_threestack_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- .dig_out_nid = ALC882_DIGOUT_NID,
- },
- [ALC885_MBP3] = {
- .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
- .init_verbs = { alc885_mbp3_init_verbs,
- alc880_gpio1_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .channel_mode = alc885_mbp_6ch_modes,
- .num_channel_mode = ARRAY_SIZE(alc885_mbp_6ch_modes),
- .input_mux = &alc882_capture_source,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc885_mbp3_init_hook,
- },
- [ALC885_MB5] = {
- .mixers = { alc885_mb5_mixer, alc882_chmode_mixer },
- .init_verbs = { alc885_mb5_init_verbs,
- alc880_gpio1_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .channel_mode = alc885_mb5_6ch_modes,
- .num_channel_mode = ARRAY_SIZE(alc885_mb5_6ch_modes),
- .input_mux = &mb5_capture_source,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- },
- [ALC885_MACPRO] = {
- .mixers = { alc882_macpro_mixer },
- .init_verbs = { alc882_macpro_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
- .channel_mode = alc882_ch_modes,
- .input_mux = &alc882_capture_source,
- .init_hook = alc885_macpro_init_hook,
- },
- [ALC885_IMAC24] = {
- .mixers = { alc885_imac24_mixer },
- .init_verbs = { alc885_imac24_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
- .channel_mode = alc882_ch_modes,
- .input_mux = &alc882_capture_source,
- .unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc885_imac24_init_hook,
- },
- [ALC882_TARGA] = {
- .mixers = { alc882_targa_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc882_targa_verbs},
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
- .adc_nids = alc882_adc_nids,
- .capsrc_nids = alc882_capsrc_nids,
- .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
- .channel_mode = alc882_3ST_6ch_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- .unsol_event = alc882_targa_unsol_event,
- .init_hook = alc882_targa_init_hook,
- },
- [ALC882_ASUS_A7J] = {
- .mixers = { alc882_asus_a7j_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc882_asus_a7j_verbs},
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
- .adc_nids = alc882_adc_nids,
- .capsrc_nids = alc882_capsrc_nids,
- .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
- .channel_mode = alc882_3ST_6ch_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- },
- [ALC882_ASUS_A7M] = {
- .mixers = { alc882_asus_a7m_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc882_eapd_verbs,
- alc880_gpio1_init_verbs,
- alc882_asus_a7m_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
- .channel_mode = alc880_threestack_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- },
-};
-
-
-/*
- * Pin config fixes
- */
-enum {
- PINFIX_ABIT_AW9D_MAX
-};
-
-static struct alc_pincfg alc882_abit_aw9d_pinfix[] = {
- { 0x15, 0x01080104 }, /* side */
- { 0x16, 0x01011012 }, /* rear */
- { 0x17, 0x01016011 }, /* clfe */
- { }
-};
-
-static const struct alc_pincfg *alc882_pin_fixes[] = {
- [PINFIX_ABIT_AW9D_MAX] = alc882_abit_aw9d_pinfix,
-};
-
-static struct snd_pci_quirk alc882_pinfix_tbl[] = {
- SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
- {}
-};
-
-/*
- * BIOS auto configuration
- */
-static void alc882_auto_set_output_and_unmute(struct hda_codec *codec,
- hda_nid_t nid, int pin_type,
- int dac_idx)
-{
- /* set as output */
- struct alc_spec *spec = codec->spec;
- int idx;
-
- alc_set_pin_output(codec, nid, pin_type);
- if (spec->multiout.dac_nids[dac_idx] == 0x25)
- idx = 4;
- else
- idx = spec->multiout.dac_nids[dac_idx] - 2;
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx);
-
-}
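
The helper removed above picks the connection-select index from the DAC NID: DACs 0x02-0x05 map to indices 0-3, and the extra DAC 0x25 maps to index 4. A stand-alone sketch of that arithmetic (hypothetical, outside the driver):

#include <stdio.h>

static int dac_to_connect_index(unsigned int dac_nid)
{
        if (dac_nid == 0x25)
                return 4;
        return (int)dac_nid - 2;        /* 0x02..0x05 -> 0..3 */
}

int main(void)
{
        static const unsigned int dacs[] = { 0x02, 0x03, 0x04, 0x05, 0x25 };
        for (unsigned int i = 0; i < sizeof(dacs) / sizeof(dacs[0]); i++)
                printf("DAC 0x%02x -> connect index %d\n",
                       dacs[i], dac_to_connect_index(dacs[i]));
        return 0;
}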
-
-static void alc882_auto_init_multi_out(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int i;
-
- for (i = 0; i <= HDA_SIDE; i++) {
- hda_nid_t nid = spec->autocfg.line_out_pins[i];
- int pin_type = get_pin_type(spec->autocfg.line_out_type);
- if (nid)
- alc882_auto_set_output_and_unmute(codec, nid, pin_type,
- i);
- }
-}
-
-static void alc882_auto_init_hp_out(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- hda_nid_t pin;
-
- pin = spec->autocfg.hp_pins[0];
- if (pin) /* connect to front */
- /* use dac 0 */
- alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
- pin = spec->autocfg.speaker_pins[0];
- if (pin)
- alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
-}
-
-#define alc882_is_input_pin(nid) alc880_is_input_pin(nid)
-#define ALC882_PIN_CD_NID ALC880_PIN_CD_NID
-
-static void alc882_auto_init_analog_input(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int i;
-
- for (i = 0; i < AUTO_PIN_LAST; i++) {
- hda_nid_t nid = spec->autocfg.input_pins[i];
- if (!nid)
- continue;
- alc_set_input_pin(codec, nid, AUTO_PIN_FRONT_MIC /*i*/);
- if (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- AMP_OUT_MUTE);
- }
-}
-
-static void alc882_auto_init_input_src(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int c;
-
- for (c = 0; c < spec->num_adc_nids; c++) {
- hda_nid_t conn_list[HDA_MAX_NUM_INPUTS];
- hda_nid_t nid = spec->capsrc_nids[c];
- unsigned int mux_idx;
- const struct hda_input_mux *imux;
- int conns, mute, idx, item;
-
- conns = snd_hda_get_connections(codec, nid, conn_list,
- ARRAY_SIZE(conn_list));
- if (conns < 0)
- continue;
- mux_idx = c >= spec->num_mux_defs ? 0 : c;
- imux = &spec->input_mux[mux_idx];
- for (idx = 0; idx < conns; idx++) {
- /* if the current connection is the selected one,
- * unmute it as default - otherwise mute it
- */
- mute = AMP_IN_MUTE(idx);
- for (item = 0; item < imux->num_items; item++) {
- if (imux->items[item].index == idx) {
- if (spec->cur_mux[c] == item)
- mute = AMP_IN_UNMUTE(idx);
- break;
- }
- }
- /* check if we have a selector or mixer
- * we could check for the widget type instead, but
- * just check for Amp-In presence (in case of mixer
- * without amp-in there is something wrong, this
- * function shouldn't be used or capsrc nid is wrong)
- */
- if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- mute);
- else if (mute != AMP_IN_MUTE(idx))
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_CONNECT_SEL,
- idx);
- }
- }
-}
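
The loop removed above walks every connection of each capture-source widget and unmutes only the entry that the current input mux selects (for a selector widget without input amps it writes the connection index instead). A small user-space model of that mute/unmute decision, using a made-up connection list and mux:

#include <stdio.h>

struct mux_item { const char *label; int index; };

int main(void)
{
        /* hypothetical data: 5 connections, mux currently selects item 1 ("Line") */
        static const struct mux_item imux[] = {
                { "Mic", 0 }, { "Line", 2 }, { "CD", 4 },
        };
        const int num_conns = 5, cur_mux = 1;

        for (int idx = 0; idx < num_conns; idx++) {
                int unmute = 0;
                for (unsigned int i = 0; i < sizeof(imux) / sizeof(imux[0]); i++)
                        if (imux[i].index == idx) {
                                unmute = (cur_mux == (int)i);
                                break;
                        }
                printf("connection %d: %s\n", idx, unmute ? "unmute" : "mute");
        }
        return 0;
}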
-
-/* add mic boosts if needed */
-static int alc_auto_add_mic_boost(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int err;
- hda_nid_t nid;
-
- nid = spec->autocfg.input_pins[AUTO_PIN_MIC];
- if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Mic Boost",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
- if (err < 0)
- return err;
- }
- nid = spec->autocfg.input_pins[AUTO_PIN_FRONT_MIC];
- if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Front Mic Boost",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
- if (err < 0)
- return err;
- }
- return 0;
-}
-
-/* almost identical with ALC880 parser... */
-static int alc882_parse_auto_config(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int err = alc880_parse_auto_config(codec);
-
- if (err < 0)
- return err;
- else if (!err)
- return 0; /* no config found */
-
- err = alc_auto_add_mic_boost(codec);
- if (err < 0)
- return err;
-
- /* hack - override the init verbs */
- spec->init_verbs[0] = alc882_auto_init_verbs;
-
- return 1; /* config found */
-}
-
-/* additional initialization for auto-configuration model */
-static void alc882_auto_init(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- alc882_auto_init_multi_out(codec);
- alc882_auto_init_hp_out(codec);
- alc882_auto_init_analog_input(codec);
- alc882_auto_init_input_src(codec);
- if (spec->unsol_event)
- alc_inithook(codec);
-}
-
-static int patch_alc883(struct hda_codec *codec); /* called in patch_alc882() */
-
-static int patch_alc882(struct hda_codec *codec)
-{
- struct alc_spec *spec;
- int err, board_config;
-
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
-
- codec->spec = spec;
-
- board_config = snd_hda_check_board_config(codec, ALC882_MODEL_LAST,
- alc882_models,
- alc882_cfg_tbl);
-
- if (board_config < 0 || board_config >= ALC882_MODEL_LAST) {
- /* Pick up systems that don't supply PCI SSID */
- switch (codec->subsystem_id) {
- case 0x106b0c00: /* Mac Pro */
- board_config = ALC885_MACPRO;
- break;
- case 0x106b1000: /* iMac 24 */
- case 0x106b2800: /* AppleTV */
- case 0x106b3e00: /* iMac 24 Aluminium */
- board_config = ALC885_IMAC24;
- break;
- case 0x106b00a0: /* MacBookPro3,1 - Another revision */
- case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */
- case 0x106b00a4: /* MacbookPro4,1 */
- case 0x106b2c00: /* Macbook Pro rev3 */
- /* Macbook 3.1 (0x106b3600) is handled by patch_alc883() */
- case 0x106b3800: /* MacbookPro4,1 - latter revision */
- board_config = ALC885_MBP3;
- break;
- case 0x106b3f00: /* Macbook 5,1 */
- case 0x106b4000: /* Macbook Pro 5,1 - FIXME: HP jack sense
- * seems not to work, so apparently
- * no perfect solution yet
- */
- board_config = ALC885_MB5;
- break;
- default:
- /* ALC889A is handled better as ALC888-compatible */
- if (codec->revision_id == 0x100101 ||
- codec->revision_id == 0x100103) {
- alc_free(codec);
- return patch_alc883(codec);
- }
- printk(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n",
- codec->chip_name);
- board_config = ALC882_AUTO;
- }
- }
-
- alc_fix_pincfg(codec, alc882_pinfix_tbl, alc882_pin_fixes);
-
- if (board_config == ALC882_AUTO) {
- /* automatic parse from the BIOS config */
- err = alc882_parse_auto_config(codec);
- if (err < 0) {
- alc_free(codec);
- return err;
- } else if (!err) {
- printk(KERN_INFO
- "hda_codec: Cannot set up configuration "
- "from BIOS. Using base mode...\n");
- board_config = ALC882_3ST_DIG;
- }
- }
-
- err = snd_hda_attach_beep_device(codec, 0x1);
- if (err < 0) {
- alc_free(codec);
- return err;
- }
-
- if (board_config != ALC882_AUTO)
- setup_preset(spec, &alc882_presets[board_config]);
-
- spec->stream_analog_playback = &alc882_pcm_analog_playback;
- spec->stream_analog_capture = &alc882_pcm_analog_capture;
- /* FIXME: setup DAC5 */
- /*spec->stream_analog_alt_playback = &alc880_pcm_analog_alt_playback;*/
- spec->stream_analog_alt_capture = &alc880_pcm_analog_alt_capture;
-
- spec->stream_digital_playback = &alc882_pcm_digital_playback;
- spec->stream_digital_capture = &alc882_pcm_digital_capture;
-
- if (!spec->adc_nids && spec->input_mux) {
- /* check whether NID 0x07 is valid */
- unsigned int wcap = get_wcaps(codec, 0x07);
- /* get type */
- wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
- if (wcap != AC_WID_AUD_IN) {
- spec->adc_nids = alc882_adc_nids_alt;
- spec->num_adc_nids = ARRAY_SIZE(alc882_adc_nids_alt);
- spec->capsrc_nids = alc882_capsrc_nids_alt;
- } else {
- spec->adc_nids = alc882_adc_nids;
- spec->num_adc_nids = ARRAY_SIZE(alc882_adc_nids);
- spec->capsrc_nids = alc882_capsrc_nids;
- }
- }
- set_capture_mixer(spec);
- set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
-
- spec->vmaster_nid = 0x0c;
-
- codec->patch_ops = alc_patch_ops;
- if (board_config == ALC882_AUTO)
- spec->init_hook = alc882_auto_init;
-#ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc882_loopbacks;
-#endif
- codec->proc_widget_hook = print_realtek_coef;
-
- return 0;
-}
-
-/*
- * ALC883 support
- *
- * ALC883 is almost identical with ALC880 but has cleaner and more flexible
- * configuration. Each pin widget can choose any input DACs and a mixer.
- * Each ADC is connected from a mixer of all inputs. This makes possible
- * 6-channel independent captures.
- *
- * In addition, an independent DAC for the multi-playback (not used in this
- * driver yet).
- */
-#define ALC883_DIGOUT_NID 0x06
-#define ALC883_DIGIN_NID 0x0a
-
-#define ALC1200_DIGOUT_NID 0x10
-
-static hda_nid_t alc883_dac_nids[4] = {
- /* front, rear, clfe, rear_surr */
- 0x02, 0x03, 0x04, 0x05
-};
-
-static hda_nid_t alc883_adc_nids[2] = {
- /* ADC1-2 */
- 0x08, 0x09,
-};
-
-static hda_nid_t alc883_adc_nids_alt[1] = {
- /* ADC1 */
- 0x08,
-};
-
-static hda_nid_t alc883_adc_nids_rev[2] = {
- /* ADC2-1 */
- 0x09, 0x08
-};
-
-#define alc889_adc_nids alc880_adc_nids
-
-static hda_nid_t alc883_capsrc_nids[2] = { 0x23, 0x22 };
-
-static hda_nid_t alc883_capsrc_nids_rev[2] = { 0x22, 0x23 };
-
-#define alc889_capsrc_nids alc882_capsrc_nids
-
-/* input MUX */
-/* FIXME: should be a matrix-type input source selection */
-
-static struct hda_input_mux alc883_capture_source = {
- .num_items = 4,
- .items = {
- { "Mic", 0x0 },
- { "Front Mic", 0x1 },
- { "Line", 0x2 },
- { "CD", 0x4 },
- },
-};
-
-static struct hda_input_mux alc883_3stack_6ch_intel = {
- .num_items = 4,
- .items = {
- { "Mic", 0x1 },
- { "Front Mic", 0x0 },
- { "Line", 0x2 },
- { "CD", 0x4 },
- },
-};
-
-static struct hda_input_mux alc883_lenovo_101e_capture_source = {
- .num_items = 2,
- .items = {
- { "Mic", 0x1 },
- { "Line", 0x2 },
- },
-};
-
-static struct hda_input_mux alc883_lenovo_nb0763_capture_source = {
- .num_items = 4,
- .items = {
- { "Mic", 0x0 },
- { "iMic", 0x1 },
- { "Line", 0x2 },
- { "CD", 0x4 },
- },
-};
-
-static struct hda_input_mux alc883_fujitsu_pi2515_capture_source = {
- .num_items = 2,
- .items = {
- { "Mic", 0x0 },
- { "Int Mic", 0x1 },
- },
-};
-
-static struct hda_input_mux alc883_lenovo_sky_capture_source = {
- .num_items = 3,
- .items = {
- { "Mic", 0x0 },
- { "Front Mic", 0x1 },
- { "Line", 0x4 },
- },
-};
-
-static struct hda_input_mux alc883_asus_eee1601_capture_source = {
- .num_items = 2,
- .items = {
- { "Mic", 0x0 },
- { "Line", 0x2 },
- },
-};
-
-static struct hda_input_mux alc889A_mb31_capture_source = {
- .num_items = 2,
- .items = {
- { "Mic", 0x0 },
- /* Front Mic (0x01) unused */
- { "Line", 0x2 },
- /* Line 2 (0x03) unused */
- /* CD (0x04) unused? */
- },
-};
-
-/*
- * 2ch mode
- */
-static struct hda_channel_mode alc883_3ST_2ch_modes[1] = {
- { 2, NULL }
-};
-
-/*
- * 2ch mode
- */
-static struct hda_verb alc883_3ST_ch2_init[] = {
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { } /* end */
-};
-
-/*
- * 4ch mode
- */
-static struct hda_verb alc883_3ST_ch4_init[] = {
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-/*
- * 6ch mode
- */
-static struct hda_verb alc883_3ST_ch6_init[] = {
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-static struct hda_channel_mode alc883_3ST_6ch_modes[3] = {
- { 2, alc883_3ST_ch2_init },
- { 4, alc883_3ST_ch4_init },
- { 6, alc883_3ST_ch6_init },
-};
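
Each entry of the removed mode table pairs a channel count with the verb list that retasks the shared jacks: in 4- and 6-channel mode the mic pin 0x18 and line-in pin 0x1a are switched from inputs to surround/CLFE outputs. A hypothetical summary of those pin roles, condensed from the verb lists above (not driver code):

#include <stdio.h>

int main(void)
{
        static const struct { int ch; const char *pin18, *pin1a; } modes[] = {
                { 2, "mic in (VREF80)", "line in"      },
                { 4, "mic in (VREF80)", "surround out" },
                { 6, "CLFE out",        "surround out" },
        };
        for (int i = 0; i < 3; i++)
                printf("%dch: 0x18 = %-16s 0x1a = %s\n",
                       modes[i].ch, modes[i].pin18, modes[i].pin1a);
        return 0;
}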
-
-
-/*
- * 2ch mode
- */
-static struct hda_verb alc883_4ST_ch2_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { } /* end */
-};
-
-/*
- * 4ch mode
- */
-static struct hda_verb alc883_4ST_ch4_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-/*
- * 6ch mode
- */
-static struct hda_verb alc883_4ST_ch6_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-/*
- * 8ch mode
- */
-static struct hda_verb alc883_4ST_ch8_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-static struct hda_channel_mode alc883_4ST_8ch_modes[4] = {
- { 2, alc883_4ST_ch2_init },
- { 4, alc883_4ST_ch4_init },
- { 6, alc883_4ST_ch6_init },
- { 8, alc883_4ST_ch8_init },
-};
-
-
-/*
- * 2ch mode
- */
-static struct hda_verb alc883_3ST_ch2_intel_init[] = {
- { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { } /* end */
-};
-
-/*
- * 4ch mode
- */
-static struct hda_verb alc883_3ST_ch4_intel_init[] = {
- { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-/*
- * 6ch mode
- */
-static struct hda_verb alc883_3ST_ch6_intel_init[] = {
- { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x19, AC_VERB_SET_CONNECT_SEL, 0x02 },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-static struct hda_channel_mode alc883_3ST_6ch_intel_modes[3] = {
- { 2, alc883_3ST_ch2_intel_init },
- { 4, alc883_3ST_ch4_intel_init },
- { 6, alc883_3ST_ch6_intel_init },
-};
-
-/*
- * 6ch mode
- */
-static struct hda_verb alc883_sixstack_ch6_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x00 },
- { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { } /* end */
-};
-
-/*
- * 8ch mode
- */
-static struct hda_verb alc883_sixstack_ch8_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { } /* end */
-};
-
-static struct hda_channel_mode alc883_sixstack_modes[2] = {
- { 6, alc883_sixstack_ch6_init },
- { 8, alc883_sixstack_ch8_init },
-};
-
/* 2ch mode (Speaker:front, Subwoofer:CLFE, Line:input, Headphones:front) */
static struct hda_verb alc889A_mb31_ch2_init[] = {
{0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, /* HP as front */
@@ -7938,34 +7389,7 @@ static struct hda_verb alc883_medion_eapd_verbs[] = {
{ }
};
-/* Pin assignment: Front=0x14, Rear=0x15, CLFE=0x16, Side=0x17
- * Mic=0x18, Front Mic=0x19, Line-In=0x1a, HP=0x1b
- */
-
-static struct snd_kcontrol_new alc883_base_mixer[] = {
- HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
- HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
- HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0e, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0e, 1, 2, HDA_INPUT),
- HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
- HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT),
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
- { } /* end */
-};
+#define alc883_base_mixer alc882_base_mixer
static struct snd_kcontrol_new alc883_mitac_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
@@ -8189,6 +7613,8 @@ static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = {
HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
@@ -8314,84 +7740,6 @@ static struct snd_kcontrol_new alc883_chmode_mixer[] = {
{ } /* end */
};
-static struct hda_verb alc883_init_verbs[] = {
- /* ADC1: mute amp left and right */
- {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
- /* ADC2: mute amp left and right */
- {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
- /* Front mixer: unmute input/output amp left and right (volume = 0) */
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- /* Rear mixer */
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- /* CLFE mixer */
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- /* Side mixer */
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-
- /* mute analog input loopbacks */
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-
- /* Front Pin: output 0 (0x0c) */
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
- /* Rear Pin: output 1 (0x0d) */
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x15, AC_VERB_SET_CONNECT_SEL, 0x01},
- /* CLFE Pin: output 2 (0x0e) */
- {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x16, AC_VERB_SET_CONNECT_SEL, 0x02},
- /* Side Pin: output 3 (0x0f) */
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x17, AC_VERB_SET_CONNECT_SEL, 0x03},
- /* Mic (rear) pin: input vref at 80% */
- {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Front Mic pin: input vref at 80% */
- {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Line In pin: input */
- {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Line-2 In: Headphone output (output 0 - 0x0c) */
- {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x1b, AC_VERB_SET_CONNECT_SEL, 0x00},
- /* CD pin widget for input */
- {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-
- /* FIXME: use matrix-type input source selection */
- /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
- /* Input mixer2 */
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- /* Input mixer3 */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- { }
-};
-
/* toggle speaker-output according to the hp-jack state */
static void alc883_mitac_init_hook(struct hda_codec *codec)
{
@@ -8824,69 +8172,6 @@ static void alc883_vaiott_init_hook(struct hda_codec *codec)
alc_automute_amp(codec);
}
-/*
- * generic initialization of ADC, input mixers and output mixers
- */
-static struct hda_verb alc883_auto_init_verbs[] = {
- /*
- * Unmute ADC0-2 and set the default input to mic-in
- */
- {0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
- {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
- {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-
- /* Mute input amps (CD, Line In, Mic 1 & Mic 2) of the analog-loopback
- * mixer widget
- * Note: PASD motherboards use the Line In 2 as the input for
- * front panel mic (mic 2)
- */
- /* Amp Indices: Mic1 = 0, Mic2 = 1, Line1 = 2, Line2 = 3, CD = 4 */
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-
- /*
- * Set up output mixers (0x0c - 0x0f)
- */
- /* set vol=0 to output mixers */
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- /* set up input amps for analog loopback */
- /* Amp Indices: DAC = 0, mixer = 1 */
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-
- /* FIXME: use matrix-type input source selection */
- /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
- /* Input mixer1 */
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
- /* {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)}, */
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(4)},
- /* Input mixer2 */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
- /* {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)}, */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(4)},
-
- { }
-};
-
static struct hda_verb alc888_asus_m90v_verbs[] = {
{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
@@ -8997,25 +8282,44 @@ static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res)
alc889A_mb31_automute(codec);
}
+
#ifdef CONFIG_SND_HDA_POWER_SAVE
-#define alc883_loopbacks alc880_loopbacks
+#define alc882_loopbacks alc880_loopbacks
#endif
/* pcm configuration: identical with ALC880 */
-#define alc883_pcm_analog_playback alc880_pcm_analog_playback
-#define alc883_pcm_analog_capture alc880_pcm_analog_capture
-#define alc883_pcm_analog_alt_capture alc880_pcm_analog_alt_capture
-#define alc883_pcm_digital_playback alc880_pcm_digital_playback
-#define alc883_pcm_digital_capture alc880_pcm_digital_capture
+#define alc882_pcm_analog_playback alc880_pcm_analog_playback
+#define alc882_pcm_analog_capture alc880_pcm_analog_capture
+#define alc882_pcm_digital_playback alc880_pcm_digital_playback
+#define alc882_pcm_digital_capture alc880_pcm_digital_capture
+
+static hda_nid_t alc883_slave_dig_outs[] = {
+ ALC1200_DIGOUT_NID, 0,
+};
+
+static hda_nid_t alc1200_slave_dig_outs[] = {
+ ALC883_DIGOUT_NID, 0,
+};
/*
* configuration and preset
*/
-static const char *alc883_models[ALC883_MODEL_LAST] = {
- [ALC883_3ST_2ch_DIG] = "3stack-dig",
+static const char *alc882_models[ALC882_MODEL_LAST] = {
+ [ALC882_3ST_DIG] = "3stack-dig",
+ [ALC882_6ST_DIG] = "6stack-dig",
+ [ALC882_ARIMA] = "arima",
+ [ALC882_W2JC] = "w2jc",
+ [ALC882_TARGA] = "targa",
+ [ALC882_ASUS_A7J] = "asus-a7j",
+ [ALC882_ASUS_A7M] = "asus-a7m",
+ [ALC885_MACPRO] = "macpro",
+ [ALC885_MB5] = "mb5",
+ [ALC885_MBP3] = "mbp3",
+ [ALC885_IMAC24] = "imac24",
+ [ALC883_3ST_2ch_DIG] = "3stack-2ch-dig",
[ALC883_3ST_6ch_DIG] = "3stack-6ch-dig",
[ALC883_3ST_6ch] = "3stack-6ch",
- [ALC883_6ST_DIG] = "6stack-dig",
+ [ALC883_6ST_DIG] = "alc883-6stack-dig",
[ALC883_TARGA_DIG] = "targa-dig",
[ALC883_TARGA_2ch_DIG] = "targa-2ch-dig",
[ALC883_TARGA_8ch_DIG] = "targa-8ch-dig",
@@ -9042,11 +8346,12 @@ static const char *alc883_models[ALC883_MODEL_LAST] = {
[ALC1200_ASUS_P5Q] = "asus-p5q",
[ALC889A_MB31] = "mb31",
[ALC883_SONY_VAIO_TT] = "sony-vaio-tt",
- [ALC883_AUTO] = "auto",
+ [ALC882_AUTO] = "auto",
};
-static struct snd_pci_quirk alc883_cfg_tbl[] = {
- SND_PCI_QUIRK(0x1019, 0x6668, "ECS", ALC883_3ST_6ch_DIG),
+static struct snd_pci_quirk alc882_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x1019, 0x6668, "ECS", ALC882_6ST_DIG),
+
SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_ACER_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_ACER_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_ACER_ASPIRE),
@@ -9061,40 +8366,54 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
ALC888_ACER_ASPIRE_8930G),
SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
ALC888_ACER_ASPIRE_8930G),
- SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC883_AUTO),
- SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC883_AUTO),
+ SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC882_AUTO),
+ SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC882_AUTO),
SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
- ALC888_ACER_ASPIRE_4930G),
+ ALC888_ACER_ASPIRE_6530G),
SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
ALC888_ACER_ASPIRE_6530G),
/* default Acer -- disabled as it causes more problems.
* model=auto should work fine now
*/
/* SND_PCI_QUIRK_VENDOR(0x1025, "Acer laptop", ALC883_ACER), */
+
SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL),
+
SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x103c, 0x2a4f, "HP Samba", ALC888_3ST_HP),
SND_PCI_QUIRK(0x103c, 0x2a60, "HP Lucknow", ALC888_3ST_HP),
SND_PCI_QUIRK(0x103c, 0x2a61, "HP Nettle", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x103c, 0x2a66, "HP Acacia", ALC888_3ST_HP),
SND_PCI_QUIRK(0x103c, 0x2a72, "HP Educ.ar", ALC888_3ST_HP),
+
+ SND_PCI_QUIRK(0x1043, 0x060d, "Asus A7J", ALC882_ASUS_A7J),
+ SND_PCI_QUIRK(0x1043, 0x1243, "Asus A7J", ALC882_ASUS_A7J),
+ SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_ASUS_A7M),
SND_PCI_QUIRK(0x1043, 0x1873, "Asus M90V", ALC888_ASUS_M90V),
+ SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_W2JC),
+ SND_PCI_QUIRK(0x1043, 0x817f, "Asus P5LD2", ALC882_6ST_DIG),
+ SND_PCI_QUIRK(0x1043, 0x81d8, "Asus P5WD", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1043, 0x8249, "Asus M2A-VM HDMI", ALC883_3ST_6ch_DIG),
SND_PCI_QUIRK(0x1043, 0x8284, "Asus Z37E", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1043, 0x82fe, "Asus P5Q-EM HDMI", ALC1200_ASUS_P5Q),
SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_ASUS_EEE1601),
+
+ SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC883_SONY_VAIO_TT),
SND_PCI_QUIRK(0x105b, 0x0ce8, "Foxconn P35AX-S", ALC883_6ST_DIG),
- SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC883_6ST_DIG),
+ SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1071, 0x8227, "Mitac 82801H", ALC883_MITAC),
SND_PCI_QUIRK(0x1071, 0x8253, "Mitac 8252d", ALC883_MITAC),
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD),
SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL),
SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch),
- SND_PCI_QUIRK(0x1458, 0xa002, "MSI", ALC883_6ST_DIG),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
+
SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x0579, "MSI", ALC883_TARGA_2ch_DIG),
+ SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */
SND_PCI_QUIRK(0x1462, 0x2fb3, "MSI", ALC883_TARGA_2ch_DIG),
+ SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0x3729, "MSI S420", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x3783, "NEC S970", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x3b7f, "MSI", ALC883_TARGA_2ch_DIG),
@@ -9116,11 +8435,13 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG),
+
SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC883_LAPTOP_EAPD),
SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch),
+ /* SND_PCI_QUIRK(0x161f, 0x2054, "Arima W820", ALC882_ARIMA), */
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION),
SND_PCI_QUIRK_MASK(0x1734, 0xfff0, 0x1100, "FSC AMILO Xi/Pi25xx",
ALC883_FUJITSU_PI2515),
@@ -9135,24 +8456,180 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG),
SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
+
SND_PCI_QUIRK(0x8086, 0x0001, "DG33BUC", ALC883_3ST_6ch_INTEL),
SND_PCI_QUIRK(0x8086, 0x0002, "DG33FBC", ALC883_3ST_6ch_INTEL),
SND_PCI_QUIRK(0x8086, 0x2503, "82801H", ALC883_MITAC),
SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC883_3ST_6ch_INTEL),
SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
- SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC883_SONY_VAIO_TT),
- {}
-};
-static hda_nid_t alc883_slave_dig_outs[] = {
- ALC1200_DIGOUT_NID, 0,
+ {}
};
-static hda_nid_t alc1200_slave_dig_outs[] = {
- ALC883_DIGOUT_NID, 0,
+/* codec SSID table for Intel Mac */
+static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_MACPRO),
+ SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24),
+ SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24),
+ SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31),
+ SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
+ SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC885_MB5),
+ /* FIXME: HP jack sense seems not to work for MBP 5,1, so apparently
+ * no perfect solution yet
+ */
+ SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC885_MB5),
+ {} /* terminator */
};
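
The new alc882_ssid_cfg_tbl covers the Apple machines that supply no PCI SSID; for them the driver matches the codec's own subsystem ID (Apple's vendor 0x106b packed into the upper 16 bits) against this table via snd_hda_check_board_codec_sid_config(), as used later in patch_alc882(). A minimal sketch of that kind of packed-SSID lookup, with a made-up table layout rather than the real struct snd_pci_quirk:

#include <stdio.h>

struct quirk { unsigned short vendor, device; const char *name; int config; };

static const struct quirk tbl[] = {
        { 0x106b, 0x0c00, "Mac Pro",     1 /* e.g. ALC885_MACPRO */ },
        { 0x106b, 0x3f00, "Macbook 5,1", 2 /* e.g. ALC885_MB5 */ },
        { 0, 0, NULL, -1 },
};

static int lookup(unsigned int subsystem_id)
{
        for (const struct quirk *q = tbl; q->name; q++)
                if (subsystem_id == ((unsigned int)q->vendor << 16 | q->device)) {
                        printf("matched %s\n", q->name);
                        return q->config;
                }
        return -1;      /* no match: fall back to auto-probe */
}

int main(void)
{
        return lookup(0x106b0c00) < 0;
}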
-static struct alc_config_preset alc883_presets[] = {
+static struct alc_config_preset alc882_presets[] = {
+ [ALC882_3ST_DIG] = {
+ .mixers = { alc882_base_mixer },
+ .init_verbs = { alc882_base_init_verbs,
+ alc882_adc1_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
+ .channel_mode = alc882_ch_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ },
+ [ALC882_6ST_DIG] = {
+ .mixers = { alc882_base_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs,
+ alc882_adc1_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
+ .channel_mode = alc882_sixstack_modes,
+ .input_mux = &alc882_capture_source,
+ },
+ [ALC882_ARIMA] = {
+ .mixers = { alc882_base_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc882_eapd_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
+ .channel_mode = alc882_sixstack_modes,
+ .input_mux = &alc882_capture_source,
+ },
+ [ALC882_W2JC] = {
+ .mixers = { alc882_w2jc_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc882_eapd_verbs, alc880_gpio1_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
+ .channel_mode = alc880_threestack_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ },
+ [ALC885_MBP3] = {
+ .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc885_mbp3_init_verbs,
+ alc880_gpio1_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .channel_mode = alc885_mbp_6ch_modes,
+ .num_channel_mode = ARRAY_SIZE(alc885_mbp_6ch_modes),
+ .input_mux = &alc882_capture_source,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .unsol_event = alc_automute_amp_unsol_event,
+ .init_hook = alc885_mbp3_init_hook,
+ },
+ [ALC885_MB5] = {
+ .mixers = { alc885_mb5_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc885_mb5_init_verbs,
+ alc880_gpio1_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .channel_mode = alc885_mb5_6ch_modes,
+ .num_channel_mode = ARRAY_SIZE(alc885_mb5_6ch_modes),
+ .input_mux = &mb5_capture_source,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ },
+ [ALC885_MACPRO] = {
+ .mixers = { alc882_macpro_mixer },
+ .init_verbs = { alc882_macpro_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
+ .channel_mode = alc882_ch_modes,
+ .input_mux = &alc882_capture_source,
+ .init_hook = alc885_macpro_init_hook,
+ },
+ [ALC885_IMAC24] = {
+ .mixers = { alc885_imac24_mixer },
+ .init_verbs = { alc885_imac24_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
+ .channel_mode = alc882_ch_modes,
+ .input_mux = &alc882_capture_source,
+ .unsol_event = alc_automute_amp_unsol_event,
+ .init_hook = alc885_imac24_init_hook,
+ },
+ [ALC882_TARGA] = {
+ .mixers = { alc882_targa_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc882_targa_verbs},
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
+ .adc_nids = alc882_adc_nids,
+ .capsrc_nids = alc882_capsrc_nids,
+ .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
+ .channel_mode = alc882_3ST_6ch_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ .unsol_event = alc882_targa_unsol_event,
+ .init_hook = alc882_targa_init_hook,
+ },
+ [ALC882_ASUS_A7J] = {
+ .mixers = { alc882_asus_a7j_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc882_asus_a7j_verbs},
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
+ .adc_nids = alc882_adc_nids,
+ .capsrc_nids = alc882_capsrc_nids,
+ .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
+ .channel_mode = alc882_3ST_6ch_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ },
+ [ALC882_ASUS_A7M] = {
+ .mixers = { alc882_asus_a7m_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc882_eapd_verbs, alc880_gpio1_init_verbs,
+ alc882_asus_a7m_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
+ .channel_mode = alc880_threestack_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ },
[ALC883_3ST_2ch_DIG] = {
.mixers = { alc883_3ST_2ch_mixer },
.init_verbs = { alc883_init_verbs },
@@ -9317,7 +8794,7 @@ static struct alc_config_preset alc883_presets[] = {
ARRAY_SIZE(alc888_2_capture_sources),
.input_mux = alc888_acer_aspire_6530_sources,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc888_acer_aspire_4930g_init_hook,
+ .init_hook = alc888_acer_aspire_6530g_init_hook,
},
[ALC888_ACER_ASPIRE_8930G] = {
.mixers = { alc888_base_mixer,
@@ -9587,9 +9064,32 @@ static struct alc_config_preset alc883_presets[] = {
/*
+ * Pin config fixes
+ */
+enum {
+ PINFIX_ABIT_AW9D_MAX
+};
+
+static struct alc_pincfg alc882_abit_aw9d_pinfix[] = {
+ { 0x15, 0x01080104 }, /* side */
+ { 0x16, 0x01011012 }, /* rear */
+ { 0x17, 0x01016011 }, /* clfe */
+ { }
+};
+
+static const struct alc_pincfg *alc882_pin_fixes[] = {
+ [PINFIX_ABIT_AW9D_MAX] = alc882_abit_aw9d_pinfix,
+};
+
+static struct snd_pci_quirk alc882_pinfix_tbl[] = {
+ SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
+ {}
+};
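
Each override value in alc882_abit_aw9d_pinfix above is a 32-bit pin default-configuration word in the layout defined by the HD-Audio specification: port connectivity in bits 31-30, location in 29-24, default device in 23-20, connection type in 19-16, color in 15-12, misc in 11-8 and the association/sequence pair in 7-0. A small decoder sketch for such values (field layout per the HDA spec; illustration only):

#include <stdio.h>

/* Decode a few fields of an HDA pin default-config dword,
 * e.g. the 0x01011012 used for the rear pin above. */
static void decode_pincfg(unsigned int cfg)
{
        printf("cfg 0x%08x: conn=%u loc=0x%02x dev=0x%x jack=0x%x "
               "color=0x%x assoc=%u seq=%u\n",
               cfg,
               (cfg >> 30) & 0x3,       /* port connectivity */
               (cfg >> 24) & 0x3f,      /* location */
               (cfg >> 20) & 0xf,       /* default device (0 = line out) */
               (cfg >> 16) & 0xf,       /* connection type */
               (cfg >> 12) & 0xf,       /* color */
               (cfg >> 4) & 0xf,        /* default association */
               cfg & 0xf);              /* sequence */
}

int main(void)
{
        decode_pincfg(0x01080104);      /* side */
        decode_pincfg(0x01011012);      /* rear */
        decode_pincfg(0x01016011);      /* clfe */
        return 0;
}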
+
+/*
* BIOS auto configuration
*/
-static void alc883_auto_set_output_and_unmute(struct hda_codec *codec,
+static void alc882_auto_set_output_and_unmute(struct hda_codec *codec,
hda_nid_t nid, int pin_type,
int dac_idx)
{
@@ -9606,7 +9106,7 @@ static void alc883_auto_set_output_and_unmute(struct hda_codec *codec,
}
-static void alc883_auto_init_multi_out(struct hda_codec *codec)
+static void alc882_auto_init_multi_out(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
int i;
@@ -9615,12 +9115,12 @@ static void alc883_auto_init_multi_out(struct hda_codec *codec)
hda_nid_t nid = spec->autocfg.line_out_pins[i];
int pin_type = get_pin_type(spec->autocfg.line_out_type);
if (nid)
- alc883_auto_set_output_and_unmute(codec, nid, pin_type,
+ alc882_auto_set_output_and_unmute(codec, nid, pin_type,
i);
}
}
-static void alc883_auto_init_hp_out(struct hda_codec *codec)
+static void alc882_auto_init_hp_out(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
hda_nid_t pin;
@@ -9628,42 +9128,111 @@ static void alc883_auto_init_hp_out(struct hda_codec *codec)
pin = spec->autocfg.hp_pins[0];
if (pin) /* connect to front */
/* use dac 0 */
- alc883_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
+ alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
pin = spec->autocfg.speaker_pins[0];
if (pin)
- alc883_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
+ alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
}
-#define alc883_is_input_pin(nid) alc880_is_input_pin(nid)
-#define ALC883_PIN_CD_NID ALC880_PIN_CD_NID
-
-static void alc883_auto_init_analog_input(struct hda_codec *codec)
+static void alc882_auto_init_analog_input(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
int i;
for (i = 0; i < AUTO_PIN_LAST; i++) {
hda_nid_t nid = spec->autocfg.input_pins[i];
- if (alc883_is_input_pin(nid)) {
- alc_set_input_pin(codec, nid, i);
- if (nid != ALC883_PIN_CD_NID &&
- (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
+ if (!nid)
+ continue;
+ alc_set_input_pin(codec, nid, i);
+ if (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE,
+ AMP_OUT_MUTE);
+ }
+}
+
+static void alc882_auto_init_input_src(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ int c;
+
+ for (c = 0; c < spec->num_adc_nids; c++) {
+ hda_nid_t conn_list[HDA_MAX_NUM_INPUTS];
+ hda_nid_t nid = spec->capsrc_nids[c];
+ unsigned int mux_idx;
+ const struct hda_input_mux *imux;
+ int conns, mute, idx, item;
+
+ conns = snd_hda_get_connections(codec, nid, conn_list,
+ ARRAY_SIZE(conn_list));
+ if (conns < 0)
+ continue;
+ mux_idx = c >= spec->num_mux_defs ? 0 : c;
+ imux = &spec->input_mux[mux_idx];
+ for (idx = 0; idx < conns; idx++) {
+ /* if the current connection is the selected one,
+ * unmute it as default - otherwise mute it
+ */
+ mute = AMP_IN_MUTE(idx);
+ for (item = 0; item < imux->num_items; item++) {
+ if (imux->items[item].index == idx) {
+ if (spec->cur_mux[c] == item)
+ mute = AMP_IN_UNMUTE(idx);
+ break;
+ }
+ }
+ /* check if we have a selector or mixer
+ * we could check for the widget type instead, but
+ * just check for Amp-In presence (in case of mixer
+ * without amp-in there is something wrong, this
+ * function shouldn't be used or capsrc nid is wrong)
+ */
+ if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
snd_hda_codec_write(codec, nid, 0,
AC_VERB_SET_AMP_GAIN_MUTE,
- AMP_OUT_MUTE);
+ mute);
+ else if (mute != AMP_IN_MUTE(idx))
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ idx);
}
}
}
-#define alc883_auto_init_input_src alc882_auto_init_input_src
+/* add mic boosts if needed */
+static int alc_auto_add_mic_boost(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ int err;
+ hda_nid_t nid;
+
+ nid = spec->autocfg.input_pins[AUTO_PIN_MIC];
+ if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
+ err = add_control(spec, ALC_CTL_WIDGET_VOL,
+ "Mic Boost",
+ HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
+ if (err < 0)
+ return err;
+ }
+ nid = spec->autocfg.input_pins[AUTO_PIN_FRONT_MIC];
+ if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
+ err = add_control(spec, ALC_CTL_WIDGET_VOL,
+ "Front Mic Boost",
+ HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
/* almost identical with ALC880 parser... */
-static int alc883_parse_auto_config(struct hda_codec *codec)
+static int alc882_parse_auto_config(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- int err = alc880_parse_auto_config(codec);
- struct auto_pin_cfg *cfg = &spec->autocfg;
+ struct auto_pin_cfg *autocfg = &spec->autocfg;
+ unsigned int wcap;
int i;
+ int err = alc880_parse_auto_config(codec);
if (err < 0)
return err;
@@ -9676,43 +9245,45 @@ static int alc883_parse_auto_config(struct hda_codec *codec)
/* hack - override the init verbs */
spec->init_verbs[0] = alc883_auto_init_verbs;
+ /* if ADC 0x07 is available, initialize it, too */
+ wcap = get_wcaps(codec, 0x07);
+ wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ if (wcap == AC_WID_AUD_IN)
+ add_verb(spec, alc882_adc1_init_verbs);
- /* setup input_mux for ALC889 */
- if (codec->vendor_id == 0x10ec0889) {
- /* digital-mic input pin is excluded in alc880_auto_create..()
- * because it's under 0x18
- */
- if (cfg->input_pins[AUTO_PIN_MIC] == 0x12 ||
- cfg->input_pins[AUTO_PIN_FRONT_MIC] == 0x12) {
- struct hda_input_mux *imux = &spec->private_imux[0];
- for (i = 1; i < 3; i++)
- memcpy(&spec->private_imux[i],
- &spec->private_imux[0],
- sizeof(spec->private_imux[0]));
- imux->items[imux->num_items].label = "Int DMic";
- imux->items[imux->num_items].index = 0x0b;
- imux->num_items++;
- spec->num_mux_defs = 3;
- spec->input_mux = spec->private_imux;
- }
+ /* digital-mic input pin is excluded in alc880_auto_create..()
+ * because it's under 0x18
+ */
+ if (autocfg->input_pins[AUTO_PIN_MIC] == 0x12 ||
+ autocfg->input_pins[AUTO_PIN_FRONT_MIC] == 0x12) {
+ struct hda_input_mux *imux = &spec->private_imux[0];
+ for (i = 1; i < 3; i++)
+ memcpy(&spec->private_imux[i],
+ &spec->private_imux[0],
+ sizeof(spec->private_imux[0]));
+ imux->items[imux->num_items].label = "Int DMic";
+ imux->items[imux->num_items].index = 0x0b;
+ imux->num_items++;
+ spec->num_mux_defs = 3;
+ spec->input_mux = spec->private_imux;
}
return 1; /* config found */
}
/* additional initialization for auto-configuration model */
-static void alc883_auto_init(struct hda_codec *codec)
+static void alc882_auto_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- alc883_auto_init_multi_out(codec);
- alc883_auto_init_hp_out(codec);
- alc883_auto_init_analog_input(codec);
- alc883_auto_init_input_src(codec);
+ alc882_auto_init_multi_out(codec);
+ alc882_auto_init_hp_out(codec);
+ alc882_auto_init_analog_input(codec);
+ alc882_auto_init_input_src(codec);
if (spec->unsol_event)
alc_inithook(codec);
}
-static int patch_alc883(struct hda_codec *codec)
+static int patch_alc882(struct hda_codec *codec)
{
struct alc_spec *spec;
int err, board_config;
@@ -9723,28 +9294,36 @@ static int patch_alc883(struct hda_codec *codec)
codec->spec = spec;
- alc_fix_pll_init(codec, 0x20, 0x0a, 10);
+ switch (codec->vendor_id) {
+ case 0x10ec0882:
+ case 0x10ec0885:
+ break;
+ default:
+ /* ALC883 and variants */
+ alc_fix_pll_init(codec, 0x20, 0x0a, 10);
+ break;
+ }
- board_config = snd_hda_check_board_config(codec, ALC883_MODEL_LAST,
- alc883_models,
- alc883_cfg_tbl);
- if (board_config < 0 || board_config >= ALC883_MODEL_LAST) {
- /* Pick up systems that don't supply PCI SSID */
- switch (codec->subsystem_id) {
- case 0x106b3600: /* Macbook 3.1 */
- board_config = ALC889A_MB31;
- break;
- default:
- printk(KERN_INFO
- "hda_codec: Unknown model for %s, trying "
- "auto-probe from BIOS...\n", codec->chip_name);
- board_config = ALC883_AUTO;
- }
+ board_config = snd_hda_check_board_config(codec, ALC882_MODEL_LAST,
+ alc882_models,
+ alc882_cfg_tbl);
+
+ if (board_config < 0 || board_config >= ALC882_MODEL_LAST)
+ board_config = snd_hda_check_board_codec_sid_config(codec,
+ ALC882_MODEL_LAST, alc882_models, alc882_ssid_cfg_tbl);
+
+ if (board_config < 0 || board_config >= ALC882_MODEL_LAST) {
+ printk(KERN_INFO "hda_codec: Unknown model for %s, "
+ "trying auto-probe from BIOS...\n",
+ codec->chip_name);
+ board_config = ALC882_AUTO;
}
- if (board_config == ALC883_AUTO) {
+ alc_fix_pincfg(codec, alc882_pinfix_tbl, alc882_pin_fixes);
+
+ if (board_config == ALC882_AUTO) {
/* automatic parse from the BIOS config */
- err = alc883_parse_auto_config(codec);
+ err = alc882_parse_auto_config(codec);
if (err < 0) {
alc_free(codec);
return err;
@@ -9752,7 +9331,7 @@ static int patch_alc883(struct hda_codec *codec)
printk(KERN_INFO
"hda_codec: Cannot set up configuration "
"from BIOS. Using base mode...\n");
- board_config = ALC883_3ST_2ch_DIG;
+ board_config = ALC882_3ST_DIG;
}
}
@@ -9762,63 +9341,61 @@ static int patch_alc883(struct hda_codec *codec)
return err;
}
- if (board_config != ALC883_AUTO)
- setup_preset(spec, &alc883_presets[board_config]);
+ if (board_config != ALC882_AUTO)
+ setup_preset(spec, &alc882_presets[board_config]);
- switch (codec->vendor_id) {
- case 0x10ec0888:
- if (!spec->num_adc_nids) {
- spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids);
- spec->adc_nids = alc883_adc_nids;
- }
- if (!spec->capsrc_nids)
- spec->capsrc_nids = alc883_capsrc_nids;
+ spec->stream_analog_playback = &alc882_pcm_analog_playback;
+ spec->stream_analog_capture = &alc882_pcm_analog_capture;
+ /* FIXME: setup DAC5 */
+ /*spec->stream_analog_alt_playback = &alc880_pcm_analog_alt_playback;*/
+ spec->stream_analog_alt_capture = &alc880_pcm_analog_alt_capture;
+
+ spec->stream_digital_playback = &alc882_pcm_digital_playback;
+ spec->stream_digital_capture = &alc882_pcm_digital_capture;
+
+ if (codec->vendor_id == 0x10ec0888)
spec->init_amp = ALC_INIT_DEFAULT; /* always initialize */
- break;
- case 0x10ec0889:
- if (!spec->num_adc_nids) {
- spec->num_adc_nids = ARRAY_SIZE(alc889_adc_nids);
- spec->adc_nids = alc889_adc_nids;
- }
- if (!spec->capsrc_nids)
- spec->capsrc_nids = alc889_capsrc_nids;
- break;
- default:
- if (!spec->num_adc_nids) {
- spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids);
- spec->adc_nids = alc883_adc_nids;
+
+ if (!spec->adc_nids && spec->input_mux) {
+ int i;
+ spec->num_adc_nids = 0;
+ for (i = 0; i < ARRAY_SIZE(alc882_adc_nids); i++) {
+ hda_nid_t cap;
+ hda_nid_t nid = alc882_adc_nids[i];
+ unsigned int wcap = get_wcaps(codec, nid);
+ /* get type */
+ wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ if (wcap != AC_WID_AUD_IN)
+ continue;
+ spec->private_adc_nids[spec->num_adc_nids] = nid;
+ err = snd_hda_get_connections(codec, nid, &cap, 1);
+ if (err < 0)
+ continue;
+ spec->private_capsrc_nids[spec->num_adc_nids] = cap;
+ spec->num_adc_nids++;
}
- if (!spec->capsrc_nids)
- spec->capsrc_nids = alc883_capsrc_nids;
- break;
+ spec->adc_nids = spec->private_adc_nids;
+ spec->capsrc_nids = spec->private_capsrc_nids;
}
- spec->stream_analog_playback = &alc883_pcm_analog_playback;
- spec->stream_analog_capture = &alc883_pcm_analog_capture;
- spec->stream_analog_alt_capture = &alc883_pcm_analog_alt_capture;
-
- spec->stream_digital_playback = &alc883_pcm_digital_playback;
- spec->stream_digital_capture = &alc883_pcm_digital_capture;
-
- if (!spec->cap_mixer)
- set_capture_mixer(spec);
+ set_capture_mixer(spec);
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
spec->vmaster_nid = 0x0c;
codec->patch_ops = alc_patch_ops;
- if (board_config == ALC883_AUTO)
- spec->init_hook = alc883_auto_init;
-
+ if (board_config == ALC882_AUTO)
+ spec->init_hook = alc882_auto_init;
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (!spec->loopback.amplist)
- spec->loopback.amplist = alc883_loopbacks;
+ spec->loopback.amplist = alc882_loopbacks;
#endif
codec->proc_widget_hook = print_realtek_coef;
return 0;
}
+
/*
* ALC262 support
*/
@@ -12437,6 +12014,8 @@ static int alc268_parse_auto_config(struct hda_codec *codec)
if (err < 0)
return err;
+ alc_ssid_check(codec, 0x15, 0x1b, 0x14);
+
return 1;
}
@@ -12848,20 +12427,11 @@ static struct snd_kcontrol_new alc269_lifebook_mixer[] = {
{ }
};
-/* bind volumes of both NID 0x0c and 0x0d */
-static struct hda_bind_ctls alc269_epc_bind_vol = {
- .ops = &snd_hda_bind_vol,
- .values = {
- HDA_COMPOSE_AMP_VAL(0x02, 3, 0, HDA_OUTPUT),
- HDA_COMPOSE_AMP_VAL(0x03, 3, 0, HDA_OUTPUT),
- 0
- },
-};
-
static struct snd_kcontrol_new alc269_eeepc_mixer[] = {
- HDA_CODEC_MUTE("iSpeaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
- HDA_BIND_VOL("LineOut Playback Volume", &alc269_epc_bind_vol),
- HDA_CODEC_MUTE("LineOut Playback Switch", 0x15, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Speaker Playback Volume", 0x02, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
{ } /* end */
};
@@ -12874,12 +12444,7 @@ static struct snd_kcontrol_new alc269_epc_capture_mixer[] = {
};
/* FSC amilo */
-static struct snd_kcontrol_new alc269_fujitsu_mixer[] = {
- HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
- HDA_BIND_VOL("PCM Playback Volume", &alc269_epc_bind_vol),
- { } /* end */
-};
+#define alc269_fujitsu_mixer alc269_eeepc_mixer
static struct hda_verb alc269_quanta_fl1_verbs[] = {
{0x15, AC_VERB_SET_CONNECT_SEL, 0x01},
@@ -13345,6 +12910,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
if (!spec->cap_mixer && !spec->no_analog)
set_capture_mixer(spec);
+ alc_ssid_check(codec, 0x15, 0x1b, 0x14);
+
return 1;
}
@@ -17516,23 +17083,23 @@ static struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0861, .name = "ALC861", .patch = patch_alc861 },
{ .id = 0x10ec0862, .name = "ALC861-VD", .patch = patch_alc861vd },
{ .id = 0x10ec0662, .rev = 0x100002, .name = "ALC662 rev2",
- .patch = patch_alc883 },
+ .patch = patch_alc882 },
{ .id = 0x10ec0662, .rev = 0x100101, .name = "ALC662 rev1",
.patch = patch_alc662 },
{ .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
{ .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
{ .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
- { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc883 },
+ { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
{ .id = 0x10ec0885, .rev = 0x100101, .name = "ALC889A",
- .patch = patch_alc882 }, /* should be patch_alc883() in future */
+ .patch = patch_alc882 },
{ .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A",
- .patch = patch_alc882 }, /* should be patch_alc883() in future */
+ .patch = patch_alc882 },
{ .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 },
- { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc883 },
+ { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc882 },
{ .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200",
- .patch = patch_alc883 },
- { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc883 },
- { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc883 },
+ .patch = patch_alc882 },
+ { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc882 },
+ { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 },
{} /* terminator */
};
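The ADC auto-detection added to patch_alc882() above replaces the per-chip alc883/alc889 NID tables: the probe walks the candidate NIDs, keeps only audio-input widgets, and records each widget's first connection as its capture source. A minimal sketch of that test, using only helpers visible in the hunk above (nid is whichever candidate widget is being examined):

	unsigned int wcap = get_wcaps(codec, nid);
	/* keep only audio-input (ADC) widgets */
	if (((wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT) == AC_WID_AUD_IN) {
		hda_nid_t cap;
		/* the ADC's first connection is used as its capture source */
		if (snd_hda_get_connections(codec, nid, &cap, 1) >= 0)
			/* record nid as an ADC and cap as its capsrc */;
	}

With the ALC883 handling folded into patch_alc882(), the preset table above now routes the ALC662 rev2, ALC883, ALC887, ALC888, ALC889 and ALC1200 IDs through patch_alc882() as well.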
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 14f3c3e0f62d..41b5b3a18c1e 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1590,8 +1590,6 @@ static struct snd_pci_quirk stac9200_cfg_tbl[] = {
/* SigmaTel reference board */
SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
"DFI LanParty", STAC_REF),
- SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0xfb30,
- "SigmaTel",STAC_9205_REF),
SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
"DFI LanParty", STAC_REF),
/* Dell laptops have BIOS problem */
@@ -2344,6 +2342,8 @@ static struct snd_pci_quirk stac9205_cfg_tbl[] = {
/* SigmaTel reference board */
SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
"DFI LanParty", STAC_9205_REF),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0xfb30,
+ "SigmaTel", STAC_9205_REF),
SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
"DFI LanParty", STAC_9205_REF),
/* Dell */
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 0d0cdbdb4486..cecf1ffeeaaa 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -107,7 +107,7 @@ MODULE_PARM_DESC(dxr_enable, "Enable DXR support for Terratec DMX6FIRE.");
static const struct pci_device_id snd_ice1712_ids[] = {
- { PCI_VENDOR_ID_ICE, PCI_DEVICE_ID_ICE_1712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ICE1712 */
+ { PCI_VDEVICE(ICE, PCI_DEVICE_ID_ICE_1712), 0 }, /* ICE1712 */
{ 0, }
};
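The ID-table conversions in this patch rely on PCI_VDEVICE(), which supplies the vendor ID from the PCI_VENDOR_ID_* namespace and wildcards the subsystem and class fields, leaving only driver_data to spell out. Roughly, as defined in <linux/pci.h> of this era:

	#define PCI_VDEVICE(vendor, device)		\
		PCI_VENDOR_ID_##vendor, (device),	\
		PCI_ANY_ID, PCI_ANY_ID, 0, 0

so { PCI_VDEVICE(ICE, PCI_DEVICE_ID_ICE_1712), 0 } expands to the same seven initializers as the line it replaces.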
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
index adc909ec125c..9da2dae64c5b 100644
--- a/sound/pci/ice1712/ice1712.h
+++ b/sound/pci/ice1712/ice1712.h
@@ -379,6 +379,15 @@ struct snd_ice1712 {
unsigned char (*set_mclk)(struct snd_ice1712 *ice, unsigned int rate);
void (*set_spdif_clock)(struct snd_ice1712 *ice);
+#ifdef CONFIG_PM
+ int (*pm_suspend)(struct snd_ice1712 *);
+ int (*pm_resume)(struct snd_ice1712 *);
+ unsigned int pm_suspend_enabled:1;
+ unsigned int pm_saved_is_spdif_master:1;
+ unsigned int pm_saved_spdif_ctrl;
+ unsigned char pm_saved_spdif_cfg;
+ unsigned int pm_saved_route;
+#endif
};
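These new fields let card-specific sub-drivers opt in to the suspend/resume support added to ice1724.c below: the generic code bails out early unless pm_suspend_enabled is set, and calls the optional pm_suspend/pm_resume hooks around its own register save/restore. A sketch of the opt-in from a card init routine (my_card_resume is a hypothetical callback; the prodigy_hifi.c hunk later in this patch does exactly this):

	#ifdef CONFIG_PM
		ice->pm_resume = &my_card_resume;
		ice->pm_suspend_enabled = 1;
	#endif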
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 36ade77cf371..af6e00148621 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -93,7 +93,7 @@ MODULE_PARM_DESC(model, "Use the given board model.");
/* Both VT1720 and VT1724 have the same PCI IDs */
static const struct pci_device_id snd_vt1724_ids[] = {
- { PCI_VENDOR_ID_ICE, PCI_DEVICE_ID_VT1724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VDEVICE(ICE, PCI_DEVICE_ID_VT1724), 0 },
{ 0, }
};
@@ -560,6 +560,7 @@ static int snd_vt1724_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
spin_lock(&ice->reg_lock);
old = inb(ICEMT1724(ice, DMA_CONTROL));
if (cmd == SNDRV_PCM_TRIGGER_START)
@@ -570,6 +571,10 @@ static int snd_vt1724_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
spin_unlock(&ice->reg_lock);
break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ /* apps will have to restart stream */
+ break;
+
default:
return -EINVAL;
}
@@ -2262,7 +2267,7 @@ static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
-static void __devinit snd_vt1724_chip_reset(struct snd_ice1712 *ice)
+static void snd_vt1724_chip_reset(struct snd_ice1712 *ice)
{
outb(VT1724_RESET , ICEREG1724(ice, CONTROL));
inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */
@@ -2272,7 +2277,7 @@ static void __devinit snd_vt1724_chip_reset(struct snd_ice1712 *ice)
msleep(10);
}
-static int __devinit snd_vt1724_chip_init(struct snd_ice1712 *ice)
+static int snd_vt1724_chip_init(struct snd_ice1712 *ice)
{
outb(ice->eeprom.data[ICE_EEP2_SYSCONF], ICEREG1724(ice, SYS_CFG));
outb(ice->eeprom.data[ICE_EEP2_ACLINK], ICEREG1724(ice, AC97_CFG));
@@ -2287,6 +2292,14 @@ static int __devinit snd_vt1724_chip_init(struct snd_ice1712 *ice)
outb(0, ICEREG1724(ice, POWERDOWN));
+ /* MPU_RX and TX irq masks are cleared later dynamically */
+ outb(VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX , ICEREG1724(ice, IRQMASK));
+
+ /* don't handle FIFO overrun/underruns (just yet),
+ * since they cause machine lockups
+ */
+ outb(VT1724_MULTI_FIFO_ERR, ICEMT1724(ice, DMA_INT_MASK));
+
return 0;
}
@@ -2431,6 +2444,8 @@ static int __devinit snd_vt1724_create(struct snd_card *card,
snd_vt1724_proc_init(ice);
synchronize_irq(pci->irq);
+ card->private_data = ice;
+
err = pci_request_regions(pci, "ICE1724");
if (err < 0) {
kfree(ice);
@@ -2459,14 +2474,6 @@ static int __devinit snd_vt1724_create(struct snd_card *card,
return -EIO;
}
- /* MPU_RX and TX irq masks are cleared later dynamically */
- outb(VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX , ICEREG1724(ice, IRQMASK));
-
- /* don't handle FIFO overrun/underruns (just yet),
- * since they cause machine lockups
- */
- outb(VT1724_MULTI_FIFO_ERR, ICEMT1724(ice, DMA_INT_MASK));
-
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ice, &ops);
if (err < 0) {
snd_vt1724_free(ice);
@@ -2650,11 +2657,96 @@ static void __devexit snd_vt1724_remove(struct pci_dev *pci)
pci_set_drvdata(pci, NULL);
}
+#ifdef CONFIG_PM
+static int snd_vt1724_suspend(struct pci_dev *pci, pm_message_t state)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_ice1712 *ice = card->private_data;
+
+ if (!ice->pm_suspend_enabled)
+ return 0;
+
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+
+ snd_pcm_suspend_all(ice->pcm);
+ snd_pcm_suspend_all(ice->pcm_pro);
+ snd_pcm_suspend_all(ice->pcm_ds);
+ snd_ac97_suspend(ice->ac97);
+
+ spin_lock_irq(&ice->reg_lock);
+ ice->pm_saved_is_spdif_master = ice->is_spdif_master(ice);
+ ice->pm_saved_spdif_ctrl = inw(ICEMT1724(ice, SPDIF_CTRL));
+ ice->pm_saved_spdif_cfg = inb(ICEREG1724(ice, SPDIF_CFG));
+ ice->pm_saved_route = inl(ICEMT1724(ice, ROUTE_PLAYBACK));
+ spin_unlock_irq(&ice->reg_lock);
+
+ if (ice->pm_suspend)
+ ice->pm_suspend(ice);
+
+ pci_disable_device(pci);
+ pci_save_state(pci);
+ pci_set_power_state(pci, pci_choose_state(pci, state));
+ return 0;
+}
+
+static int snd_vt1724_resume(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_ice1712 *ice = card->private_data;
+
+ if (!ice->pm_suspend_enabled)
+ return 0;
+
+ pci_set_power_state(pci, PCI_D0);
+ pci_restore_state(pci);
+
+ if (pci_enable_device(pci) < 0) {
+ snd_card_disconnect(card);
+ return -EIO;
+ }
+
+ pci_set_master(pci);
+
+ snd_vt1724_chip_reset(ice);
+
+ if (snd_vt1724_chip_init(ice) < 0) {
+ snd_card_disconnect(card);
+ return -EIO;
+ }
+
+ if (ice->pm_resume)
+ ice->pm_resume(ice);
+
+ if (ice->pm_saved_is_spdif_master) {
+ /* switching to external clock via SPDIF */
+ ice->set_spdif_clock(ice);
+ } else {
+ /* internal on-card clock */
+ snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 1);
+ }
+
+ update_spdif_bits(ice, ice->pm_saved_spdif_ctrl);
+
+ outb(ice->pm_saved_spdif_cfg, ICEREG1724(ice, SPDIF_CFG));
+ outl(ice->pm_saved_route, ICEMT1724(ice, ROUTE_PLAYBACK));
+
+ if (ice->ac97)
+ snd_ac97_resume(ice->ac97);
+
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ return 0;
+}
+#endif
+
static struct pci_driver driver = {
.name = "ICE1724",
.id_table = snd_vt1724_ids,
.probe = snd_vt1724_probe,
.remove = __devexit_p(snd_vt1724_remove),
+#ifdef CONFIG_PM
+ .suspend = snd_vt1724_suspend,
+ .resume = snd_vt1724_resume,
+#endif
};
static int __init alsa_card_ice1724_init(void)
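Note that snd_vt1724_chip_reset() and snd_vt1724_chip_init() drop their __devinit annotations above because snd_vt1724_resume() now calls them long after the init sections may have been discarded, and the IRQ/FIFO-error mask setup moves into chip_init so it is reprogrammed on every wake-up. Condensed from snd_vt1724_resume() above, the resume sequence is:

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	pci_enable_device(pci);
	pci_set_master(pci);
	snd_vt1724_chip_reset(ice);
	snd_vt1724_chip_init(ice);	/* also restores IRQ and FIFO-error masks */
	/* then: card pm_resume hook, clock source, SPDIF control/config,
	 * playback routing, and AC'97 resume */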
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index 043a93879bd5..c75515f5be6f 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -1077,7 +1077,7 @@ static int __devinit prodigy_hifi_init(struct snd_ice1712 *ice)
/*
* initialize the chip
*/
-static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
+static void ak4396_init(struct snd_ice1712 *ice)
{
static unsigned short ak4396_inits[] = {
AK4396_CTRL1, 0x87, /* I2S Normal Mode, 24 bit */
@@ -1087,9 +1087,37 @@ static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
AK4396_RCH_ATT, 0x00,
};
- struct prodigy_hifi_spec *spec;
unsigned int i;
+ /* initialize ak4396 codec */
+ /* reset codec */
+ ak4396_write(ice, AK4396_CTRL1, 0x86);
+ msleep(100);
+ ak4396_write(ice, AK4396_CTRL1, 0x87);
+
+ for (i = 0; i < ARRAY_SIZE(ak4396_inits); i += 2)
+ ak4396_write(ice, ak4396_inits[i], ak4396_inits[i+1]);
+}
+
+#ifdef CONFIG_PM
+static int prodigy_hd2_resume(struct snd_ice1712 *ice)
+{
+ /* initialize ak4396 codec and restore previous mixer volumes */
+ struct prodigy_hifi_spec *spec = ice->spec;
+ int i;
+ mutex_lock(&ice->gpio_mutex);
+ ak4396_init(ice);
+ for (i = 0; i < 2; i++)
+ ak4396_write(ice, AK4396_LCH_ATT + i, spec->vol[i] & 0xff);
+ mutex_unlock(&ice->gpio_mutex);
+ return 0;
+}
+#endif
+
+static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
+{
+ struct prodigy_hifi_spec *spec;
+
ice->vt1720 = 0;
ice->vt1724 = 1;
@@ -1112,14 +1140,12 @@ static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
return -ENOMEM;
ice->spec = spec;
- /* initialize ak4396 codec */
- /* reset codec */
- ak4396_write(ice, AK4396_CTRL1, 0x86);
- msleep(100);
- ak4396_write(ice, AK4396_CTRL1, 0x87);
-
- for (i = 0; i < ARRAY_SIZE(ak4396_inits); i += 2)
- ak4396_write(ice, ak4396_inits[i], ak4396_inits[i+1]);
+#ifdef CONFIG_PM
+ ice->pm_resume = &prodigy_hd2_resume;
+ ice->pm_suspend_enabled = 1;
+#endif
+
+ ak4396_init(ice);
return 0;
}
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 8aa5687f392a..171ada535209 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -421,29 +421,29 @@ struct intel8x0 {
};
static struct pci_device_id snd_intel8x0_ids[] = {
- { 0x8086, 0x2415, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801AA */
- { 0x8086, 0x2425, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82901AB */
- { 0x8086, 0x2445, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801BA */
- { 0x8086, 0x2485, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH3 */
- { 0x8086, 0x24c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH4 */
- { 0x8086, 0x24d5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH5 */
- { 0x8086, 0x25a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ESB */
- { 0x8086, 0x266e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH6 */
- { 0x8086, 0x27de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH7 */
- { 0x8086, 0x2698, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ESB2 */
- { 0x8086, 0x7195, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 440MX */
- { 0x1039, 0x7012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_SIS }, /* SI7012 */
- { 0x10de, 0x01b1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE */
- { 0x10de, 0x003a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* MCP04 */
- { 0x10de, 0x006a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE2 */
- { 0x10de, 0x0059, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* CK804 */
- { 0x10de, 0x008a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* CK8 */
- { 0x10de, 0x00da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE3 */
- { 0x10de, 0x00ea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* CK8S */
- { 0x10de, 0x026b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* MCP51 */
- { 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD8111 */
- { 0x1022, 0x7445, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD768 */
- { 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI }, /* Ali5455 */
+ { PCI_VDEVICE(INTEL, 0x2415), DEVICE_INTEL }, /* 82801AA */
+ { PCI_VDEVICE(INTEL, 0x2425), DEVICE_INTEL }, /* 82901AB */
+ { PCI_VDEVICE(INTEL, 0x2445), DEVICE_INTEL }, /* 82801BA */
+ { PCI_VDEVICE(INTEL, 0x2485), DEVICE_INTEL }, /* ICH3 */
+ { PCI_VDEVICE(INTEL, 0x24c5), DEVICE_INTEL_ICH4 }, /* ICH4 */
+ { PCI_VDEVICE(INTEL, 0x24d5), DEVICE_INTEL_ICH4 }, /* ICH5 */
+ { PCI_VDEVICE(INTEL, 0x25a6), DEVICE_INTEL_ICH4 }, /* ESB */
+ { PCI_VDEVICE(INTEL, 0x266e), DEVICE_INTEL_ICH4 }, /* ICH6 */
+ { PCI_VDEVICE(INTEL, 0x27de), DEVICE_INTEL_ICH4 }, /* ICH7 */
+ { PCI_VDEVICE(INTEL, 0x2698), DEVICE_INTEL_ICH4 }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x7195), DEVICE_INTEL }, /* 440MX */
+ { PCI_VDEVICE(SI, 0x7012), DEVICE_SIS }, /* SI7012 */
+ { PCI_VDEVICE(NVIDIA, 0x01b1), DEVICE_NFORCE }, /* NFORCE */
+ { PCI_VDEVICE(NVIDIA, 0x003a), DEVICE_NFORCE }, /* MCP04 */
+ { PCI_VDEVICE(NVIDIA, 0x006a), DEVICE_NFORCE }, /* NFORCE2 */
+ { PCI_VDEVICE(NVIDIA, 0x0059), DEVICE_NFORCE }, /* CK804 */
+ { PCI_VDEVICE(NVIDIA, 0x008a), DEVICE_NFORCE }, /* CK8 */
+ { PCI_VDEVICE(NVIDIA, 0x00da), DEVICE_NFORCE }, /* NFORCE3 */
+ { PCI_VDEVICE(NVIDIA, 0x00ea), DEVICE_NFORCE }, /* CK8S */
+ { PCI_VDEVICE(NVIDIA, 0x026b), DEVICE_NFORCE }, /* MCP51 */
+ { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
+ { PCI_VDEVICE(AMD, 0x7445), DEVICE_INTEL }, /* AMD768 */
+ { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
{ 0, }
};
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 6ec0fc50d6be..9e7d12e7673f 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -220,24 +220,24 @@ struct intel8x0m {
};
static struct pci_device_id snd_intel8x0m_ids[] = {
- { 0x8086, 0x2416, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801AA */
- { 0x8086, 0x2426, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82901AB */
- { 0x8086, 0x2446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801BA */
- { 0x8086, 0x2486, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH3 */
- { 0x8086, 0x24c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH4 */
- { 0x8086, 0x24d6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH5 */
- { 0x8086, 0x266d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH6 */
- { 0x8086, 0x27dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH7 */
- { 0x8086, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 440MX */
- { 0x1022, 0x7446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD768 */
- { 0x1039, 0x7013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_SIS }, /* SI7013 */
- { 0x10de, 0x01c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE */
- { 0x10de, 0x0069, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE2 */
- { 0x10de, 0x0089, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE2s */
- { 0x10de, 0x00d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE3 */
+ { PCI_VDEVICE(INTEL, 0x2416), DEVICE_INTEL }, /* 82801AA */
+ { PCI_VDEVICE(INTEL, 0x2426), DEVICE_INTEL }, /* 82901AB */
+ { PCI_VDEVICE(INTEL, 0x2446), DEVICE_INTEL }, /* 82801BA */
+ { PCI_VDEVICE(INTEL, 0x2486), DEVICE_INTEL }, /* ICH3 */
+ { PCI_VDEVICE(INTEL, 0x24c6), DEVICE_INTEL }, /* ICH4 */
+ { PCI_VDEVICE(INTEL, 0x24d6), DEVICE_INTEL }, /* ICH5 */
+ { PCI_VDEVICE(INTEL, 0x266d), DEVICE_INTEL }, /* ICH6 */
+ { PCI_VDEVICE(INTEL, 0x27dd), DEVICE_INTEL }, /* ICH7 */
+ { PCI_VDEVICE(INTEL, 0x7196), DEVICE_INTEL }, /* 440MX */
+ { PCI_VDEVICE(AMD, 0x7446), DEVICE_INTEL }, /* AMD768 */
+ { PCI_VDEVICE(SI, 0x7013), DEVICE_SIS }, /* SI7013 */
+ { PCI_VDEVICE(NVIDIA, 0x01c1), DEVICE_NFORCE }, /* NFORCE */
+ { PCI_VDEVICE(NVIDIA, 0x0069), DEVICE_NFORCE }, /* NFORCE2 */
+ { PCI_VDEVICE(NVIDIA, 0x0089), DEVICE_NFORCE }, /* NFORCE2s */
+ { PCI_VDEVICE(NVIDIA, 0x00d9), DEVICE_NFORCE }, /* NFORCE3 */
#if 0
- { 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD8111 */
- { 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI }, /* Ali5455 */
+ { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
+ { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
#endif
{ 0, }
};
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index 18da2ef04d09..11b8c6514b3d 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -654,13 +654,12 @@ static int __devinit lx_init_ethersound_config(struct lx6464es *chip)
int i;
u32 orig_conf_es = lx_dsp_reg_read(chip, eReg_CONFES);
- u32 default_conf_es = (64 << IOCR_OUTPUTS_OFFSET) |
+ /* configure 64 io channels */
+ u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK) |
(64 << IOCR_INPUTS_OFFSET) |
+ (64 << IOCR_OUTPUTS_OFFSET) |
(FREQ_RATIO_SINGLE_MODE << FREQ_RATIO_OFFSET);
- u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK)
- | (default_conf_es & CONFES_WRITE_PART_MASK);
-
snd_printdd("->lx_init_ethersound\n");
chip->freq_ratio = FREQ_RATIO_SINGLE_MODE;
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 82bc5b9e7629..a83d1968a845 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard.");
*/
static struct pci_device_id snd_mixart_ids[] = {
- { 0x1057, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* MC8240 */
+ { PCI_VDEVICE(MOTOROLA, 0x0003), 0, }, /* MC8240 */
{ 0, }
};
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 522a040855d4..97a0731331a1 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -263,9 +263,9 @@ struct nm256 {
* PCI ids
*/
static struct pci_device_id snd_nm256_ids[] = {
- {PCI_VENDOR_ID_NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO), 0},
+ {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO), 0},
+ {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO), 0},
{0,},
};
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 304da169bfdc..5401c547c4e3 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -575,8 +575,10 @@ static int ac97_switch_put(struct snd_kcontrol *ctl,
static int ac97_volume_info(struct snd_kcontrol *ctl,
struct snd_ctl_elem_info *info)
{
+ int stereo = (ctl->private_value >> 16) & 1;
+
info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- info->count = 2;
+ info->count = stereo ? 2 : 1;
info->value.integer.min = 0;
info->value.integer.max = 0x1f;
return 0;
@@ -587,6 +589,7 @@ static int ac97_volume_get(struct snd_kcontrol *ctl,
{
struct oxygen *chip = ctl->private_data;
unsigned int codec = (ctl->private_value >> 24) & 1;
+ int stereo = (ctl->private_value >> 16) & 1;
unsigned int index = ctl->private_value & 0xff;
u16 reg;
@@ -594,7 +597,8 @@ static int ac97_volume_get(struct snd_kcontrol *ctl,
reg = oxygen_read_ac97(chip, codec, index);
mutex_unlock(&chip->mutex);
value->value.integer.value[0] = 31 - (reg & 0x1f);
- value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f);
+ if (stereo)
+ value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f);
return 0;
}
@@ -603,6 +607,7 @@ static int ac97_volume_put(struct snd_kcontrol *ctl,
{
struct oxygen *chip = ctl->private_data;
unsigned int codec = (ctl->private_value >> 24) & 1;
+ int stereo = (ctl->private_value >> 16) & 1;
unsigned int index = ctl->private_value & 0xff;
u16 oldreg, newreg;
int change;
@@ -612,8 +617,11 @@ static int ac97_volume_put(struct snd_kcontrol *ctl,
newreg = oldreg;
newreg = (newreg & ~0x1f) |
(31 - (value->value.integer.value[0] & 0x1f));
- newreg = (newreg & ~0x1f00) |
- ((31 - (value->value.integer.value[0] & 0x1f)) << 8);
+ if (stereo)
+ newreg = (newreg & ~0x1f00) |
+ ((31 - (value->value.integer.value[1] & 0x1f)) << 8);
+ else
+ newreg = (newreg & ~0x1f00) | ((newreg & 0x1f) << 8);
change = newreg != oldreg;
if (change)
oxygen_write_ac97(chip, codec, index, newreg);
@@ -673,7 +681,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
.private_value = ((codec) << 24) | ((invert) << 16) | \
((bitnr) << 8) | (index), \
}
-#define AC97_VOLUME(xname, codec, index) { \
+#define AC97_VOLUME(xname, codec, index, stereo) { \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
@@ -682,7 +690,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
.get = ac97_volume_get, \
.put = ac97_volume_put, \
.tlv = { .p = ac97_db_scale, }, \
- .private_value = ((codec) << 24) | (index), \
+ .private_value = ((codec) << 24) | ((stereo) << 16) | (index), \
}
static DECLARE_TLV_DB_SCALE(monitor_db_scale, -1000, 1000, 0);
@@ -882,18 +890,18 @@ static const struct {
};
static const struct snd_kcontrol_new ac97_controls[] = {
- AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC),
+ AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC, 0),
AC97_SWITCH("Mic Capture Switch", 0, AC97_MIC, 15, 1),
AC97_SWITCH("Mic Boost (+20dB)", 0, AC97_MIC, 6, 0),
AC97_SWITCH("Line Capture Switch", 0, AC97_LINE, 15, 1),
- AC97_VOLUME("CD Capture Volume", 0, AC97_CD),
+ AC97_VOLUME("CD Capture Volume", 0, AC97_CD, 1),
AC97_SWITCH("CD Capture Switch", 0, AC97_CD, 15, 1),
- AC97_VOLUME("Aux Capture Volume", 0, AC97_AUX),
+ AC97_VOLUME("Aux Capture Volume", 0, AC97_AUX, 1),
AC97_SWITCH("Aux Capture Switch", 0, AC97_AUX, 15, 1),
};
static const struct snd_kcontrol_new ac97_fp_controls[] = {
- AC97_VOLUME("Front Panel Playback Volume", 1, AC97_HEADPHONE),
+ AC97_VOLUME("Front Panel Playback Volume", 1, AC97_HEADPHONE, 1),
AC97_SWITCH("Front Panel Playback Switch", 1, AC97_HEADPHONE, 15, 1),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
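The stereo flag added to AC97_VOLUME() is packed into the control's private_value next to the codec number and AC'97 register index, so the shared info/get/put callbacks can distinguish the mono Mic control from the stereo CD, Aux and Front Panel ones. The layout, as unpacked by the callbacks above:

	/* private_value bit layout for AC97_VOLUME controls:
	 *   bit 24      codec number (0 = main, 1 = front panel)
	 *   bit 16      stereo flag
	 *   bits 0..7   AC'97 register index
	 */
	unsigned int codec = (ctl->private_value >> 24) & 1;
	int stereo = (ctl->private_value >> 16) & 1;
	unsigned int index = ctl->private_value & 0xff;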
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index bf971f7cfdc6..6ebcb6bdd712 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -635,6 +635,8 @@ static void xonar_d2_resume(struct oxygen *chip)
static void xonar_d1_resume(struct oxygen *chip)
{
+ oxygen_set_bits8(chip, OXYGEN_FUNCTION, OXYGEN_FUNCTION_RESET_CODEC);
+ msleep(1);
cs43xx_init(chip);
xonar_enable_output(chip);
}
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index d7b966e7c4cf..f977dba7cbd0 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -227,12 +227,9 @@ struct rme32 {
};
static struct pci_device_id snd_rme32_ids[] = {
- {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
- {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
- {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
+ {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32), 0,},
+ {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8), 0,},
+ {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO), 0,},
{0,}
};
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 55fb1c131f58..2ba5c0fd55db 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -232,14 +232,10 @@ struct rme96 {
};
static struct pci_device_id snd_rme96_ids[] = {
- { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96), 0, },
+ { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8), 0, },
+ { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO), 0, },
+ { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST), 0, },
{ 0, }
};
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 7dc60ad4772e..1f6406c4534d 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -243,7 +243,7 @@ struct sonicvibes {
};
static struct pci_device_id snd_sonic_ids[] = {
- { 0x5333, 0xca00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(S3, 0xca00), 0, },
{ 0, }
};
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 949fcaf6b70e..acfa4760da49 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -402,9 +402,9 @@ struct via82xx {
static struct pci_device_id snd_via82xx_ids[] = {
/* 0x1106, 0x3058 */
- { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_CARD_VIA686, }, /* 686A */
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C686_5), TYPE_CARD_VIA686, }, /* 686A */
/* 0x1106, 0x3059 */
- { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_CARD_VIA8233, }, /* VT8233 */
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8233_5), TYPE_CARD_VIA8233, }, /* VT8233 */
{ 0, }
};
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 0d54e3503c1e..47eb61561dfc 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -261,7 +261,7 @@ struct via82xx_modem {
};
static struct pci_device_id snd_via82xx_modem_ids[] = {
- { 0x1106, 0x3068, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_CARD_VIA82XX_MODEM, },
+ { PCI_VDEVICE(VIA, 0x3068), TYPE_CARD_VIA82XX_MODEM, },
{ 0, }
};
diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
index 4af66661f9b0..e6b18b90d451 100644
--- a/sound/pci/ymfpci/ymfpci.c
+++ b/sound/pci/ymfpci/ymfpci.c
@@ -67,12 +67,12 @@ module_param_array(rear_switch, bool, NULL, 0444);
MODULE_PARM_DESC(rear_switch, "Enable shared rear/line-in switch");
static struct pci_device_id snd_ymfpci_ids[] = {
- { 0x1073, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF724 */
- { 0x1073, 0x000d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF724F */
- { 0x1073, 0x000a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF740 */
- { 0x1073, 0x000c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF740C */
- { 0x1073, 0x0010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF744 */
- { 0x1073, 0x0012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF754 */
+ { PCI_VDEVICE(YAMAHA, 0x0004), 0, }, /* YMF724 */
+ { PCI_VDEVICE(YAMAHA, 0x000d), 0, }, /* YMF724F */
+ { PCI_VDEVICE(YAMAHA, 0x000a), 0, }, /* YMF740 */
+ { PCI_VDEVICE(YAMAHA, 0x000c), 0, }, /* YMF740C */
+ { PCI_VDEVICE(YAMAHA, 0x0010), 0, }, /* YMF744 */
+ { PCI_VDEVICE(YAMAHA, 0x0012), 0, }, /* YMF754 */
{ 0, }
};
diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c
index edfbdc024e66..9825b71d0e28 100644
--- a/sound/soc/blackfin/bf5xx-ad73311.c
+++ b/sound/soc/blackfin/bf5xx-ad73311.c
@@ -203,23 +203,23 @@ static struct snd_soc_device bf5xx_ad73311_snd_devdata = {
.codec_dev = &soc_codec_dev_ad73311,
};
-static struct platform_device *bf52x_ad73311_snd_device;
+static struct platform_device *bf5xx_ad73311_snd_device;
static int __init bf5xx_ad73311_init(void)
{
int ret;
pr_debug("%s enter\n", __func__);
- bf52x_ad73311_snd_device = platform_device_alloc("soc-audio", -1);
- if (!bf52x_ad73311_snd_device)
+ bf5xx_ad73311_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!bf5xx_ad73311_snd_device)
return -ENOMEM;
- platform_set_drvdata(bf52x_ad73311_snd_device, &bf5xx_ad73311_snd_devdata);
- bf5xx_ad73311_snd_devdata.dev = &bf52x_ad73311_snd_device->dev;
- ret = platform_device_add(bf52x_ad73311_snd_device);
+ platform_set_drvdata(bf5xx_ad73311_snd_device, &bf5xx_ad73311_snd_devdata);
+ bf5xx_ad73311_snd_devdata.dev = &bf5xx_ad73311_snd_device->dev;
+ ret = platform_device_add(bf5xx_ad73311_snd_device);
if (ret)
- platform_device_put(bf52x_ad73311_snd_device);
+ platform_device_put(bf5xx_ad73311_snd_device);
return ret;
}
@@ -227,7 +227,7 @@ static int __init bf5xx_ad73311_init(void)
static void __exit bf5xx_ad73311_exit(void)
{
pr_debug("%s enter\n", __func__);
- platform_device_unregister(bf52x_ad73311_snd_device);
+ platform_device_unregister(bf5xx_ad73311_snd_device);
}
module_init(bf5xx_ad73311_init);
diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c
index bc0cdded7116..3a00fa4dbe6d 100644
--- a/sound/soc/blackfin/bf5xx-ssm2602.c
+++ b/sound/soc/blackfin/bf5xx-ssm2602.c
@@ -148,24 +148,24 @@ static struct snd_soc_device bf5xx_ssm2602_snd_devdata = {
.codec_data = &bf5xx_ssm2602_setup,
};
-static struct platform_device *bf52x_ssm2602_snd_device;
+static struct platform_device *bf5xx_ssm2602_snd_device;
static int __init bf5xx_ssm2602_init(void)
{
int ret;
pr_debug("%s enter\n", __func__);
- bf52x_ssm2602_snd_device = platform_device_alloc("soc-audio", -1);
- if (!bf52x_ssm2602_snd_device)
+ bf5xx_ssm2602_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!bf5xx_ssm2602_snd_device)
return -ENOMEM;
- platform_set_drvdata(bf52x_ssm2602_snd_device,
+ platform_set_drvdata(bf5xx_ssm2602_snd_device,
&bf5xx_ssm2602_snd_devdata);
- bf5xx_ssm2602_snd_devdata.dev = &bf52x_ssm2602_snd_device->dev;
- ret = platform_device_add(bf52x_ssm2602_snd_device);
+ bf5xx_ssm2602_snd_devdata.dev = &bf5xx_ssm2602_snd_device->dev;
+ ret = platform_device_add(bf5xx_ssm2602_snd_device);
if (ret)
- platform_device_put(bf52x_ssm2602_snd_device);
+ platform_device_put(bf5xx_ssm2602_snd_device);
return ret;
}
@@ -173,7 +173,7 @@ static int __init bf5xx_ssm2602_init(void)
static void __exit bf5xx_ssm2602_exit(void)
{
pr_debug("%s enter\n", __func__);
- platform_device_unregister(bf52x_ssm2602_snd_device);
+ platform_device_unregister(bf5xx_ssm2602_snd_device);
}
module_init(bf5xx_ssm2602_init);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index bbc97fd76648..68ea5b6648df 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -30,6 +30,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM8350 if MFD_WM8350
select SND_SOC_WM8400 if MFD_WM8400
select SND_SOC_WM8510 if SND_SOC_I2C_AND_SPI
+ select SND_SOC_WM8523 if I2C
select SND_SOC_WM8580 if I2C
select SND_SOC_WM8728 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8731 if SND_SOC_I2C_AND_SPI
@@ -39,6 +40,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM8903 if I2C
select SND_SOC_WM8940 if I2C
select SND_SOC_WM8960 if I2C
+ select SND_SOC_WM8961 if I2C
select SND_SOC_WM8971 if I2C
select SND_SOC_WM8988 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8990 if I2C
@@ -129,6 +131,9 @@ config SND_SOC_WM8400
config SND_SOC_WM8510
tristate
+config SND_SOC_WM8523
+ tristate
+
config SND_SOC_WM8580
tristate
@@ -156,6 +161,9 @@ config SND_SOC_WM8940
config SND_SOC_WM8960
tristate
+config SND_SOC_WM8961
+ tristate
+
config SND_SOC_WM8971
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 8b7530546f4d..8ce28a34e8e9 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -18,6 +18,7 @@ snd-soc-uda1380-objs := uda1380.o
snd-soc-wm8350-objs := wm8350.o
snd-soc-wm8400-objs := wm8400.o
snd-soc-wm8510-objs := wm8510.o
+snd-soc-wm8523-objs := wm8523.o
snd-soc-wm8580-objs := wm8580.o
snd-soc-wm8728-objs := wm8728.o
snd-soc-wm8731-objs := wm8731.o
@@ -27,6 +28,7 @@ snd-soc-wm8900-objs := wm8900.o
snd-soc-wm8903-objs := wm8903.o
snd-soc-wm8940-objs := wm8940.o
snd-soc-wm8960-objs := wm8960.o
+snd-soc-wm8961-objs := wm8961.o
snd-soc-wm8971-objs := wm8971.o
snd-soc-wm8988-objs := wm8988.o
snd-soc-wm8990-objs := wm8990.o
@@ -55,6 +57,7 @@ obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
obj-$(CONFIG_SND_SOC_WM8350) += snd-soc-wm8350.o
obj-$(CONFIG_SND_SOC_WM8400) += snd-soc-wm8400.o
obj-$(CONFIG_SND_SOC_WM8510) += snd-soc-wm8510.o
+obj-$(CONFIG_SND_SOC_WM8523) += snd-soc-wm8523.o
obj-$(CONFIG_SND_SOC_WM8580) += snd-soc-wm8580.o
obj-$(CONFIG_SND_SOC_WM8728) += snd-soc-wm8728.o
obj-$(CONFIG_SND_SOC_WM8731) += snd-soc-wm8731.o
@@ -65,6 +68,7 @@ obj-$(CONFIG_SND_SOC_WM8903) += snd-soc-wm8903.o
obj-$(CONFIG_SND_SOC_WM8971) += snd-soc-wm8971.o
obj-$(CONFIG_SND_SOC_WM8940) += snd-soc-wm8940.o
obj-$(CONFIG_SND_SOC_WM8960) += snd-soc-wm8960.o
+obj-$(CONFIG_SND_SOC_WM8961) += snd-soc-wm8961.o
obj-$(CONFIG_SND_SOC_WM8988) += snd-soc-wm8988.o
obj-$(CONFIG_SND_SOC_WM8990) += snd-soc-wm8990.o
obj-$(CONFIG_SND_SOC_WM9081) += snd-soc-wm9081.o
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 4dbb853eef5a..49ceb620678c 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -712,7 +712,19 @@ static int bypass_event(struct snd_soc_dapm_widget *w,
reg = twl4030_read_reg_cache(w->codec, m->reg);
- if (m->reg <= TWL4030_REG_ARXR2_APGA_CTL) {
+ /*
+ * bypass_state[0:3] - analog HiFi bypass
+ * bypass_state[4] - analog voice bypass
+ * bypass_state[5] - digital voice bypass
+ * bypass_state[6:7] - digital HiFi bypass
+ */
+ if (m->reg == TWL4030_REG_VSTPGA) {
+ /* Voice digital bypass */
+ if (reg)
+ twl4030->bypass_state |= (1 << 5);
+ else
+ twl4030->bypass_state &= ~(1 << 5);
+ } else if (m->reg <= TWL4030_REG_ARXR2_APGA_CTL) {
/* Analog bypass */
if (reg & (1 << m->shift))
twl4030->bypass_state |=
@@ -726,12 +738,6 @@ static int bypass_event(struct snd_soc_dapm_widget *w,
twl4030->bypass_state |= (1 << 4);
else
twl4030->bypass_state &= ~(1 << 4);
- } else if (m->reg == TWL4030_REG_VSTPGA) {
- /* Voice digital bypass */
- if (reg)
- twl4030->bypass_state |= (1 << 5);
- else
- twl4030->bypass_state &= ~(1 << 5);
} else {
/* Digital bypass */
if (reg & (0x7 << m->shift))
@@ -924,7 +930,7 @@ static const struct soc_enum twl4030_op_modes_enum =
ARRAY_SIZE(twl4030_op_modes_texts),
twl4030_op_modes_texts);
-int snd_soc_put_twl4030_opmode_enum_double(struct snd_kcontrol *kcontrol,
+static int snd_soc_put_twl4030_opmode_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
@@ -1005,6 +1011,16 @@ static DECLARE_TLV_DB_SCALE(digital_capture_tlv, 0, 100, 0);
*/
static DECLARE_TLV_DB_SCALE(input_gain_tlv, 0, 600, 0);
+/* AVADC clock priority */
+static const char *twl4030_avadc_clk_priority_texts[] = {
+ "Voice high priority", "HiFi high priority"
+};
+
+static const struct soc_enum twl4030_avadc_clk_priority_enum =
+ SOC_ENUM_SINGLE(TWL4030_REG_AVADC_CTL, 2,
+ ARRAY_SIZE(twl4030_avadc_clk_priority_texts),
+ twl4030_avadc_clk_priority_texts);
+
static const char *twl4030_rampdelay_texts[] = {
"27/20/14 ms", "55/40/27 ms", "109/81/55 ms", "218/161/109 ms",
"437/323/218 ms", "874/645/437 ms", "1748/1291/874 ms",
@@ -1106,6 +1122,8 @@ static const struct snd_kcontrol_new twl4030_snd_controls[] = {
SOC_DOUBLE_TLV("Analog Capture Volume", TWL4030_REG_ANAMIC_GAIN,
0, 3, 5, 0, input_gain_tlv),
+ SOC_ENUM("AVADC Clock Priority", twl4030_avadc_clk_priority_enum),
+
SOC_ENUM("HS ramp delay", twl4030_rampdelay_enum),
SOC_ENUM("Vibra H-bridge mode", twl4030_vibradirmode_enum),
@@ -1240,14 +1258,14 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
/* HandsfreeL/R */
SND_SOC_DAPM_MUX("HandsfreeL Mux", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_handsfreel_control),
- SND_SOC_DAPM_SWITCH("HandsfreeL Switch", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_SWITCH("HandsfreeL", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_handsfreelmute_control),
SND_SOC_DAPM_PGA_E("HandsfreeL PGA", SND_SOC_NOPM,
0, 0, NULL, 0, handsfreelpga_event,
SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MUX("HandsfreeR Mux", SND_SOC_NOPM, 5, 0,
&twl4030_dapm_handsfreer_control),
- SND_SOC_DAPM_SWITCH("HandsfreeR Switch", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_SWITCH("HandsfreeR", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_handsfreermute_control),
SND_SOC_DAPM_PGA_E("HandsfreeR PGA", SND_SOC_NOPM,
0, 0, NULL, 0, handsfreerpga_event,
@@ -1359,15 +1377,15 @@ static const struct snd_soc_dapm_route intercon[] = {
{"HandsfreeL Mux", "AudioL1", "Analog L1 Playback Mixer"},
{"HandsfreeL Mux", "AudioL2", "Analog L2 Playback Mixer"},
{"HandsfreeL Mux", "AudioR2", "Analog R2 Playback Mixer"},
- {"HandsfreeL Switch", "Switch", "HandsfreeL Mux"},
- {"HandsfreeL PGA", NULL, "HandsfreeL Switch"},
+ {"HandsfreeL", "Switch", "HandsfreeL Mux"},
+ {"HandsfreeL PGA", NULL, "HandsfreeL"},
/* HandsfreeR */
{"HandsfreeR Mux", "Voice", "Analog Voice Playback Mixer"},
{"HandsfreeR Mux", "AudioR1", "Analog R1 Playback Mixer"},
{"HandsfreeR Mux", "AudioR2", "Analog R2 Playback Mixer"},
{"HandsfreeR Mux", "AudioL2", "Analog L2 Playback Mixer"},
- {"HandsfreeR Switch", "Switch", "HandsfreeR Mux"},
- {"HandsfreeR PGA", NULL, "HandsfreeR Switch"},
+ {"HandsfreeR", "Switch", "HandsfreeR Mux"},
+ {"HandsfreeR PGA", NULL, "HandsfreeR"},
/* Vibra */
{"Vibra Mux", "AudioL1", "DAC Left1"},
{"Vibra Mux", "AudioR1", "DAC Right1"},
@@ -1609,8 +1627,6 @@ static int twl4030_hw_params(struct snd_pcm_substream *substream,
/* If the substream has 4 channel, do the necessary setup */
if (params_channels(params) == 4) {
- u8 format, mode;
-
format = twl4030_read_reg_cache(codec, TWL4030_REG_AUDIO_IF);
mode = twl4030_read_reg_cache(codec, TWL4030_REG_CODEC_MODE);
@@ -1948,7 +1964,7 @@ static int twl4030_voice_set_dai_fmt(struct snd_soc_dai *codec_dai,
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_CBM_CFM:
format &= ~(TWL4030_VIF_SLAVE_EN);
break;
case SND_SOC_DAIFMT_CBS_CFS:
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 5b21594e0e58..92ec03442154 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -5,9 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com>
- * Improved support for DAPM and audio routing/mixing capabilities,
- * added TLV support.
+ * Copyright (c) 2007-2009 Philipp Zabel <philipp.zabel@gmail.com>
*
* Modified by Richard Purdie <richard@openedhand.com> to fit into SoC
* codec model.
@@ -19,26 +17,32 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/ioctl.h>
+#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/workqueue.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/initval.h>
-#include <sound/info.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/tlv.h>
+#include <sound/uda1380.h>
#include "uda1380.h"
-static struct work_struct uda1380_work;
static struct snd_soc_codec *uda1380_codec;
+/* codec private data */
+struct uda1380_priv {
+ struct snd_soc_codec codec;
+ u16 reg_cache[UDA1380_CACHEREGNUM];
+ unsigned int dac_clk;
+ struct work_struct work;
+};
+
/*
* uda1380 register cache
*/
@@ -473,6 +477,7 @@ static int uda1380_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
+ struct uda1380_priv *uda1380 = codec->private_data;
int mixer = uda1380_read_reg_cache(codec, UDA1380_MIXER);
switch (cmd) {
@@ -480,13 +485,13 @@ static int uda1380_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
uda1380_write_reg_cache(codec, UDA1380_MIXER,
mixer & ~R14_SILENCE);
- schedule_work(&uda1380_work);
+ schedule_work(&uda1380->work);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
uda1380_write_reg_cache(codec, UDA1380_MIXER,
mixer | R14_SILENCE);
- schedule_work(&uda1380_work);
+ schedule_work(&uda1380->work);
break;
}
return 0;
@@ -670,44 +675,33 @@ static int uda1380_resume(struct platform_device *pdev)
return 0;
}
-/*
- * initialise the UDA1380 driver
- * register mixer and dsp interfaces with the kernel
- */
-static int uda1380_init(struct snd_soc_device *socdev, int dac_clk)
+static int uda1380_probe(struct platform_device *pdev)
{
- struct snd_soc_codec *codec = socdev->card->codec;
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ struct uda1380_platform_data *pdata;
int ret = 0;
- codec->name = "UDA1380";
- codec->owner = THIS_MODULE;
- codec->read = uda1380_read_reg_cache;
- codec->write = uda1380_write;
- codec->set_bias_level = uda1380_set_bias_level;
- codec->dai = uda1380_dai;
- codec->num_dai = ARRAY_SIZE(uda1380_dai);
- codec->reg_cache = kmemdup(uda1380_reg, sizeof(uda1380_reg),
- GFP_KERNEL);
- if (codec->reg_cache == NULL)
- return -ENOMEM;
- codec->reg_cache_size = ARRAY_SIZE(uda1380_reg);
- codec->reg_cache_step = 1;
- uda1380_reset(codec);
+ if (uda1380_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
- uda1380_codec = codec;
- INIT_WORK(&uda1380_work, uda1380_flush_work);
+ socdev->card->codec = uda1380_codec;
+ codec = uda1380_codec;
+ pdata = codec->dev->platform_data;
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
- pr_err("uda1380: failed to create pcms\n");
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
goto pcm_err;
}
/* power on device */
uda1380_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* set clock input */
- switch (dac_clk) {
+ switch (pdata->dac_clk) {
case UDA1380_DAC_CLK_SYSCLK:
uda1380_write(codec, UDA1380_CLK, 0);
break;
@@ -716,13 +710,12 @@ static int uda1380_init(struct snd_soc_device *socdev, int dac_clk)
break;
}
- /* uda1380 init */
snd_soc_add_controls(codec, uda1380_snd_controls,
ARRAY_SIZE(uda1380_snd_controls));
uda1380_add_widgets(codec);
ret = snd_soc_init_card(socdev);
if (ret < 0) {
- pr_err("uda1380: failed to register card\n");
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
goto card_err;
}
@@ -732,165 +725,201 @@ card_err:
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
pcm_err:
- kfree(codec->reg_cache);
return ret;
}
-static struct snd_soc_device *uda1380_socdev;
-
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-
-static int uda1380_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+/* power down chip */
+static int uda1380_remove(struct platform_device *pdev)
{
- struct snd_soc_device *socdev = uda1380_socdev;
- struct uda1380_setup_data *setup = socdev->codec_data;
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- int ret;
-
- i2c_set_clientdata(i2c, codec);
- codec->control_data = i2c;
- ret = uda1380_init(socdev, setup->dac_clk);
- if (ret < 0)
- pr_err("uda1380: failed to initialise UDA1380\n");
+ if (codec->control_data)
+ uda1380_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return ret;
-}
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
-static int uda1380_i2c_remove(struct i2c_client *client)
-{
- struct snd_soc_codec *codec = i2c_get_clientdata(client);
- kfree(codec->reg_cache);
return 0;
}
-static const struct i2c_device_id uda1380_i2c_id[] = {
- { "uda1380", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, uda1380_i2c_id);
-
-static struct i2c_driver uda1380_i2c_driver = {
- .driver = {
- .name = "UDA1380 I2C Codec",
- .owner = THIS_MODULE,
- },
- .probe = uda1380_i2c_probe,
- .remove = uda1380_i2c_remove,
- .id_table = uda1380_i2c_id,
+struct snd_soc_codec_device soc_codec_dev_uda1380 = {
+ .probe = uda1380_probe,
+ .remove = uda1380_remove,
+ .suspend = uda1380_suspend,
+ .resume = uda1380_resume,
};
+EXPORT_SYMBOL_GPL(soc_codec_dev_uda1380);
-static int uda1380_add_i2c_device(struct platform_device *pdev,
- const struct uda1380_setup_data *setup)
+static int uda1380_register(struct uda1380_priv *uda1380)
{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
- int ret;
+ int ret, i;
+ struct snd_soc_codec *codec = &uda1380->codec;
+ struct uda1380_platform_data *pdata = codec->dev->platform_data;
- ret = i2c_add_driver(&uda1380_i2c_driver);
- if (ret != 0) {
- dev_err(&pdev->dev, "can't add i2c driver\n");
- return ret;
+ if (uda1380_codec) {
+ dev_err(codec->dev, "Another UDA1380 is registered\n");
+ return -EINVAL;
+ }
+
+ if (!pdata || !pdata->gpio_power || !pdata->gpio_reset)
+ return -EINVAL;
+
+ ret = gpio_request(pdata->gpio_power, "uda1380 power");
+ if (ret)
+ goto err_out;
+ ret = gpio_request(pdata->gpio_reset, "uda1380 reset");
+ if (ret)
+ goto err_gpio;
+
+ gpio_direction_output(pdata->gpio_power, 1);
+
+ /* we may need to have the clock running here - pH5 */
+ gpio_direction_output(pdata->gpio_reset, 1);
+ udelay(5);
+ gpio_set_value(pdata->gpio_reset, 0);
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = uda1380;
+ codec->name = "UDA1380";
+ codec->owner = THIS_MODULE;
+ codec->read = uda1380_read_reg_cache;
+ codec->write = uda1380_write;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = uda1380_set_bias_level;
+ codec->dai = uda1380_dai;
+ codec->num_dai = ARRAY_SIZE(uda1380_dai);
+ codec->reg_cache_size = ARRAY_SIZE(uda1380_reg);
+ codec->reg_cache = &uda1380->reg_cache;
+ codec->reg_cache_step = 1;
+
+ memcpy(codec->reg_cache, uda1380_reg, sizeof(uda1380_reg));
+
+ ret = uda1380_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset\n");
+ goto err_reset;
}
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = setup->i2c_address;
- strlcpy(info.type, "uda1380", I2C_NAME_SIZE);
+ INIT_WORK(&uda1380->work, uda1380_flush_work);
+
+ for (i = 0; i < ARRAY_SIZE(uda1380_dai); i++)
+ uda1380_dai[i].dev = codec->dev;
- adapter = i2c_get_adapter(setup->i2c_bus);
- if (!adapter) {
- dev_err(&pdev->dev, "can't get i2c adapter %d\n",
- setup->i2c_bus);
- goto err_driver;
+ uda1380_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ goto err_reset;
}
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- dev_err(&pdev->dev, "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- goto err_driver;
+ ret = snd_soc_register_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAIs: %d\n", ret);
+ goto err_dai;
}
return 0;
-err_driver:
- i2c_del_driver(&uda1380_i2c_driver);
- return -ENODEV;
+err_dai:
+ snd_soc_unregister_codec(codec);
+err_reset:
+ gpio_set_value(pdata->gpio_power, 0);
+ gpio_free(pdata->gpio_reset);
+err_gpio:
+ gpio_free(pdata->gpio_power);
+err_out:
+ return ret;
}
-#endif
-static int uda1380_probe(struct platform_device *pdev)
+static void uda1380_unregister(struct uda1380_priv *uda1380)
{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct uda1380_setup_data *setup;
+ struct snd_soc_codec *codec = &uda1380->codec;
+ struct uda1380_platform_data *pdata = codec->dev->platform_data;
+
+ snd_soc_unregister_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
+ snd_soc_unregister_codec(&uda1380->codec);
+
+ gpio_set_value(pdata->gpio_power, 0);
+ gpio_free(pdata->gpio_reset);
+ gpio_free(pdata->gpio_power);
+
+ kfree(uda1380);
+ uda1380_codec = NULL;
+}
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int uda1380_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct uda1380_priv *uda1380;
struct snd_soc_codec *codec;
int ret;
- setup = socdev->codec_data;
- codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
- if (codec == NULL)
+ uda1380 = kzalloc(sizeof(struct uda1380_priv), GFP_KERNEL);
+ if (uda1380 == NULL)
return -ENOMEM;
- socdev->card->codec = codec;
- mutex_init(&codec->mutex);
- INIT_LIST_HEAD(&codec->dapm_widgets);
- INIT_LIST_HEAD(&codec->dapm_paths);
+ codec = &uda1380->codec;
+ codec->hw_write = (hw_write_t)i2c_master_send;
- uda1380_socdev = socdev;
- ret = -ENODEV;
+ i2c_set_clientdata(i2c, uda1380);
+ codec->control_data = i2c;
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- if (setup->i2c_address) {
- codec->hw_write = (hw_write_t)i2c_master_send;
- ret = uda1380_add_i2c_device(pdev, setup);
- }
-#endif
+ codec->dev = &i2c->dev;
+ ret = uda1380_register(uda1380);
if (ret != 0)
- kfree(codec);
+ kfree(uda1380);
+
return ret;
}
-/* power down chip */
-static int uda1380_remove(struct platform_device *pdev)
+static int __devexit uda1380_i2c_remove(struct i2c_client *i2c)
{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_codec *codec = socdev->card->codec;
-
- if (codec->control_data)
- uda1380_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- snd_soc_free_pcms(socdev);
- snd_soc_dapm_free(socdev);
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- i2c_unregister_device(codec->control_data);
- i2c_del_driver(&uda1380_i2c_driver);
-#endif
- kfree(codec);
-
+ struct uda1380_priv *uda1380 = i2c_get_clientdata(i2c);
+ uda1380_unregister(uda1380);
return 0;
}
-struct snd_soc_codec_device soc_codec_dev_uda1380 = {
- .probe = uda1380_probe,
- .remove = uda1380_remove,
- .suspend = uda1380_suspend,
- .resume = uda1380_resume,
+static const struct i2c_device_id uda1380_i2c_id[] = {
+ { "uda1380", 0 },
+ { }
};
-EXPORT_SYMBOL_GPL(soc_codec_dev_uda1380);
+MODULE_DEVICE_TABLE(i2c, uda1380_i2c_id);
+
+static struct i2c_driver uda1380_i2c_driver = {
+ .driver = {
+ .name = "UDA1380 I2C Codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = uda1380_i2c_probe,
+ .remove = __devexit_p(uda1380_i2c_remove),
+ .id_table = uda1380_i2c_id,
+};
+#endif
static int __init uda1380_modinit(void)
{
- return snd_soc_register_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
+ int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&uda1380_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
+#endif
+ return 0;
}
module_init(uda1380_modinit);
static void __exit uda1380_exit(void)
{
- snd_soc_unregister_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&uda1380_i2c_driver);
+#endif
}
module_exit(uda1380_exit);
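With this conversion the UDA1380 becomes an ordinary I2C driver: the i2c probe allocates the private struct, registers the codec and its DAIs, and the uda1380_setup_data block (removed from uda1380.h below) is replaced by platform data attached to the I2C client. Reconstructed from the fields the driver dereferences (gpio_power, gpio_reset, dac_clk), the platform-data struct provided by <sound/uda1380.h> looks roughly like:

	struct uda1380_platform_data {
		int gpio_reset;
		int gpio_power;
		int dac_clk;
	#define UDA1380_DAC_CLK_SYSCLK 0
	#define UDA1380_DAC_CLK_WSPLL  1
	};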
diff --git a/sound/soc/codecs/uda1380.h b/sound/soc/codecs/uda1380.h
index c55c17a52a12..9cefa8a54770 100644
--- a/sound/soc/codecs/uda1380.h
+++ b/sound/soc/codecs/uda1380.h
@@ -72,14 +72,6 @@
#define R22_SKIP_DCFIL 0x0002
#define R23_AGC_EN 0x0001
-struct uda1380_setup_data {
- int i2c_bus;
- unsigned short i2c_address;
- int dac_clk;
-#define UDA1380_DAC_CLK_SYSCLK 0
-#define UDA1380_DAC_CLK_WSPLL 1
-};
-
#define UDA1380_DAI_DUPLEX 0 /* playback and capture on single DAI */
#define UDA1380_DAI_PLAYBACK 1 /* playback DAI */
#define UDA1380_DAI_CAPTURE 2 /* capture DAI */
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index e7348d341b76..4ded0e3a35e0 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -406,7 +406,6 @@ static const char *wm8350_deemp[] = { "None", "32kHz", "44.1kHz", "48kHz" };
static const char *wm8350_pol[] = { "Normal", "Inv R", "Inv L", "Inv L & R" };
static const char *wm8350_dacmutem[] = { "Normal", "Soft" };
static const char *wm8350_dacmutes[] = { "Fast", "Slow" };
-static const char *wm8350_dacfilter[] = { "Normal", "Sloping" };
static const char *wm8350_adcfilter[] = { "None", "High Pass" };
static const char *wm8350_adchp[] = { "44.1kHz", "8kHz", "16kHz", "32kHz" };
static const char *wm8350_lr[] = { "Left", "Right" };
@@ -416,7 +415,6 @@ static const struct soc_enum wm8350_enum[] = {
SOC_ENUM_SINGLE(WM8350_DAC_CONTROL, 0, 4, wm8350_pol),
SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 14, 2, wm8350_dacmutem),
SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 13, 2, wm8350_dacmutes),
- SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 12, 2, wm8350_dacfilter),
SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 15, 2, wm8350_adcfilter),
SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 8, 4, wm8350_adchp),
SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 0, 4, wm8350_pol),
@@ -444,10 +442,9 @@ static const struct snd_kcontrol_new wm8350_snd_controls[] = {
0, 255, 0, dac_pcm_tlv),
SOC_ENUM("Playback PCM Mute Function", wm8350_enum[2]),
SOC_ENUM("Playback PCM Mute Speed", wm8350_enum[3]),
- SOC_ENUM("Playback PCM Filter", wm8350_enum[4]),
- SOC_ENUM("Capture PCM Filter", wm8350_enum[5]),
- SOC_ENUM("Capture PCM HP Filter", wm8350_enum[6]),
- SOC_ENUM("Capture ADC Inversion", wm8350_enum[7]),
+ SOC_ENUM("Capture PCM Filter", wm8350_enum[4]),
+ SOC_ENUM("Capture PCM HP Filter", wm8350_enum[5]),
+ SOC_ENUM("Capture ADC Inversion", wm8350_enum[6]),
SOC_WM8350_DOUBLE_R_TLV("Capture PCM Volume",
WM8350_ADC_DIGITAL_VOLUME_L,
WM8350_ADC_DIGITAL_VOLUME_R,
@@ -993,6 +990,7 @@ static int wm8350_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai)
{
struct snd_soc_codec *codec = codec_dai->codec;
+ struct wm8350 *wm8350 = codec->control_data;
u16 iface = wm8350_codec_read(codec, WM8350_AI_FORMATING) &
~WM8350_AIF_WL_MASK;
@@ -1012,6 +1010,19 @@ static int wm8350_pcm_hw_params(struct snd_pcm_substream *substream,
}
wm8350_codec_write(codec, WM8350_AI_FORMATING, iface);
+
+ /* The sloping stopband filter is recommended for use with
+ * lower sample rates to improve performance.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (params_rate(params) < 24000)
+ wm8350_set_bits(wm8350, WM8350_DAC_MUTE_VOLUME,
+ WM8350_DAC_SB_FILT);
+ else
+ wm8350_clear_bits(wm8350, WM8350_DAC_MUTE_VOLUME,
+ WM8350_DAC_SB_FILT);
+ }
+
return 0;
}
@@ -1660,6 +1671,21 @@ static int __devexit wm8350_codec_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8350_codec_suspend(struct platform_device *pdev, pm_message_t m)
+{
+ return snd_soc_suspend_device(&pdev->dev);
+}
+
+static int wm8350_codec_resume(struct platform_device *pdev)
+{
+ return snd_soc_resume_device(&pdev->dev);
+}
+#else
+#define wm8350_codec_suspend NULL
+#define wm8350_codec_resume NULL
+#endif
+
static struct platform_driver wm8350_codec_driver = {
.driver = {
.name = "wm8350-codec",
@@ -1667,6 +1693,8 @@ static struct platform_driver wm8350_codec_driver = {
},
.probe = wm8350_codec_probe,
.remove = __devexit_p(wm8350_codec_remove),
+ .suspend = wm8350_codec_suspend,
+ .resume = wm8350_codec_resume,
};
static __init int wm8350_init(void)
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 502eefac1ecd..0bf903f27564 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1553,6 +1553,21 @@ static int __exit wm8400_codec_remove(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8400_pdev_suspend(struct platform_device *pdev, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&pdev->dev);
+}
+
+static int wm8400_pdev_resume(struct platform_device *pdev)
+{
+ return snd_soc_resume_device(&pdev->dev);
+}
+#else
+#define wm8400_pdev_suspend NULL
+#define wm8400_pdev_resume NULL
+#endif
+
static struct platform_driver wm8400_codec_driver = {
.driver = {
.name = "wm8400-codec",
@@ -1560,6 +1575,8 @@ static struct platform_driver wm8400_codec_driver = {
},
.probe = wm8400_codec_probe,
.remove = __exit_p(wm8400_codec_remove),
+ .suspend = wm8400_pdev_suspend,
+ .resume = wm8400_pdev_resume,
};
static int __init wm8400_codec_init(void)
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
new file mode 100644
index 000000000000..3b499ae7ce6c
--- /dev/null
+++ b/sound/soc/codecs/wm8523.c
@@ -0,0 +1,755 @@
+/*
+ * wm8523.c -- WM8523 ALSA SoC Audio driver
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8523.h"
+
+static struct snd_soc_codec *wm8523_codec;
+struct snd_soc_codec_device soc_codec_dev_wm8523;
+
+#define WM8523_NUM_SUPPLIES 2
+static const char *wm8523_supply_names[WM8523_NUM_SUPPLIES] = {
+ "AVDD",
+ "LINEVDD",
+};
+
+#define WM8523_NUM_RATES 7
+
+/* codec private data */
+struct wm8523_priv {
+ struct snd_soc_codec codec;
+ u16 reg_cache[WM8523_REGISTER_COUNT];
+ struct regulator_bulk_data supplies[WM8523_NUM_SUPPLIES];
+ unsigned int sysclk;
+ unsigned int rate_constraint_list[WM8523_NUM_RATES];
+ struct snd_pcm_hw_constraint_list rate_constraint;
+};
+
+static const u16 wm8523_reg[WM8523_REGISTER_COUNT] = {
+ 0x8523, /* R0 - DEVICE_ID */
+ 0x0001, /* R1 - REVISION */
+ 0x0000, /* R2 - PSCTRL1 */
+ 0x1812, /* R3 - AIF_CTRL1 */
+ 0x0000, /* R4 - AIF_CTRL2 */
+ 0x0001, /* R5 - DAC_CTRL3 */
+ 0x0190, /* R6 - DAC_GAINL */
+ 0x0190, /* R7 - DAC_GAINR */
+ 0x0000, /* R8 - ZERO_DETECT */
+};
+
+static int wm8523_volatile(unsigned int reg)
+{
+ switch (reg) {
+ case WM8523_DEVICE_ID:
+ case WM8523_REVISION:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int wm8523_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ struct wm8523_priv *wm8523 = codec->private_data;
+ u8 data[3];
+
+ BUG_ON(reg > WM8523_MAX_REGISTER);
+
+ data[0] = reg;
+ data[1] = (value >> 8) & 0x00ff;
+ data[2] = value & 0x00ff;
+
+ if (!wm8523_volatile(reg))
+ wm8523->reg_cache[reg] = value;
+ if (codec->hw_write(codec->control_data, data, 3) == 3)
+ return 0;
+ else
+ return -EIO;
+}
+
+static int wm8523_reset(struct snd_soc_codec *codec)
+{
+ return wm8523_write(codec, WM8523_DEVICE_ID, 0);
+}
+
+static unsigned int wm8523_read_hw(struct snd_soc_codec *codec, u8 reg)
+{
+ struct i2c_msg xfer[2];
+ u16 data;
+ int ret;
+ struct i2c_client *i2c = codec->control_data;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 2;
+ xfer[1].buf = (u8 *)&data;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret != 2) {
+ dev_err(codec->dev, "Failed to read 0x%x: %d\n", reg, ret);
+ return 0;
+ }
+
+ return (data >> 8) | ((data & 0xff) << 8);
+}
+
+
+static unsigned int wm8523_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u16 *reg_cache = codec->reg_cache;
+
+ BUG_ON(reg > WM8523_MAX_REGISTER);
+
+ if (wm8523_volatile(reg))
+ return wm8523_read_hw(codec, reg);
+ else
+ return reg_cache[reg];
+}
+
+static const DECLARE_TLV_DB_SCALE(dac_tlv, -10000, 25, 0);
+
+static const char *wm8523_zd_count_text[] = {
+ "1024",
+ "2048",
+};
+
+static const struct soc_enum wm8523_zc_count =
+ SOC_ENUM_SINGLE(WM8523_ZERO_DETECT, 0, 2, wm8523_zd_count_text);
+
+static const struct snd_kcontrol_new wm8523_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Playback Volume", WM8523_DAC_GAINL, WM8523_DAC_GAINR,
+ 0, 448, 0, dac_tlv),
+SOC_SINGLE("ZC Switch", WM8523_DAC_CTRL3, 4, 1, 0),
+SOC_SINGLE("Playback Deemphasis Switch", WM8523_AIF_CTRL1, 8, 1, 0),
+SOC_DOUBLE("Playback Switch", WM8523_DAC_CTRL3, 2, 3, 1, 1),
+SOC_SINGLE("Volume Ramp Up Switch", WM8523_DAC_CTRL3, 1, 1, 0),
+SOC_SINGLE("Volume Ramp Down Switch", WM8523_DAC_CTRL3, 0, 1, 0),
+SOC_ENUM("Zero Detect Count", wm8523_zc_count),
+};
+
+static const struct snd_soc_dapm_widget wm8523_dapm_widgets[] = {
+SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_OUTPUT("LINEVOUTL"),
+SND_SOC_DAPM_OUTPUT("LINEVOUTR"),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+ { "LINEVOUTL", NULL, "DAC" },
+ { "LINEVOUTR", NULL, "DAC" },
+};
+
+static int wm8523_add_widgets(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_new_controls(codec, wm8523_dapm_widgets,
+ ARRAY_SIZE(wm8523_dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+
+ snd_soc_dapm_new_widgets(codec);
+ return 0;
+}
+
+static struct {
+ int value;
+ int ratio;
+} lrclk_ratios[WM8523_NUM_RATES] = {
+ { 1, 128 },
+ { 2, 192 },
+ { 3, 256 },
+ { 4, 384 },
+ { 5, 512 },
+ { 6, 768 },
+ { 7, 1152 },
+};
+
+static int wm8523_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8523_priv *wm8523 = codec->private_data;
+
+ /* The set of sample rates that can be supported depends on the
+ * MCLK supplied to the CODEC - enforce this.
+ */
+ if (!wm8523->sysclk) {
+ dev_err(codec->dev,
+ "No MCLK configured, call set_sysclk() on init\n");
+ return -EINVAL;
+ }
+
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &wm8523->rate_constraint);
+
+ return 0;
+}
+
+static int wm8523_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct wm8523_priv *wm8523 = codec->private_data;
+ int i;
+ u16 aifctrl1 = wm8523_read(codec, WM8523_AIF_CTRL1);
+ u16 aifctrl2 = wm8523_read(codec, WM8523_AIF_CTRL2);
+
+ /* Find a supported LRCLK ratio */
+ for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) {
+ if (wm8523->sysclk / params_rate(params) ==
+ lrclk_ratios[i].ratio)
+ break;
+ }
+
+ /* Should never happen, should be handled by constraints */
+ if (i == ARRAY_SIZE(lrclk_ratios)) {
+ dev_err(codec->dev, "MCLK/fs ratio %d unsupported\n",
+ wm8523->sysclk / params_rate(params));
+ return -EINVAL;
+ }
+
+ aifctrl2 &= ~WM8523_SR_MASK;
+ aifctrl2 |= lrclk_ratios[i].value;
+
+ aifctrl1 &= ~WM8523_WL_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ aifctrl1 |= 0x8;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ aifctrl1 |= 0x10;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ aifctrl1 |= 0x18;
+ break;
+ }
+
+ wm8523_write(codec, WM8523_AIF_CTRL1, aifctrl1);
+ wm8523_write(codec, WM8523_AIF_CTRL2, aifctrl2);
+
+ return 0;
+}
+
+static int wm8523_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct wm8523_priv *wm8523 = codec->private_data;
+ unsigned int val;
+ int i;
+
+ wm8523->sysclk = freq;
+
+ wm8523->rate_constraint.count = 0;
+ for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) {
+ val = freq / lrclk_ratios[i].ratio;
+ /* Check that it's a standard rate since core can't
+ * cope with others and having the odd rates confuses
+ * constraint matching.
+ */
+ switch (val) {
+ case 8000:
+ case 11025:
+ case 16000:
+ case 22050:
+ case 32000:
+ case 44100:
+ case 48000:
+ case 64000:
+ case 88200:
+ case 96000:
+ case 176400:
+ case 192000:
+ dev_dbg(codec->dev, "Supported sample rate: %dHz\n",
+ val);
+ wm8523->rate_constraint_list[i] = val;
+ wm8523->rate_constraint.count++;
+ break;
+ default:
+ dev_dbg(codec->dev, "Skipping sample rate: %dHz\n",
+ val);
+ }
+ }
+
+ /* Need at least one supported rate... */
+ if (wm8523->rate_constraint.count == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static int wm8523_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u16 aifctrl1 = wm8523_read(codec, WM8523_AIF_CTRL1);
+
+ aifctrl1 &= ~(WM8523_BCLK_INV_MASK | WM8523_LRCLK_INV_MASK |
+ WM8523_FMT_MASK | WM8523_AIF_MSTR_MASK);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aifctrl1 |= WM8523_AIF_MSTR;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ aifctrl1 |= 0x0002;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ aifctrl1 |= 0x0001;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ aifctrl1 |= 0x0003;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ aifctrl1 |= 0x0023;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ aifctrl1 |= WM8523_BCLK_INV | WM8523_LRCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aifctrl1 |= WM8523_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ aifctrl1 |= WM8523_LRCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wm8523_write(codec, WM8523_AIF_CTRL1, aifctrl1);
+
+ return 0;
+}
+
+static int wm8523_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct wm8523_priv *wm8523 = codec->private_data;
+ int ret, i;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ /* Full power on */
+ snd_soc_update_bits(codec, WM8523_PSCTRL1,
+ WM8523_SYS_ENA_MASK, 3);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies),
+ wm8523->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to enable supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Initial power up */
+ snd_soc_update_bits(codec, WM8523_PSCTRL1,
+ WM8523_SYS_ENA_MASK, 1);
+
+ /* Sync back default/cached values */
+ for (i = WM8523_AIF_CTRL1;
+ i < WM8523_MAX_REGISTER; i++)
+ wm8523_write(codec, i, wm8523->reg_cache[i]);
+
+
+ msleep(100);
+ }
+
+ /* Power up to mute */
+ snd_soc_update_bits(codec, WM8523_PSCTRL1,
+ WM8523_SYS_ENA_MASK, 2);
+
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ /* The chip runs through the power down sequence for us. */
+ snd_soc_update_bits(codec, WM8523_PSCTRL1,
+ WM8523_SYS_ENA_MASK, 0);
+ msleep(100);
+
+ regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies),
+ wm8523->supplies);
+ break;
+ }
+ codec->bias_level = level;
+ return 0;
+}
+
+#define WM8523_RATES SNDRV_PCM_RATE_8000_192000
+
+#define WM8523_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8523_dai_ops = {
+ .startup = wm8523_startup,
+ .hw_params = wm8523_hw_params,
+ .set_sysclk = wm8523_set_dai_sysclk,
+ .set_fmt = wm8523_set_dai_fmt,
+};
+
+struct snd_soc_dai wm8523_dai = {
+ .name = "WM8523",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2, /* Mono modes not yet supported */
+ .channels_max = 2,
+ .rates = WM8523_RATES,
+ .formats = WM8523_FORMATS,
+ },
+ .ops = &wm8523_dai_ops,
+};
+EXPORT_SYMBOL_GPL(wm8523_dai);
+
+#ifdef CONFIG_PM
+static int wm8523_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8523_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static int wm8523_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+#else
+#define wm8523_suspend NULL
+#define wm8523_resume NULL
+#endif
+
+static int wm8523_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (wm8523_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = wm8523_codec;
+ codec = wm8523_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ snd_soc_add_controls(codec, wm8523_snd_controls,
+ ARRAY_SIZE(wm8523_snd_controls));
+ wm8523_add_widgets(codec);
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+}
+
+static int wm8523_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_wm8523 = {
+ .probe = wm8523_probe,
+ .remove = wm8523_remove,
+ .suspend = wm8523_suspend,
+ .resume = wm8523_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8523);
+
+static int wm8523_register(struct wm8523_priv *wm8523)
+{
+ int ret;
+ struct snd_soc_codec *codec = &wm8523->codec;
+ int i;
+
+ if (wm8523_codec) {
+ dev_err(codec->dev, "Another WM8523 is registered\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = wm8523;
+ codec->name = "WM8523";
+ codec->owner = THIS_MODULE;
+ codec->read = wm8523_read;
+ codec->write = wm8523_write;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = wm8523_set_bias_level;
+ codec->dai = &wm8523_dai;
+ codec->num_dai = 1;
+ codec->reg_cache_size = WM8523_REGISTER_COUNT;
+ codec->reg_cache = &wm8523->reg_cache;
+
+ wm8523->rate_constraint.list = &wm8523->rate_constraint_list[0];
+ wm8523->rate_constraint.count =
+ ARRAY_SIZE(wm8523->rate_constraint_list);
+
+ memcpy(codec->reg_cache, wm8523_reg, sizeof(wm8523_reg));
+
+ for (i = 0; i < ARRAY_SIZE(wm8523->supplies); i++)
+ wm8523->supplies[i].supply = wm8523_supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8523->supplies),
+ wm8523->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+ goto err;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies),
+ wm8523->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+ goto err_get;
+ }
+
+ ret = wm8523_read(codec, WM8523_DEVICE_ID);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to read ID register\n");
+ goto err_enable;
+ }
+ if (ret != wm8523_reg[WM8523_DEVICE_ID]) {
+ dev_err(codec->dev, "Device is not a WM8523, ID is %x\n", ret);
+ ret = -EINVAL;
+ goto err_enable;
+ }
+
+ ret = wm8523_read(codec, WM8523_REVISION);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to read revision register\n");
+ goto err_enable;
+ }
+ dev_info(codec->dev, "revision %c\n",
+ (ret & WM8523_CHIP_REV_MASK) + 'A');
+
+ ret = wm8523_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset\n");
+ goto err_enable;
+ }
+
+ wm8523_dai.dev = codec->dev;
+
+ /* Change some default settings - latch VU and enable ZC */
+ wm8523->reg_cache[WM8523_DAC_GAINR] |= WM8523_DACR_VU;
+ wm8523->reg_cache[WM8523_DAC_CTRL3] |= WM8523_ZC;
+
+ wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ /* Bias level configuration will have done an extra enable */
+ regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
+
+ wm8523_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&wm8523_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ return ret;
+ }
+
+ return 0;
+
+err_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
+err_get:
+ regulator_bulk_free(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
+err:
+ kfree(wm8523);
+ return ret;
+}
+
+static void wm8523_unregister(struct wm8523_priv *wm8523)
+{
+ wm8523_set_bias_level(&wm8523->codec, SND_SOC_BIAS_OFF);
+ regulator_bulk_free(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
+ snd_soc_unregister_dai(&wm8523_dai);
+ snd_soc_unregister_codec(&wm8523->codec);
+ kfree(wm8523);
+ wm8523_codec = NULL;
+}
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8523_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8523_priv *wm8523;
+ struct snd_soc_codec *codec;
+
+ wm8523 = kzalloc(sizeof(struct wm8523_priv), GFP_KERNEL);
+ if (wm8523 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8523->codec;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+
+ i2c_set_clientdata(i2c, wm8523);
+ codec->control_data = i2c;
+
+ codec->dev = &i2c->dev;
+
+ return wm8523_register(wm8523);
+}
+
+static __devexit int wm8523_i2c_remove(struct i2c_client *client)
+{
+ struct wm8523_priv *wm8523 = i2c_get_clientdata(client);
+ wm8523_unregister(wm8523);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8523_i2c_suspend(struct i2c_client *i2c, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&i2c->dev);
+}
+
+static int wm8523_i2c_resume(struct i2c_client *i2c)
+{
+ return snd_soc_resume_device(&i2c->dev);
+}
+#else
+#define wm8523_i2c_suspend NULL
+#define wm8523_i2c_resume NULL
+#endif
+
+static const struct i2c_device_id wm8523_i2c_id[] = {
+ { "wm8523", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8523_i2c_id);
+
+static struct i2c_driver wm8523_i2c_driver = {
+ .driver = {
+ .name = "WM8523",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8523_i2c_probe,
+ .remove = __devexit_p(wm8523_i2c_remove),
+ .suspend = wm8523_i2c_suspend,
+ .resume = wm8523_i2c_resume,
+ .id_table = wm8523_i2c_id,
+};
+#endif
+
+static int __init wm8523_modinit(void)
+{
+ int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&wm8523_i2c_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register WM8523 I2C driver: %d\n",
+ ret);
+ }
+#endif
+ return 0;
+}
+module_init(wm8523_modinit);
+
+static void __exit wm8523_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&wm8523_i2c_driver);
+#endif
+}
+module_exit(wm8523_exit);
+
+MODULE_DESCRIPTION("ASoC WM8523 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
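For reference, a minimal sketch (not part of this patch) of how a machine driver would feed the WM8523 its MCLK: the rate-constraint list built in wm8523_set_dai_sysclk() is only populated once the board code has declared the clock, and wm8523_startup() refuses to run without it. The board callback name and the fixed 12.288MHz MCLK below are assumptions for illustration only; snd_soc_dai_set_sysclk() and SND_SOC_CLOCK_IN are the standard ASoC interfaces of this era.

static int example_board_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;

	/* clk_id is not used by wm8523_set_dai_sysclk(); the codec
	 * receives MCLK from the board, hence SND_SOC_CLOCK_IN.
	 */
	return snd_soc_dai_set_sysclk(codec_dai, 0, 12288000,
				      SND_SOC_CLOCK_IN);
}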
diff --git a/sound/soc/codecs/wm8523.h b/sound/soc/codecs/wm8523.h
new file mode 100644
index 000000000000..1aa9ce3e1357
--- /dev/null
+++ b/sound/soc/codecs/wm8523.h
@@ -0,0 +1,160 @@
+/*
+ * wm8523.h -- WM8523 ASoC driver
+ *
+ * Copyright 2009 Wolfson Microelectronics, plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * Based on wm8753.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8523_H
+#define _WM8523_H
+
+/*
+ * Register values.
+ */
+#define WM8523_DEVICE_ID 0x00
+#define WM8523_REVISION 0x01
+#define WM8523_PSCTRL1 0x02
+#define WM8523_AIF_CTRL1 0x03
+#define WM8523_AIF_CTRL2 0x04
+#define WM8523_DAC_CTRL3 0x05
+#define WM8523_DAC_GAINL 0x06
+#define WM8523_DAC_GAINR 0x07
+#define WM8523_ZERO_DETECT 0x08
+
+#define WM8523_REGISTER_COUNT 9
+#define WM8523_MAX_REGISTER 0x08
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - DEVICE_ID
+ */
+#define WM8523_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */
+#define WM8523_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */
+#define WM8523_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */
+
+/*
+ * R1 (0x01) - REVISION
+ */
+#define WM8523_CHIP_REV_MASK 0x0007 /* CHIP_REV - [2:0] */
+#define WM8523_CHIP_REV_SHIFT 0 /* CHIP_REV - [2:0] */
+#define WM8523_CHIP_REV_WIDTH 3 /* CHIP_REV - [2:0] */
+
+/*
+ * R2 (0x02) - PSCTRL1
+ */
+#define WM8523_SYS_ENA_MASK 0x0003 /* SYS_ENA - [1:0] */
+#define WM8523_SYS_ENA_SHIFT 0 /* SYS_ENA - [1:0] */
+#define WM8523_SYS_ENA_WIDTH 2 /* SYS_ENA - [1:0] */
+
+/*
+ * R3 (0x03) - AIF_CTRL1
+ */
+#define WM8523_TDM_MODE_MASK 0x1800 /* TDM_MODE - [12:11] */
+#define WM8523_TDM_MODE_SHIFT 11 /* TDM_MODE - [12:11] */
+#define WM8523_TDM_MODE_WIDTH 2 /* TDM_MODE - [12:11] */
+#define WM8523_TDM_SLOT_MASK 0x0600 /* TDM_SLOT - [10:9] */
+#define WM8523_TDM_SLOT_SHIFT 9 /* TDM_SLOT - [10:9] */
+#define WM8523_TDM_SLOT_WIDTH 2 /* TDM_SLOT - [10:9] */
+#define WM8523_DEEMPH 0x0100 /* DEEMPH */
+#define WM8523_DEEMPH_MASK 0x0100 /* DEEMPH */
+#define WM8523_DEEMPH_SHIFT 8 /* DEEMPH */
+#define WM8523_DEEMPH_WIDTH 1 /* DEEMPH */
+#define WM8523_AIF_MSTR 0x0080 /* AIF_MSTR */
+#define WM8523_AIF_MSTR_MASK 0x0080 /* AIF_MSTR */
+#define WM8523_AIF_MSTR_SHIFT 7 /* AIF_MSTR */
+#define WM8523_AIF_MSTR_WIDTH 1 /* AIF_MSTR */
+#define WM8523_LRCLK_INV 0x0040 /* LRCLK_INV */
+#define WM8523_LRCLK_INV_MASK 0x0040 /* LRCLK_INV */
+#define WM8523_LRCLK_INV_SHIFT 6 /* LRCLK_INV */
+#define WM8523_LRCLK_INV_WIDTH 1 /* LRCLK_INV */
+#define WM8523_BCLK_INV 0x0020 /* BCLK_INV */
+#define WM8523_BCLK_INV_MASK 0x0020 /* BCLK_INV */
+#define WM8523_BCLK_INV_SHIFT 5 /* BCLK_INV */
+#define WM8523_BCLK_INV_WIDTH 1 /* BCLK_INV */
+#define WM8523_WL_MASK 0x0018 /* WL - [4:3] */
+#define WM8523_WL_SHIFT 3 /* WL - [4:3] */
+#define WM8523_WL_WIDTH 2 /* WL - [4:3] */
+#define WM8523_FMT_MASK 0x0007 /* FMT - [2:0] */
+#define WM8523_FMT_SHIFT 0 /* FMT - [2:0] */
+#define WM8523_FMT_WIDTH 3 /* FMT - [2:0] */
+
+/*
+ * R4 (0x04) - AIF_CTRL2
+ */
+#define WM8523_DAC_OP_MUX_MASK 0x00C0 /* DAC_OP_MUX - [7:6] */
+#define WM8523_DAC_OP_MUX_SHIFT 6 /* DAC_OP_MUX - [7:6] */
+#define WM8523_DAC_OP_MUX_WIDTH 2 /* DAC_OP_MUX - [7:6] */
+#define WM8523_BCLKDIV_MASK 0x0038 /* BCLKDIV - [5:3] */
+#define WM8523_BCLKDIV_SHIFT 3 /* BCLKDIV - [5:3] */
+#define WM8523_BCLKDIV_WIDTH 3 /* BCLKDIV - [5:3] */
+#define WM8523_SR_MASK 0x0007 /* SR - [2:0] */
+#define WM8523_SR_SHIFT 0 /* SR - [2:0] */
+#define WM8523_SR_WIDTH 3 /* SR - [2:0] */
+
+/*
+ * R5 (0x05) - DAC_CTRL3
+ */
+#define WM8523_ZC 0x0010 /* ZC */
+#define WM8523_ZC_MASK 0x0010 /* ZC */
+#define WM8523_ZC_SHIFT 4 /* ZC */
+#define WM8523_ZC_WIDTH 1 /* ZC */
+#define WM8523_DACR 0x0008 /* DACR */
+#define WM8523_DACR_MASK 0x0008 /* DACR */
+#define WM8523_DACR_SHIFT 3 /* DACR */
+#define WM8523_DACR_WIDTH 1 /* DACR */
+#define WM8523_DACL 0x0004 /* DACL */
+#define WM8523_DACL_MASK 0x0004 /* DACL */
+#define WM8523_DACL_SHIFT 2 /* DACL */
+#define WM8523_DACL_WIDTH 1 /* DACL */
+#define WM8523_VOL_UP_RAMP 0x0002 /* VOL_UP_RAMP */
+#define WM8523_VOL_UP_RAMP_MASK 0x0002 /* VOL_UP_RAMP */
+#define WM8523_VOL_UP_RAMP_SHIFT 1 /* VOL_UP_RAMP */
+#define WM8523_VOL_UP_RAMP_WIDTH 1 /* VOL_UP_RAMP */
+#define WM8523_VOL_DOWN_RAMP 0x0001 /* VOL_DOWN_RAMP */
+#define WM8523_VOL_DOWN_RAMP_MASK 0x0001 /* VOL_DOWN_RAMP */
+#define WM8523_VOL_DOWN_RAMP_SHIFT 0 /* VOL_DOWN_RAMP */
+#define WM8523_VOL_DOWN_RAMP_WIDTH 1 /* VOL_DOWN_RAMP */
+
+/*
+ * R6 (0x06) - DAC_GAINL
+ */
+#define WM8523_DACL_VU 0x0200 /* DACL_VU */
+#define WM8523_DACL_VU_MASK 0x0200 /* DACL_VU */
+#define WM8523_DACL_VU_SHIFT 9 /* DACL_VU */
+#define WM8523_DACL_VU_WIDTH 1 /* DACL_VU */
+#define WM8523_DACL_VOL_MASK 0x01FF /* DACL_VOL - [8:0] */
+#define WM8523_DACL_VOL_SHIFT 0 /* DACL_VOL - [8:0] */
+#define WM8523_DACL_VOL_WIDTH 9 /* DACL_VOL - [8:0] */
+
+/*
+ * R7 (0x07) - DAC_GAINR
+ */
+#define WM8523_DACR_VU 0x0200 /* DACR_VU */
+#define WM8523_DACR_VU_MASK 0x0200 /* DACR_VU */
+#define WM8523_DACR_VU_SHIFT 9 /* DACR_VU */
+#define WM8523_DACR_VU_WIDTH 1 /* DACR_VU */
+#define WM8523_DACR_VOL_MASK 0x01FF /* DACR_VOL - [8:0] */
+#define WM8523_DACR_VOL_SHIFT 0 /* DACR_VOL - [8:0] */
+#define WM8523_DACR_VOL_WIDTH 9 /* DACR_VOL - [8:0] */
+
+/*
+ * R8 (0x08) - ZERO_DETECT
+ */
+#define WM8523_ZD_COUNT_MASK 0x0003 /* ZD_COUNT - [1:0] */
+#define WM8523_ZD_COUNT_SHIFT 0 /* ZD_COUNT - [1:0] */
+#define WM8523_ZD_COUNT_WIDTH 2 /* ZD_COUNT - [1:0] */
+
+extern struct snd_soc_dai wm8523_dai;
+extern struct snd_soc_codec_device soc_codec_dev_wm8523;
+
+#endif
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 86c4b24db817..97b9ed95d289 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -24,6 +24,8 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -187,15 +189,22 @@ struct pll_state {
unsigned int out;
};
+#define WM8580_NUM_SUPPLIES 3
+static const char *wm8580_supply_names[WM8580_NUM_SUPPLIES] = {
+ "AVDD",
+ "DVDD",
+ "PVDD",
+};
+
/* codec private data */
struct wm8580_priv {
struct snd_soc_codec codec;
+ struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
u16 reg_cache[WM8580_MAX_REGISTER + 1];
struct pll_state a;
struct pll_state b;
};
-
/*
* read wm8580 register cache
*/
@@ -922,11 +931,28 @@ static int wm8580_register(struct wm8580_priv *wm8580)
memcpy(codec->reg_cache, wm8580_reg, sizeof(wm8580_reg));
+ for (i = 0; i < ARRAY_SIZE(wm8580->supplies); i++)
+ wm8580->supplies[i].supply = wm8580_supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8580->supplies),
+ wm8580->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+ goto err;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8580->supplies),
+ wm8580->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+ goto err_regulator_get;
+ }
+
/* Get the codec into a known state */
ret = wm8580_write(codec, WM8580_RESET, 0);
if (ret != 0) {
dev_err(codec->dev, "Failed to reset codec: %d\n", ret);
- goto err;
+ goto err_regulator_enable;
}
for (i = 0; i < ARRAY_SIZE(wm8580_dai); i++)
@@ -939,7 +965,7 @@ static int wm8580_register(struct wm8580_priv *wm8580)
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(codec->dev, "Failed to register codec: %d\n", ret);
- goto err;
+ goto err_regulator_enable;
}
ret = snd_soc_register_dais(wm8580_dai, ARRAY_SIZE(wm8580_dai));
@@ -952,6 +978,10 @@ static int wm8580_register(struct wm8580_priv *wm8580)
err_codec:
snd_soc_unregister_codec(codec);
+err_regulator_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
+err_regulator_get:
+ regulator_bulk_free(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
err:
kfree(wm8580);
return ret;
@@ -962,6 +992,8 @@ static void wm8580_unregister(struct wm8580_priv *wm8580)
wm8580_set_bias_level(&wm8580->codec, SND_SOC_BIAS_OFF);
snd_soc_unregister_dais(wm8580_dai, ARRAY_SIZE(wm8580_dai));
snd_soc_unregister_codec(&wm8580->codec);
+ regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
+ regulator_bulk_free(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
kfree(wm8580);
wm8580_codec = NULL;
}
@@ -995,6 +1027,21 @@ static int wm8580_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8580_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8580_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8580_i2c_suspend NULL
+#define wm8580_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8580_i2c_id[] = {
{ "wm8580", 0 },
{ }
@@ -1008,6 +1055,8 @@ static struct i2c_driver wm8580_i2c_driver = {
},
.probe = wm8580_i2c_probe,
.remove = wm8580_i2c_remove,
+ .suspend = wm8580_i2c_suspend,
+ .resume = wm8580_i2c_resume,
.id_table = wm8580_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 7a205876ef4f..d7f4788f7ace 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -460,6 +460,7 @@ struct snd_soc_dai wm8731_dai = {
};
EXPORT_SYMBOL_GPL(wm8731_dai);
+#ifdef CONFIG_PM
static int wm8731_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
@@ -488,6 +489,10 @@ static int wm8731_resume(struct platform_device *pdev)
wm8731_set_bias_level(codec, codec->suspend_bias_level);
return 0;
}
+#else
+#define wm8731_suspend NULL
+#define wm8731_resume NULL
+#endif
static int wm8731_probe(struct platform_device *pdev)
{
@@ -680,6 +685,21 @@ static int __devexit wm8731_spi_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8731_spi_suspend(struct spi_device *spi, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&spi->dev);
+}
+
+static int wm8731_spi_resume(struct spi_device *spi)
+{
+ return snd_soc_resume_device(&spi->dev);
+}
+#else
+#define wm8731_spi_suspend NULL
+#define wm8731_spi_resume NULL
+#endif
+
static struct spi_driver wm8731_spi_driver = {
.driver = {
.name = "wm8731",
@@ -687,6 +707,8 @@ static struct spi_driver wm8731_spi_driver = {
.owner = THIS_MODULE,
},
.probe = wm8731_spi_probe,
+ .suspend = wm8731_spi_suspend,
+ .resume = wm8731_spi_resume,
.remove = __devexit_p(wm8731_spi_remove),
};
#endif /* CONFIG_SPI_MASTER */
@@ -720,6 +742,21 @@ static __devexit int wm8731_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8731_i2c_suspend(struct i2c_client *i2c, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&i2c->dev);
+}
+
+static int wm8731_i2c_resume(struct i2c_client *i2c)
+{
+ return snd_soc_resume_device(&i2c->dev);
+}
+#else
+#define wm8731_i2c_suspend NULL
+#define wm8731_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8731_i2c_id[] = {
{ "wm8731", 0 },
{ }
@@ -733,6 +770,8 @@ static struct i2c_driver wm8731_i2c_driver = {
},
.probe = wm8731_i2c_probe,
.remove = __devexit_p(wm8731_i2c_remove),
+ .suspend = wm8731_i2c_suspend,
+ .resume = wm8731_i2c_resume,
.id_table = wm8731_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index d28eeaceb857..370f7df03628 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1766,6 +1766,21 @@ static int wm8753_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8753_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8753_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8753_i2c_suspend NULL
+#define wm8753_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8753_i2c_id[] = {
{ "wm8753", 0 },
{ }
@@ -1779,6 +1794,8 @@ static struct i2c_driver wm8753_i2c_driver = {
},
.probe = wm8753_i2c_probe,
.remove = wm8753_i2c_remove,
+ .suspend = wm8753_i2c_suspend,
+ .resume = wm8753_i2c_resume,
.id_table = wm8753_i2c_id,
};
#endif
@@ -1834,6 +1851,22 @@ static int __devexit wm8753_spi_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8753_spi_suspend(struct spi_device *spi, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&spi->dev);
+}
+
+static int wm8753_spi_resume(struct spi_device *spi)
+{
+ return snd_soc_resume_device(&spi->dev);
+}
+
+#else
+#define wm8753_spi_suspend NULL
+#define wm8753_spi_resume NULL
+#endif
+
static struct spi_driver wm8753_spi_driver = {
.driver = {
.name = "wm8753",
@@ -1842,6 +1875,8 @@ static struct spi_driver wm8753_spi_driver = {
},
.probe = wm8753_spi_probe,
.remove = __devexit_p(wm8753_spi_remove),
+ .suspend = wm8753_spi_suspend,
+ .resume = wm8753_spi_resume,
};
#endif
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 3c78945244b8..ac308993ac5a 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -116,6 +116,7 @@
#define WM8900_REG_CLOCKING2_DAC_CLKDIV 0x1c
#define WM8900_REG_DACCTRL_MUTE 0x004
+#define WM8900_REG_DACCTRL_DAC_SB_FILT 0x100
#define WM8900_REG_DACCTRL_AIF_LRCLKRATE 0x400
#define WM8900_REG_AUDIO3_ADCLRC_DIR 0x0800
@@ -439,7 +440,6 @@ SOC_SINGLE("DAC Soft Mute Switch", WM8900_REG_DACCTRL, 6, 1, 1),
SOC_ENUM("DAC Mute Rate", dac_mute_rate),
SOC_SINGLE("DAC Mono Switch", WM8900_REG_DACCTRL, 9, 1, 0),
SOC_ENUM("DAC Deemphasis", dac_deemphasis),
-SOC_SINGLE("DAC Sloping Stopband Filter Switch", WM8900_REG_DACCTRL, 8, 1, 0),
SOC_SINGLE("DAC Sigma-Delta Modulator Clock Switch", WM8900_REG_DACCTRL,
12, 1, 0),
@@ -743,6 +743,17 @@ static int wm8900_hw_params(struct snd_pcm_substream *substream,
wm8900_write(codec, WM8900_REG_AUDIO1, reg);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ reg = wm8900_read(codec, WM8900_REG_DACCTRL);
+
+ if (params_rate(params) <= 24000)
+ reg |= WM8900_REG_DACCTRL_DAC_SB_FILT;
+ else
+ reg &= ~WM8900_REG_DACCTRL_DAC_SB_FILT;
+
+ wm8900_write(codec, WM8900_REG_DACCTRL, reg);
+ }
+
return 0;
}
@@ -1388,6 +1399,21 @@ static __devexit int wm8900_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8900_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8900_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8900_i2c_suspend NULL
+#define wm8900_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8900_i2c_id[] = {
{ "wm8900", 0 },
{ }
@@ -1401,6 +1427,8 @@ static struct i2c_driver wm8900_i2c_driver = {
},
.probe = wm8900_i2c_probe,
.remove = __devexit_p(wm8900_i2c_remove),
+ .suspend = wm8900_i2c_suspend,
+ .resume = wm8900_i2c_resume,
.id_table = wm8900_i2c_id,
};
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index e8d2e3e14c45..c9baeae3e275 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -715,8 +715,6 @@ SOC_ENUM("DAC Soft Mute Rate", soft_mute),
SOC_ENUM("DAC Mute Mode", mute_mode),
SOC_SINGLE("DAC Mono Switch", WM8903_DAC_DIGITAL_1, 12, 1, 0),
SOC_ENUM("DAC De-emphasis", dac_deemphasis),
-SOC_SINGLE("DAC Sloping Stopband Filter Switch",
- WM8903_DAC_DIGITAL_1, 11, 1, 0),
SOC_ENUM("DAC Companding Mode", dac_companding),
SOC_SINGLE("DAC Companding Switch", WM8903_AUDIO_INTERFACE_0, 1, 1, 0),
@@ -1373,12 +1371,19 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream,
u16 aif3 = wm8903_read(codec, WM8903_AUDIO_INTERFACE_3);
u16 clock0 = wm8903_read(codec, WM8903_CLOCK_RATES_0);
u16 clock1 = wm8903_read(codec, WM8903_CLOCK_RATES_1);
+ u16 dac_digital1 = wm8903_read(codec, WM8903_DAC_DIGITAL_1);
if (substream == wm8903->slave_substream) {
dev_dbg(&i2c->dev, "Ignoring hw_params for slave substream\n");
return 0;
}
+ /* Enable sloping stopband filter for low sample rates */
+ if (fs <= 24000)
+ dac_digital1 |= WM8903_DAC_SB_FILT;
+ else
+ dac_digital1 &= ~WM8903_DAC_SB_FILT;
+
/* Configure sample rate logic for DSP - choose nearest rate */
dsp_config = 0;
best_val = abs(sample_rates[dsp_config].rate - fs);
@@ -1503,6 +1508,7 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream,
wm8903_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
wm8903_write(codec, WM8903_AUDIO_INTERFACE_2, aif2);
wm8903_write(codec, WM8903_AUDIO_INTERFACE_3, aif3);
+ wm8903_write(codec, WM8903_DAC_DIGITAL_1, dac_digital1);
return 0;
}
@@ -1721,6 +1727,21 @@ static __devexit int wm8903_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8903_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8903_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8903_i2c_suspend NULL
+#define wm8903_i2c_resume NULL
+#endif
+
/* i2c codec control layer */
static const struct i2c_device_id wm8903_i2c_id[] = {
{ "wm8903", 0 },
@@ -1735,6 +1756,8 @@ static struct i2c_driver wm8903_i2c_driver = {
},
.probe = wm8903_i2c_probe,
.remove = __devexit_p(wm8903_i2c_remove),
+ .suspend = wm8903_i2c_suspend,
+ .resume = wm8903_i2c_resume,
.id_table = wm8903_i2c_id,
};
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index b8e17d6bc1f7..b69210a77423 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -916,6 +916,21 @@ static int __devexit wm8940_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8940_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8940_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8940_i2c_suspend NULL
+#define wm8940_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8940_i2c_id[] = {
{ "wm8940", 0 },
{ }
@@ -929,6 +944,8 @@ static struct i2c_driver wm8940_i2c_driver = {
},
.probe = wm8940_i2c_probe,
.remove = __devexit_p(wm8940_i2c_remove),
+ .suspend = wm8940_i2c_suspend,
+ .resume = wm8940_i2c_resume,
.id_table = wm8940_i2c_id,
};
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index e224d8add170..b7894d6dffc0 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -927,6 +927,21 @@ static __devexit int wm8960_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8960_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8960_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8960_i2c_suspend NULL
+#define wm8960_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8960_i2c_id[] = {
{ "wm8960", 0 },
{ }
@@ -940,6 +955,8 @@ static struct i2c_driver wm8960_i2c_driver = {
},
.probe = wm8960_i2c_probe,
.remove = __devexit_p(wm8960_i2c_remove),
+ .suspend = wm8960_i2c_suspend,
+ .resume = wm8960_i2c_resume,
.id_table = wm8960_i2c_id,
};
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
new file mode 100644
index 000000000000..bd1af92a122f
--- /dev/null
+++ b/sound/soc/codecs/wm8961.c
@@ -0,0 +1,1326 @@
+/*
+ * wm8961.c -- WM8961 ALSA SoC Audio driver
+ *
+ * Author: Mark Brown
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Currently unimplemented features:
+ * - ALC
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8961.h"
+
+#define WM8961_MAX_REGISTER 0xFC
+
+static u16 wm8961_reg_defaults[] = {
+ 0x009F, /* R0 - Left Input volume */
+ 0x009F, /* R1 - Right Input volume */
+ 0x0000, /* R2 - LOUT1 volume */
+ 0x0000, /* R3 - ROUT1 volume */
+ 0x0020, /* R4 - Clocking1 */
+ 0x0008, /* R5 - ADC & DAC Control 1 */
+ 0x0000, /* R6 - ADC & DAC Control 2 */
+ 0x000A, /* R7 - Audio Interface 0 */
+ 0x01F4, /* R8 - Clocking2 */
+ 0x0000, /* R9 - Audio Interface 1 */
+ 0x00FF, /* R10 - Left DAC volume */
+ 0x00FF, /* R11 - Right DAC volume */
+ 0x0000, /* R12 */
+ 0x0000, /* R13 */
+ 0x0040, /* R14 - Audio Interface 2 */
+ 0x0000, /* R15 - Software Reset */
+ 0x0000, /* R16 */
+ 0x007B, /* R17 - ALC1 */
+ 0x0000, /* R18 - ALC2 */
+ 0x0032, /* R19 - ALC3 */
+ 0x0000, /* R20 - Noise Gate */
+ 0x00C0, /* R21 - Left ADC volume */
+ 0x00C0, /* R22 - Right ADC volume */
+ 0x0120, /* R23 - Additional control(1) */
+ 0x0000, /* R24 - Additional control(2) */
+ 0x0000, /* R25 - Pwr Mgmt (1) */
+ 0x0000, /* R26 - Pwr Mgmt (2) */
+ 0x0000, /* R27 - Additional Control (3) */
+ 0x0000, /* R28 - Anti-pop */
+ 0x0000, /* R29 */
+ 0x005F, /* R30 - Clocking 3 */
+ 0x0000, /* R31 */
+ 0x0000, /* R32 - ADCL signal path */
+ 0x0000, /* R33 - ADCR signal path */
+ 0x0000, /* R34 */
+ 0x0000, /* R35 */
+ 0x0000, /* R36 */
+ 0x0000, /* R37 */
+ 0x0000, /* R38 */
+ 0x0000, /* R39 */
+ 0x0000, /* R40 - LOUT2 volume */
+ 0x0000, /* R41 - ROUT2 volume */
+ 0x0000, /* R42 */
+ 0x0000, /* R43 */
+ 0x0000, /* R44 */
+ 0x0000, /* R45 */
+ 0x0000, /* R46 */
+ 0x0000, /* R47 - Pwr Mgmt (3) */
+ 0x0023, /* R48 - Additional Control (4) */
+ 0x0000, /* R49 - Class D Control 1 */
+ 0x0000, /* R50 */
+ 0x0003, /* R51 - Class D Control 2 */
+ 0x0000, /* R52 */
+ 0x0000, /* R53 */
+ 0x0000, /* R54 */
+ 0x0000, /* R55 */
+ 0x0106, /* R56 - Clocking 4 */
+ 0x0000, /* R57 - DSP Sidetone 0 */
+ 0x0000, /* R58 - DSP Sidetone 1 */
+ 0x0000, /* R59 */
+ 0x0000, /* R60 - DC Servo 0 */
+ 0x0000, /* R61 - DC Servo 1 */
+ 0x0000, /* R62 */
+ 0x015E, /* R63 - DC Servo 3 */
+ 0x0010, /* R64 */
+ 0x0010, /* R65 - DC Servo 5 */
+ 0x0000, /* R66 */
+ 0x0001, /* R67 */
+ 0x0003, /* R68 - Analogue PGA Bias */
+ 0x0000, /* R69 - Analogue HP 0 */
+ 0x0060, /* R70 */
+ 0x01FB, /* R71 - Analogue HP 2 */
+ 0x0000, /* R72 - Charge Pump 1 */
+ 0x0065, /* R73 */
+ 0x005F, /* R74 */
+ 0x0059, /* R75 */
+ 0x006B, /* R76 */
+ 0x0038, /* R77 */
+ 0x000C, /* R78 */
+ 0x000A, /* R79 */
+ 0x006B, /* R80 */
+ 0x0000, /* R81 */
+ 0x0000, /* R82 - Charge Pump B */
+ 0x0087, /* R83 */
+ 0x0000, /* R84 */
+ 0x005C, /* R85 */
+ 0x0000, /* R86 */
+ 0x0000, /* R87 - Write Sequencer 1 */
+ 0x0000, /* R88 - Write Sequencer 2 */
+ 0x0000, /* R89 - Write Sequencer 3 */
+ 0x0000, /* R90 - Write Sequencer 4 */
+ 0x0000, /* R91 - Write Sequencer 5 */
+ 0x0000, /* R92 - Write Sequencer 6 */
+ 0x0000, /* R93 - Write Sequencer 7 */
+ 0x0000, /* R94 */
+ 0x0000, /* R95 */
+ 0x0000, /* R96 */
+ 0x0000, /* R97 */
+ 0x0000, /* R98 */
+ 0x0000, /* R99 */
+ 0x0000, /* R100 */
+ 0x0000, /* R101 */
+ 0x0000, /* R102 */
+ 0x0000, /* R103 */
+ 0x0000, /* R104 */
+ 0x0000, /* R105 */
+ 0x0000, /* R106 */
+ 0x0000, /* R107 */
+ 0x0000, /* R108 */
+ 0x0000, /* R109 */
+ 0x0000, /* R110 */
+ 0x0000, /* R111 */
+ 0x0000, /* R112 */
+ 0x0000, /* R113 */
+ 0x0000, /* R114 */
+ 0x0000, /* R115 */
+ 0x0000, /* R116 */
+ 0x0000, /* R117 */
+ 0x0000, /* R118 */
+ 0x0000, /* R119 */
+ 0x0000, /* R120 */
+ 0x0000, /* R121 */
+ 0x0000, /* R122 */
+ 0x0000, /* R123 */
+ 0x0000, /* R124 */
+ 0x0000, /* R125 */
+ 0x0000, /* R126 */
+ 0x0000, /* R127 */
+ 0x0000, /* R128 */
+ 0x0000, /* R129 */
+ 0x0000, /* R130 */
+ 0x0000, /* R131 */
+ 0x0000, /* R132 */
+ 0x0000, /* R133 */
+ 0x0000, /* R134 */
+ 0x0000, /* R135 */
+ 0x0000, /* R136 */
+ 0x0000, /* R137 */
+ 0x0000, /* R138 */
+ 0x0000, /* R139 */
+ 0x0000, /* R140 */
+ 0x0000, /* R141 */
+ 0x0000, /* R142 */
+ 0x0000, /* R143 */
+ 0x0000, /* R144 */
+ 0x0000, /* R145 */
+ 0x0000, /* R146 */
+ 0x0000, /* R147 */
+ 0x0000, /* R148 */
+ 0x0000, /* R149 */
+ 0x0000, /* R150 */
+ 0x0000, /* R151 */
+ 0x0000, /* R152 */
+ 0x0000, /* R153 */
+ 0x0000, /* R154 */
+ 0x0000, /* R155 */
+ 0x0000, /* R156 */
+ 0x0000, /* R157 */
+ 0x0000, /* R158 */
+ 0x0000, /* R159 */
+ 0x0000, /* R160 */
+ 0x0000, /* R161 */
+ 0x0000, /* R162 */
+ 0x0000, /* R163 */
+ 0x0000, /* R164 */
+ 0x0000, /* R165 */
+ 0x0000, /* R166 */
+ 0x0000, /* R167 */
+ 0x0000, /* R168 */
+ 0x0000, /* R169 */
+ 0x0000, /* R170 */
+ 0x0000, /* R171 */
+ 0x0000, /* R172 */
+ 0x0000, /* R173 */
+ 0x0000, /* R174 */
+ 0x0000, /* R175 */
+ 0x0000, /* R176 */
+ 0x0000, /* R177 */
+ 0x0000, /* R178 */
+ 0x0000, /* R179 */
+ 0x0000, /* R180 */
+ 0x0000, /* R181 */
+ 0x0000, /* R182 */
+ 0x0000, /* R183 */
+ 0x0000, /* R184 */
+ 0x0000, /* R185 */
+ 0x0000, /* R186 */
+ 0x0000, /* R187 */
+ 0x0000, /* R188 */
+ 0x0000, /* R189 */
+ 0x0000, /* R190 */
+ 0x0000, /* R191 */
+ 0x0000, /* R192 */
+ 0x0000, /* R193 */
+ 0x0000, /* R194 */
+ 0x0000, /* R195 */
+ 0x0030, /* R196 */
+ 0x0006, /* R197 */
+ 0x0000, /* R198 */
+ 0x0060, /* R199 */
+ 0x0000, /* R200 */
+ 0x003F, /* R201 */
+ 0x0000, /* R202 */
+ 0x0000, /* R203 */
+ 0x0000, /* R204 */
+ 0x0001, /* R205 */
+ 0x0000, /* R206 */
+ 0x0181, /* R207 */
+ 0x0005, /* R208 */
+ 0x0008, /* R209 */
+ 0x0008, /* R210 */
+ 0x0000, /* R211 */
+ 0x013B, /* R212 */
+ 0x0000, /* R213 */
+ 0x0000, /* R214 */
+ 0x0000, /* R215 */
+ 0x0000, /* R216 */
+ 0x0070, /* R217 */
+ 0x0000, /* R218 */
+ 0x0000, /* R219 */
+ 0x0000, /* R220 */
+ 0x0000, /* R221 */
+ 0x0000, /* R222 */
+ 0x0003, /* R223 */
+ 0x0000, /* R224 */
+ 0x0000, /* R225 */
+ 0x0001, /* R226 */
+ 0x0008, /* R227 */
+ 0x0000, /* R228 */
+ 0x0000, /* R229 */
+ 0x0000, /* R230 */
+ 0x0000, /* R231 */
+ 0x0004, /* R232 */
+ 0x0000, /* R233 */
+ 0x0000, /* R234 */
+ 0x0000, /* R235 */
+ 0x0000, /* R236 */
+ 0x0000, /* R237 */
+ 0x0080, /* R238 */
+ 0x0000, /* R239 */
+ 0x0000, /* R240 */
+ 0x0000, /* R241 */
+ 0x0000, /* R242 */
+ 0x0000, /* R243 */
+ 0x0000, /* R244 */
+ 0x0052, /* R245 */
+ 0x0110, /* R246 */
+ 0x0040, /* R247 */
+ 0x0000, /* R248 */
+ 0x0030, /* R249 */
+ 0x0000, /* R250 */
+ 0x0000, /* R251 */
+ 0x0001, /* R252 - General test 1 */
+};
+
+struct wm8961_priv {
+ struct snd_soc_codec codec;
+ int sysclk;
+	u16 reg_cache[WM8961_MAX_REGISTER + 1];
+};
+
+static int wm8961_reg_is_volatile(int reg)
+{
+ switch (reg) {
+ case WM8961_WRITE_SEQUENCER_7:
+ case WM8961_DC_SERVO_1:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static unsigned int wm8961_read_reg_cache(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u16 *cache = codec->reg_cache;
+ BUG_ON(reg > WM8961_MAX_REGISTER);
+ return cache[reg];
+}
+
+static unsigned int wm8961_read_hw(struct snd_soc_codec *codec, u8 reg)
+{
+ struct i2c_msg xfer[2];
+ u16 data;
+ int ret;
+ struct i2c_client *client = codec->control_data;
+
+ BUG_ON(reg > WM8961_MAX_REGISTER);
+
+ /* Write register */
+ xfer[0].addr = client->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = client->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 2;
+ xfer[1].buf = (u8 *)&data;
+
+ ret = i2c_transfer(client->adapter, xfer, 2);
+ if (ret != 2) {
+ dev_err(&client->dev, "i2c_transfer() returned %d\n", ret);
+ return 0;
+ }
+
+ return (data >> 8) | ((data & 0xff) << 8);
+}
+
+static unsigned int wm8961_read(struct snd_soc_codec *codec, unsigned int reg)
+{
+ if (wm8961_reg_is_volatile(reg))
+ return wm8961_read_hw(codec, reg);
+ else
+ return wm8961_read_reg_cache(codec, reg);
+}
+
+static int wm8961_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ u16 *cache = codec->reg_cache;
+ u8 data[3];
+
+ BUG_ON(reg > WM8961_MAX_REGISTER);
+
+ if (!wm8961_reg_is_volatile(reg))
+ cache[reg] = value;
+
+ data[0] = reg;
+ data[1] = value >> 8;
+ data[2] = value & 0x00ff;
+
+ if (codec->hw_write(codec->control_data, data, 3) == 3)
+ return 0;
+ else
+ return -EIO;
+}
+
+static int wm8961_reset(struct snd_soc_codec *codec)
+{
+ return wm8961_write(codec, WM8961_SOFTWARE_RESET, 0);
+}
+
+/*
+ * The headphone output supports special anti-pop sequences giving
+ * silent power up and power down.
+ */
+static int wm8961_hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ u16 hp_reg = wm8961_read(codec, WM8961_ANALOGUE_HP_0);
+ u16 cp_reg = wm8961_read(codec, WM8961_CHARGE_PUMP_1);
+ u16 pwr_reg = wm8961_read(codec, WM8961_PWR_MGMT_2);
+ u16 dcs_reg = wm8961_read(codec, WM8961_DC_SERVO_1);
+ int timeout = 500;
+
+ if (event & SND_SOC_DAPM_POST_PMU) {
+ /* Make sure the output is shorted */
+ hp_reg &= ~(WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT);
+ wm8961_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Enable the charge pump */
+ cp_reg |= WM8961_CP_ENA;
+ wm8961_write(codec, WM8961_CHARGE_PUMP_1, cp_reg);
+ mdelay(5);
+
+ /* Enable the PGA */
+ pwr_reg |= WM8961_LOUT1_PGA | WM8961_ROUT1_PGA;
+ wm8961_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
+
+ /* Enable the amplifier */
+ hp_reg |= WM8961_HPR_ENA | WM8961_HPL_ENA;
+ wm8961_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Second stage enable */
+ hp_reg |= WM8961_HPR_ENA_DLY | WM8961_HPL_ENA_DLY;
+ wm8961_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Enable the DC servo & trigger startup */
+ dcs_reg |=
+ WM8961_DCS_ENA_CHAN_HPR | WM8961_DCS_TRIG_STARTUP_HPR |
+ WM8961_DCS_ENA_CHAN_HPL | WM8961_DCS_TRIG_STARTUP_HPL;
+ dev_dbg(codec->dev, "Enabling DC servo\n");
+
+ wm8961_write(codec, WM8961_DC_SERVO_1, dcs_reg);
+ do {
+ msleep(1);
+ dcs_reg = wm8961_read(codec, WM8961_DC_SERVO_1);
+ } while (--timeout &&
+ dcs_reg & (WM8961_DCS_TRIG_STARTUP_HPR |
+ WM8961_DCS_TRIG_STARTUP_HPL));
+ if (dcs_reg & (WM8961_DCS_TRIG_STARTUP_HPR |
+ WM8961_DCS_TRIG_STARTUP_HPL))
+ dev_err(codec->dev, "DC servo timed out\n");
+ else
+ dev_dbg(codec->dev, "DC servo startup complete\n");
+
+ /* Enable the output stage */
+ hp_reg |= WM8961_HPR_ENA_OUTP | WM8961_HPL_ENA_OUTP;
+ wm8961_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Remove the short on the output stage */
+ hp_reg |= WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT;
+ wm8961_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+ }
+
+ if (event & SND_SOC_DAPM_PRE_PMD) {
+ /* Short the output */
+ hp_reg &= ~(WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT);
+ wm8961_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Disable the output stage */
+ hp_reg &= ~(WM8961_HPR_ENA_OUTP | WM8961_HPL_ENA_OUTP);
+ wm8961_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Disable DC offset cancellation */
+ dcs_reg &= ~(WM8961_DCS_ENA_CHAN_HPR |
+ WM8961_DCS_ENA_CHAN_HPL);
+ wm8961_write(codec, WM8961_DC_SERVO_1, dcs_reg);
+
+ /* Finish up */
+ hp_reg &= ~(WM8961_HPR_ENA_DLY | WM8961_HPR_ENA |
+ WM8961_HPL_ENA_DLY | WM8961_HPL_ENA);
+ wm8961_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Disable the PGA */
+ pwr_reg &= ~(WM8961_LOUT1_PGA | WM8961_ROUT1_PGA);
+ wm8961_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
+
+ /* Disable the charge pump */
+ dev_dbg(codec->dev, "Disabling charge pump\n");
+ wm8961_write(codec, WM8961_CHARGE_PUMP_1,
+ cp_reg & ~WM8961_CP_ENA);
+ }
+
+ return 0;
+}
+
+static int wm8961_spk_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ u16 pwr_reg = wm8961_read(codec, WM8961_PWR_MGMT_2);
+ u16 spk_reg = wm8961_read(codec, WM8961_CLASS_D_CONTROL_1);
+
+ if (event & SND_SOC_DAPM_POST_PMU) {
+ /* Enable the PGA */
+ pwr_reg |= WM8961_SPKL_PGA | WM8961_SPKR_PGA;
+ wm8961_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
+
+ /* Enable the amplifier */
+ spk_reg |= WM8961_SPKL_ENA | WM8961_SPKR_ENA;
+ wm8961_write(codec, WM8961_CLASS_D_CONTROL_1, spk_reg);
+ }
+
+ if (event & SND_SOC_DAPM_PRE_PMD) {
+		/* Disable the amplifier */
+ spk_reg &= ~(WM8961_SPKL_ENA | WM8961_SPKR_ENA);
+ wm8961_write(codec, WM8961_CLASS_D_CONTROL_1, spk_reg);
+
+		/* Disable the PGA */
+ pwr_reg &= ~(WM8961_SPKL_PGA | WM8961_SPKR_PGA);
+ wm8961_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
+ }
+
+ return 0;
+}
+
+static const char *adc_hpf_text[] = {
+ "Hi-fi", "Voice 1", "Voice 2", "Voice 3",
+};
+
+static const struct soc_enum adc_hpf =
+ SOC_ENUM_SINGLE(WM8961_ADC_DAC_CONTROL_2, 7, 4, adc_hpf_text);
+
+static const char *dac_deemph_text[] = {
+ "None", "32kHz", "44.1kHz", "48kHz",
+};
+
+static const struct soc_enum dac_deemph =
+ SOC_ENUM_SINGLE(WM8961_ADC_DAC_CONTROL_1, 1, 4, dac_deemph_text);
+
+static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
+static const DECLARE_TLV_DB_SCALE(hp_sec_tlv, -700, 100, 0);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1);
+static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
+static unsigned int boost_tlv[] = {
+ TLV_DB_RANGE_HEAD(4),
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 1, TLV_DB_SCALE_ITEM(13, 0, 0),
+ 2, 2, TLV_DB_SCALE_ITEM(20, 0, 0),
+ 3, 3, TLV_DB_SCALE_ITEM(29, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(pga_tlv, -2325, 75, 0);
+
+static const struct snd_kcontrol_new wm8961_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Headphone Volume", WM8961_LOUT1_VOLUME, WM8961_ROUT1_VOLUME,
+ 0, 127, 0, out_tlv),
+SOC_DOUBLE_TLV("Headphone Secondary Volume", WM8961_ANALOGUE_HP_2,
+ 6, 3, 7, 0, hp_sec_tlv),
+SOC_DOUBLE_R("Headphone ZC Switch", WM8961_LOUT1_VOLUME, WM8961_ROUT1_VOLUME,
+ 7, 1, 0),
+
+SOC_DOUBLE_R_TLV("Speaker Volume", WM8961_LOUT2_VOLUME, WM8961_ROUT2_VOLUME,
+ 0, 127, 0, out_tlv),
+SOC_DOUBLE_R("Speaker ZC Switch", WM8961_LOUT2_VOLUME, WM8961_ROUT2_VOLUME,
+ 7, 1, 0),
+SOC_SINGLE("Speaker AC Gain", WM8961_CLASS_D_CONTROL_2, 0, 7, 0),
+
+SOC_SINGLE("DAC x128 OSR Switch", WM8961_ADC_DAC_CONTROL_2, 0, 1, 0),
+SOC_ENUM("DAC Deemphasis", dac_deemph),
+SOC_SINGLE("DAC Soft Mute Switch", WM8961_ADC_DAC_CONTROL_2, 3, 1, 0),
+
+SOC_DOUBLE_R_TLV("Sidetone Volume", WM8961_DSP_SIDETONE_0,
+ WM8961_DSP_SIDETONE_1, 4, 12, 0, sidetone_tlv),
+
+SOC_SINGLE("ADC High Pass Filter Switch", WM8961_ADC_DAC_CONTROL_1, 0, 1, 0),
+SOC_ENUM("ADC High Pass Filter Mode", adc_hpf),
+
+SOC_DOUBLE_R_TLV("Capture Volume",
+ WM8961_LEFT_ADC_VOLUME, WM8961_RIGHT_ADC_VOLUME,
+ 1, 119, 0, adc_tlv),
+SOC_DOUBLE_R_TLV("Capture Boost Volume",
+ WM8961_ADCL_SIGNAL_PATH, WM8961_ADCR_SIGNAL_PATH,
+ 4, 3, 0, boost_tlv),
+SOC_DOUBLE_R_TLV("Capture PGA Volume",
+ WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
+ 0, 62, 0, pga_tlv),
+SOC_DOUBLE_R("Capture PGA ZC Switch",
+ WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
+ 6, 1, 1),
+SOC_DOUBLE_R("Capture PGA Switch",
+ WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
+ 7, 1, 1),
+};
+
+static const char *sidetone_text[] = {
+ "None", "Left", "Right"
+};
+
+static const struct soc_enum dacl_sidetone =
+ SOC_ENUM_SINGLE(WM8961_DSP_SIDETONE_0, 2, 3, sidetone_text);
+
+static const struct soc_enum dacr_sidetone =
+ SOC_ENUM_SINGLE(WM8961_DSP_SIDETONE_1, 2, 3, sidetone_text);
+
+static const struct snd_kcontrol_new dacl_mux =
+ SOC_DAPM_ENUM("DACL Sidetone", dacl_sidetone);
+
+static const struct snd_kcontrol_new dacr_mux =
+ SOC_DAPM_ENUM("DACR Sidetone", dacr_sidetone);
+
+static const struct snd_soc_dapm_widget wm8961_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("LINPUT"),
+SND_SOC_DAPM_INPUT("RINPUT"),
+
+SND_SOC_DAPM_SUPPLY("CLK_DSP", WM8961_CLOCKING2, 4, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Left Input", WM8961_PWR_MGMT_1, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right Input", WM8961_PWR_MGMT_1, 4, 0, NULL, 0),
+
+SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", WM8961_PWR_MGMT_1, 3, 0),
+SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", WM8961_PWR_MGMT_1, 2, 0),
+
+SND_SOC_DAPM_MICBIAS("MICBIAS", WM8961_PWR_MGMT_1, 1, 0),
+
+SND_SOC_DAPM_MUX("DACL Sidetone", SND_SOC_NOPM, 0, 0, &dacl_mux),
+SND_SOC_DAPM_MUX("DACR Sidetone", SND_SOC_NOPM, 0, 0, &dacr_mux),
+
+SND_SOC_DAPM_DAC("DACL", "HiFi Playback", WM8961_PWR_MGMT_2, 8, 0),
+SND_SOC_DAPM_DAC("DACR", "HiFi Playback", WM8961_PWR_MGMT_2, 7, 0),
+
+/* Handle as a mono path for DCS */
+SND_SOC_DAPM_PGA_E("Headphone Output", SND_SOC_NOPM,
+ 4, 0, NULL, 0, wm8961_hp_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_PGA_E("Speaker Output", SND_SOC_NOPM,
+ 4, 0, NULL, 0, wm8961_spk_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+SND_SOC_DAPM_OUTPUT("HP_L"),
+SND_SOC_DAPM_OUTPUT("HP_R"),
+SND_SOC_DAPM_OUTPUT("SPK_LN"),
+SND_SOC_DAPM_OUTPUT("SPK_LP"),
+SND_SOC_DAPM_OUTPUT("SPK_RN"),
+SND_SOC_DAPM_OUTPUT("SPK_RP"),
+};
+
+
+static const struct snd_soc_dapm_route audio_paths[] = {
+ { "DACL", NULL, "CLK_DSP" },
+ { "DACL", NULL, "DACL Sidetone" },
+ { "DACR", NULL, "CLK_DSP" },
+ { "DACR", NULL, "DACR Sidetone" },
+
+ { "DACL Sidetone", "Left", "ADCL" },
+ { "DACL Sidetone", "Right", "ADCR" },
+
+ { "DACR Sidetone", "Left", "ADCL" },
+ { "DACR Sidetone", "Right", "ADCR" },
+
+ { "HP_L", NULL, "Headphone Output" },
+ { "HP_R", NULL, "Headphone Output" },
+ { "Headphone Output", NULL, "DACL" },
+ { "Headphone Output", NULL, "DACR" },
+
+ { "SPK_LN", NULL, "Speaker Output" },
+ { "SPK_LP", NULL, "Speaker Output" },
+ { "SPK_RN", NULL, "Speaker Output" },
+ { "SPK_RP", NULL, "Speaker Output" },
+
+ { "Speaker Output", NULL, "DACL" },
+ { "Speaker Output", NULL, "DACR" },
+
+ { "ADCL", NULL, "Left Input" },
+ { "ADCL", NULL, "CLK_DSP" },
+ { "ADCR", NULL, "Right Input" },
+ { "ADCR", NULL, "CLK_DSP" },
+
+ { "Left Input", NULL, "LINPUT" },
+ { "Right Input", NULL, "RINPUT" },
+
+};
+
+/* Values for CLK_SYS_RATE */
+static struct {
+ int ratio;
+ u16 val;
+} wm8961_clk_sys_ratio[] = {
+ { 64, 0 },
+ { 128, 1 },
+ { 192, 2 },
+ { 256, 3 },
+ { 384, 4 },
+ { 512, 5 },
+ { 768, 6 },
+ { 1024, 7 },
+ { 1408, 8 },
+ { 1536, 9 },
+};
+
+/* Values for SAMPLE_RATE */
+static struct {
+ int rate;
+ u16 val;
+} wm8961_srate[] = {
+ { 48000, 0 },
+ { 44100, 0 },
+ { 32000, 1 },
+ { 22050, 2 },
+ { 24000, 2 },
+ { 16000, 3 },
+	{ 11025, 4 },
+ { 12000, 4 },
+ { 8000, 5 },
+};
+
+static int wm8961_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8961_priv *wm8961 = codec->private_data;
+ int i, best, target, fs;
+ u16 reg;
+
+ fs = params_rate(params);
+
+ if (!wm8961->sysclk) {
+ dev_err(codec->dev, "MCLK has not been specified\n");
+ return -EINVAL;
+ }
+
+ /* Find the closest sample rate for the filters */
+ best = 0;
+ for (i = 0; i < ARRAY_SIZE(wm8961_srate); i++) {
+ if (abs(wm8961_srate[i].rate - fs) <
+ abs(wm8961_srate[best].rate - fs))
+ best = i;
+ }
+ reg = wm8961_read(codec, WM8961_ADDITIONAL_CONTROL_3);
+ reg &= ~WM8961_SAMPLE_RATE_MASK;
+ reg |= wm8961_srate[best].val;
+ wm8961_write(codec, WM8961_ADDITIONAL_CONTROL_3, reg);
+ dev_dbg(codec->dev, "Selected SRATE %dHz for %dHz\n",
+ wm8961_srate[best].rate, fs);
+
+ /* Select a CLK_SYS/fs ratio equal to or higher than required */
+ target = wm8961->sysclk / fs;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && target < 64) {
+ dev_err(codec->dev,
+ "SYSCLK must be at least 64*fs for DAC\n");
+ return -EINVAL;
+ }
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE && target < 256) {
+ dev_err(codec->dev,
+ "SYSCLK must be at least 256*fs for ADC\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wm8961_clk_sys_ratio); i++) {
+ if (wm8961_clk_sys_ratio[i].ratio >= target)
+ break;
+ }
+ if (i == ARRAY_SIZE(wm8961_clk_sys_ratio)) {
+ dev_err(codec->dev, "Unable to generate CLK_SYS_RATE\n");
+ return -EINVAL;
+ }
+ dev_dbg(codec->dev, "Selected CLK_SYS_RATE of %d for %d/%d=%d\n",
+ wm8961_clk_sys_ratio[i].ratio, wm8961->sysclk, fs,
+ wm8961->sysclk / fs);
+
+ reg = wm8961_read(codec, WM8961_CLOCKING_4);
+ reg &= ~WM8961_CLK_SYS_RATE_MASK;
+ reg |= wm8961_clk_sys_ratio[i].val << WM8961_CLK_SYS_RATE_SHIFT;
+ wm8961_write(codec, WM8961_CLOCKING_4, reg);
+
+ reg = wm8961_read(codec, WM8961_AUDIO_INTERFACE_0);
+ reg &= ~WM8961_WL_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ reg |= 1 << WM8961_WL_SHIFT;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ reg |= 2 << WM8961_WL_SHIFT;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ reg |= 3 << WM8961_WL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ wm8961_write(codec, WM8961_AUDIO_INTERFACE_0, reg);
+
+ /* Sloping stop-band filter is recommended for <= 24kHz */
+ reg = wm8961_read(codec, WM8961_ADC_DAC_CONTROL_2);
+ if (fs <= 24000)
+ reg |= WM8961_DACSLOPE;
+ else
+ reg &= ~WM8961_DACSLOPE;
+ wm8961_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
+
+ return 0;
+}
+
+static int wm8961_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq,
+ int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8961_priv *wm8961 = codec->private_data;
+ u16 reg = wm8961_read(codec, WM8961_CLOCKING1);
+
+ if (freq > 33000000) {
+ dev_err(codec->dev, "MCLK must be <33MHz\n");
+ return -EINVAL;
+ }
+
+ if (freq > 16500000) {
+ dev_dbg(codec->dev, "Using MCLK/2 for %dHz MCLK\n", freq);
+ reg |= WM8961_MCLKDIV;
+ freq /= 2;
+ } else {
+ dev_dbg(codec->dev, "Using MCLK/1 for %dHz MCLK\n", freq);
+ reg &= ~WM8961_MCLKDIV;
+ }
+
+ wm8961_write(codec, WM8961_CLOCKING1, reg);
+
+ wm8961->sysclk = freq;
+
+ return 0;
+}
+
+static int wm8961_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 aif = wm8961_read(codec, WM8961_AUDIO_INTERFACE_0);
+
+ aif &= ~(WM8961_BCLKINV | WM8961_LRP |
+ WM8961_MS | WM8961_FORMAT_MASK);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aif |= WM8961_MS;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+ aif |= 1;
+ break;
+
+ case SND_SOC_DAIFMT_I2S:
+ aif |= 2;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_B:
+ aif |= WM8961_LRP;
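+ /* fall through: DSP mode B uses the DSP mode A format bits plus LRP */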
+ case SND_SOC_DAIFMT_DSP_A:
+ aif |= 3;
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ case SND_SOC_DAIFMT_IB_NF:
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ aif |= WM8961_LRP;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif |= WM8961_BCLKINV;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ aif |= WM8961_BCLKINV | WM8961_LRP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return wm8961_write(codec, WM8961_AUDIO_INTERFACE_0, aif);
+}
+
+static int wm8961_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 reg = wm8961_read(codec, WM8961_ADDITIONAL_CONTROL_2);
+
+ if (tristate)
+ reg |= WM8961_TRIS;
+ else
+ reg &= ~WM8961_TRIS;
+
+ return wm8961_write(codec, WM8961_ADDITIONAL_CONTROL_2, reg);
+}
+
+static int wm8961_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 reg = wm8961_read(codec, WM8961_ADC_DAC_CONTROL_1);
+
+ if (mute)
+ reg |= WM8961_DACMU;
+ else
+ reg &= ~WM8961_DACMU;
+
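+ /* Brief settling delay before updating DACMU; assumed to let any in-progress soft mute or volume ramp complete */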
+ msleep(17);
+
+ return wm8961_write(codec, WM8961_ADC_DAC_CONTROL_1, reg);
+}
+
+static int wm8961_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 reg;
+
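+ /* div is written directly into the register field; for WM8961_BCLK pass one of the WM8961_BCLK_DIV_* values from wm8961.h */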
+ switch (div_id) {
+ case WM8961_BCLK:
+ reg = wm8961_read(codec, WM8961_CLOCKING2);
+ reg &= ~WM8961_BCLKDIV_MASK;
+ reg |= div;
+ wm8961_write(codec, WM8961_CLOCKING2, reg);
+ break;
+
+ case WM8961_LRCLK:
+ reg = wm8961_read(codec, WM8961_AUDIO_INTERFACE_2);
+ reg &= ~WM8961_LRCLK_RATE_MASK;
+ reg |= div;
+ wm8961_write(codec, WM8961_AUDIO_INTERFACE_2, reg);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wm8961_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ u16 reg;
+
+ /* This is all slightly unusual since we have no bypass paths
+ * and the output amplifier structure means we can just slam
+ * the biases straight up rather than having to ramp them
+ * slowly.
+ */
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
+ /* Enable bias generation */
+ reg = wm8961_read(codec, WM8961_ANTI_POP);
+ reg |= WM8961_BUFIOEN | WM8961_BUFDCOPEN;
+ wm8961_write(codec, WM8961_ANTI_POP, reg);
+
+ /* VMID=2*50k, VREF */
+ reg = wm8961_read(codec, WM8961_PWR_MGMT_1);
+ reg &= ~WM8961_VMIDSEL_MASK;
+ reg |= (1 << WM8961_VMIDSEL_SHIFT) | WM8961_VREF;
+ wm8961_write(codec, WM8961_PWR_MGMT_1, reg);
+ }
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_PREPARE) {
+ /* VREF off */
+ reg = wm8961_read(codec, WM8961_PWR_MGMT_1);
+ reg &= ~WM8961_VREF;
+ wm8961_write(codec, WM8961_PWR_MGMT_1, reg);
+
+ /* Bias generation off */
+ reg = wm8961_read(codec, WM8961_ANTI_POP);
+ reg &= ~(WM8961_BUFIOEN | WM8961_BUFDCOPEN);
+ wm8961_write(codec, WM8961_ANTI_POP, reg);
+
+ /* VMID off */
+ reg = wm8961_read(codec, WM8961_PWR_MGMT_1);
+ reg &= ~WM8961_VMIDSEL_MASK;
+ wm8961_write(codec, WM8961_PWR_MGMT_1, reg);
+ }
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ break;
+ }
+
+ codec->bias_level = level;
+
+ return 0;
+}
+
+
+#define WM8961_RATES SNDRV_PCM_RATE_8000_48000
+
+#define WM8961_FORMATS \
+ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops wm8961_dai_ops = {
+ .hw_params = wm8961_hw_params,
+ .set_sysclk = wm8961_set_sysclk,
+ .set_fmt = wm8961_set_fmt,
+ .digital_mute = wm8961_digital_mute,
+ .set_tristate = wm8961_set_tristate,
+ .set_clkdiv = wm8961_set_clkdiv,
+};
+
+struct snd_soc_dai wm8961_dai = {
+ .name = "WM8961",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8961_RATES,
+ .formats = WM8961_FORMATS,},
+ .capture = {
+ .stream_name = "HiFi Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8961_RATES,
+ .formats = WM8961_FORMATS,},
+ .ops = &wm8961_dai_ops,
+};
+EXPORT_SYMBOL_GPL(wm8961_dai);
+
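+/*
+ * Illustrative machine driver setup (a sketch only; the clock rate and
+ * divider below are assumptions, not requirements of this driver):
+ *
+ *	snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+ *			    SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
+ *	snd_soc_dai_set_sysclk(codec_dai, 0, 12288000, SND_SOC_CLOCK_IN);
+ *	snd_soc_dai_set_clkdiv(codec_dai, WM8961_BCLK, WM8961_BCLK_DIV_4);
+ */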
+
+static struct snd_soc_codec *wm8961_codec;
+
+static int wm8961_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (wm8961_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = wm8961_codec;
+ codec = wm8961_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ snd_soc_add_controls(codec, wm8961_snd_controls,
+ ARRAY_SIZE(wm8961_snd_controls));
+ snd_soc_dapm_new_controls(codec, wm8961_dapm_widgets,
+ ARRAY_SIZE(wm8961_dapm_widgets));
+ snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+ snd_soc_dapm_new_widgets(codec);
+
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+}
+
+static int wm8961_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8961_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8961_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int wm8961_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+ u16 *reg_cache = codec->reg_cache;
+ int i;
+
+ for (i = 0; i < codec->reg_cache_size; i++) {
+ if (i == WM8961_SOFTWARE_RESET)
+ continue;
+
+ wm8961_write(codec, i, reg_cache[i]);
+ }
+
+ wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+#else
+#define wm8961_suspend NULL
+#define wm8961_resume NULL
+#endif
+
+struct snd_soc_codec_device soc_codec_dev_wm8961 = {
+ .probe = wm8961_probe,
+ .remove = wm8961_remove,
+ .suspend = wm8961_suspend,
+ .resume = wm8961_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8961);
+
+static int wm8961_register(struct wm8961_priv *wm8961)
+{
+ struct snd_soc_codec *codec = &wm8961->codec;
+ int ret;
+ u16 reg;
+
+ if (wm8961_codec) {
+ dev_err(codec->dev, "Another WM8961 is registered\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = wm8961;
+ codec->name = "WM8961";
+ codec->owner = THIS_MODULE;
+ codec->read = wm8961_read;
+ codec->write = wm8961_write;
+ codec->dai = &wm8961_dai;
+ codec->num_dai = 1;
+ codec->reg_cache_size = ARRAY_SIZE(wm8961->reg_cache);
+ codec->reg_cache = &wm8961->reg_cache;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = wm8961_set_bias_level;
+
+ memcpy(codec->reg_cache, wm8961_reg_defaults,
+ sizeof(wm8961_reg_defaults));
+
+ reg = wm8961_read_hw(codec, WM8961_SOFTWARE_RESET);
+ if (reg != 0x1801) {
+ dev_err(codec->dev, "Device is not a WM8961: ID=0x%x\n", reg);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ reg = wm8961_read_hw(codec, WM8961_RIGHT_INPUT_VOLUME);
+ dev_info(codec->dev, "WM8961 family %d revision %c\n",
+ (reg & WM8961_DEVICE_ID_MASK) >> WM8961_DEVICE_ID_SHIFT,
+ ((reg & WM8961_CHIP_REV_MASK) >> WM8961_CHIP_REV_SHIFT)
+ + 'A');
+
+ ret = wm8961_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset\n");
+ goto err;
+ }
+
+ /* Enable class W */
+ reg = wm8961_read(codec, WM8961_CHARGE_PUMP_B);
+ reg |= WM8961_CP_DYN_PWR_MASK;
+ wm8961_write(codec, WM8961_CHARGE_PUMP_B, reg);
+
+ /* Latch volume update bits (right channel only, we always
+ * write both out) and default ZC on. */
+ reg = wm8961_read(codec, WM8961_ROUT1_VOLUME);
+ wm8961_write(codec, WM8961_ROUT1_VOLUME,
+ reg | WM8961_LO1ZC | WM8961_OUT1VU);
+ wm8961_write(codec, WM8961_LOUT1_VOLUME, reg | WM8961_LO1ZC);
+ reg = wm8961_read(codec, WM8961_ROUT2_VOLUME);
+ wm8961_write(codec, WM8961_ROUT2_VOLUME,
+ reg | WM8961_SPKRZC | WM8961_SPKVU);
+ wm8961_write(codec, WM8961_LOUT2_VOLUME, reg | WM8961_SPKLZC);
+
+ reg = wm8961_read(codec, WM8961_RIGHT_ADC_VOLUME);
+ wm8961_write(codec, WM8961_RIGHT_ADC_VOLUME, reg | WM8961_ADCVU);
+ reg = wm8961_read(codec, WM8961_RIGHT_INPUT_VOLUME);
+ wm8961_write(codec, WM8961_RIGHT_INPUT_VOLUME, reg | WM8961_IPVU);
+
+ /* Use soft mute by default */
+ reg = wm8961_read(codec, WM8961_ADC_DAC_CONTROL_2);
+ reg |= WM8961_DACSMM;
+ wm8961_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
+
+ /* Use automatic clocking mode by default; for now this is all
+ * we support.
+ */
+ reg = wm8961_read(codec, WM8961_CLOCKING_3);
+ reg &= ~WM8961_MANUAL_MODE;
+ wm8961_write(codec, WM8961_CLOCKING_3, reg);
+
+ wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ wm8961_dai.dev = codec->dev;
+
+ wm8961_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&wm8961_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ return ret;
+ }
+
+ return 0;
+
+err:
+ kfree(wm8961);
+ return ret;
+}
+
+static void wm8961_unregister(struct wm8961_priv *wm8961)
+{
+ wm8961_set_bias_level(&wm8961->codec, SND_SOC_BIAS_OFF);
+ snd_soc_unregister_dai(&wm8961_dai);
+ snd_soc_unregister_codec(&wm8961->codec);
+ kfree(wm8961);
+ wm8961_codec = NULL;
+}
+
+static __devinit int wm8961_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8961_priv *wm8961;
+ struct snd_soc_codec *codec;
+
+ wm8961 = kzalloc(sizeof(struct wm8961_priv), GFP_KERNEL);
+ if (wm8961 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8961->codec;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+
+ i2c_set_clientdata(i2c, wm8961);
+ codec->control_data = i2c;
+
+ codec->dev = &i2c->dev;
+
+ return wm8961_register(wm8961);
+}
+
+static __devexit int wm8961_i2c_remove(struct i2c_client *client)
+{
+ struct wm8961_priv *wm8961 = i2c_get_clientdata(client);
+ wm8961_unregister(wm8961);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8961_i2c_suspend(struct i2c_client *client, pm_message_t state)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8961_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8961_i2c_suspend NULL
+#define wm8961_i2c_resume NULL
+#endif
+
+static const struct i2c_device_id wm8961_i2c_id[] = {
+ { "wm8961", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8961_i2c_id);
+
+static struct i2c_driver wm8961_i2c_driver = {
+ .driver = {
+ .name = "wm8961",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8961_i2c_probe,
+ .remove = __devexit_p(wm8961_i2c_remove),
+ .suspend = wm8961_i2c_suspend,
+ .resume = wm8961_i2c_resume,
+ .id_table = wm8961_i2c_id,
+};
+
+static int __init wm8961_modinit(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&wm8961_i2c_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register WM8961 I2C driver: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+module_init(wm8961_modinit);
+
+static void __exit wm8961_exit(void)
+{
+ i2c_del_driver(&wm8961_i2c_driver);
+}
+module_exit(wm8961_exit);
+
+
+MODULE_DESCRIPTION("ASoC WM8961 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8961.h b/sound/soc/codecs/wm8961.h
new file mode 100644
index 000000000000..5513bfd720d6
--- /dev/null
+++ b/sound/soc/codecs/wm8961.h
@@ -0,0 +1,866 @@
+/*
+ * wm8961.h -- WM8961 SoC audio driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8961_H
+#define _WM8961_H
+
+#include <sound/soc.h>
+
+extern struct snd_soc_codec_device soc_codec_dev_wm8961;
+extern struct snd_soc_dai wm8961_dai;
+
+#define WM8961_BCLK 1
+#define WM8961_LRCLK 2
+
+#define WM8961_BCLK_DIV_1 0
+#define WM8961_BCLK_DIV_1_5 1
+#define WM8961_BCLK_DIV_2 2
+#define WM8961_BCLK_DIV_3 3
+#define WM8961_BCLK_DIV_4 4
+#define WM8961_BCLK_DIV_5_5 5
+#define WM8961_BCLK_DIV_6 6
+#define WM8961_BCLK_DIV_8 7
+#define WM8961_BCLK_DIV_11 8
+#define WM8961_BCLK_DIV_12 9
+#define WM8961_BCLK_DIV_16 10
+#define WM8961_BCLK_DIV_24 11
+#define WM8961_BCLK_DIV_32 13
+
+
+/*
+ * Register values.
+ */
+#define WM8961_LEFT_INPUT_VOLUME 0x00
+#define WM8961_RIGHT_INPUT_VOLUME 0x01
+#define WM8961_LOUT1_VOLUME 0x02
+#define WM8961_ROUT1_VOLUME 0x03
+#define WM8961_CLOCKING1 0x04
+#define WM8961_ADC_DAC_CONTROL_1 0x05
+#define WM8961_ADC_DAC_CONTROL_2 0x06
+#define WM8961_AUDIO_INTERFACE_0 0x07
+#define WM8961_CLOCKING2 0x08
+#define WM8961_AUDIO_INTERFACE_1 0x09
+#define WM8961_LEFT_DAC_VOLUME 0x0A
+#define WM8961_RIGHT_DAC_VOLUME 0x0B
+#define WM8961_AUDIO_INTERFACE_2 0x0E
+#define WM8961_SOFTWARE_RESET 0x0F
+#define WM8961_ALC1 0x11
+#define WM8961_ALC2 0x12
+#define WM8961_ALC3 0x13
+#define WM8961_NOISE_GATE 0x14
+#define WM8961_LEFT_ADC_VOLUME 0x15
+#define WM8961_RIGHT_ADC_VOLUME 0x16
+#define WM8961_ADDITIONAL_CONTROL_1 0x17
+#define WM8961_ADDITIONAL_CONTROL_2 0x18
+#define WM8961_PWR_MGMT_1 0x19
+#define WM8961_PWR_MGMT_2 0x1A
+#define WM8961_ADDITIONAL_CONTROL_3 0x1B
+#define WM8961_ANTI_POP 0x1C
+#define WM8961_CLOCKING_3 0x1E
+#define WM8961_ADCL_SIGNAL_PATH 0x20
+#define WM8961_ADCR_SIGNAL_PATH 0x21
+#define WM8961_LOUT2_VOLUME 0x28
+#define WM8961_ROUT2_VOLUME 0x29
+#define WM8961_PWR_MGMT_3 0x2F
+#define WM8961_ADDITIONAL_CONTROL_4 0x30
+#define WM8961_CLASS_D_CONTROL_1 0x31
+#define WM8961_CLASS_D_CONTROL_2 0x33
+#define WM8961_CLOCKING_4 0x38
+#define WM8961_DSP_SIDETONE_0 0x39
+#define WM8961_DSP_SIDETONE_1 0x3A
+#define WM8961_DC_SERVO_0 0x3C
+#define WM8961_DC_SERVO_1 0x3D
+#define WM8961_DC_SERVO_3 0x3F
+#define WM8961_DC_SERVO_5 0x41
+#define WM8961_ANALOGUE_PGA_BIAS 0x44
+#define WM8961_ANALOGUE_HP_0 0x45
+#define WM8961_ANALOGUE_HP_2 0x47
+#define WM8961_CHARGE_PUMP_1 0x48
+#define WM8961_CHARGE_PUMP_B 0x52
+#define WM8961_WRITE_SEQUENCER_1 0x57
+#define WM8961_WRITE_SEQUENCER_2 0x58
+#define WM8961_WRITE_SEQUENCER_3 0x59
+#define WM8961_WRITE_SEQUENCER_4 0x5A
+#define WM8961_WRITE_SEQUENCER_5 0x5B
+#define WM8961_WRITE_SEQUENCER_6 0x5C
+#define WM8961_WRITE_SEQUENCER_7 0x5D
+#define WM8961_GENERAL_TEST_1 0xFC
+
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Left Input volume
+ */
+#define WM8961_IPVU 0x0100 /* IPVU */
+#define WM8961_IPVU_MASK 0x0100 /* IPVU */
+#define WM8961_IPVU_SHIFT 8 /* IPVU */
+#define WM8961_IPVU_WIDTH 1 /* IPVU */
+#define WM8961_LINMUTE 0x0080 /* LINMUTE */
+#define WM8961_LINMUTE_MASK 0x0080 /* LINMUTE */
+#define WM8961_LINMUTE_SHIFT 7 /* LINMUTE */
+#define WM8961_LINMUTE_WIDTH 1 /* LINMUTE */
+#define WM8961_LIZC 0x0040 /* LIZC */
+#define WM8961_LIZC_MASK 0x0040 /* LIZC */
+#define WM8961_LIZC_SHIFT 6 /* LIZC */
+#define WM8961_LIZC_WIDTH 1 /* LIZC */
+#define WM8961_LINVOL_MASK 0x003F /* LINVOL - [5:0] */
+#define WM8961_LINVOL_SHIFT 0 /* LINVOL - [5:0] */
+#define WM8961_LINVOL_WIDTH 6 /* LINVOL - [5:0] */
+
+/*
+ * R1 (0x01) - Right Input volume
+ */
+#define WM8961_DEVICE_ID_MASK 0xF000 /* DEVICE_ID - [15:12] */
+#define WM8961_DEVICE_ID_SHIFT 12 /* DEVICE_ID - [15:12] */
+#define WM8961_DEVICE_ID_WIDTH 4 /* DEVICE_ID - [15:12] */
+#define WM8961_CHIP_REV_MASK 0x0E00 /* CHIP_REV - [11:9] */
+#define WM8961_CHIP_REV_SHIFT 9 /* CHIP_REV - [11:9] */
+#define WM8961_CHIP_REV_WIDTH 3 /* CHIP_REV - [11:9] */
+#define WM8961_IPVU 0x0100 /* IPVU */
+#define WM8961_IPVU_MASK 0x0100 /* IPVU */
+#define WM8961_IPVU_SHIFT 8 /* IPVU */
+#define WM8961_IPVU_WIDTH 1 /* IPVU */
+#define WM8961_RINMUTE 0x0080 /* RINMUTE */
+#define WM8961_RINMUTE_MASK 0x0080 /* RINMUTE */
+#define WM8961_RINMUTE_SHIFT 7 /* RINMUTE */
+#define WM8961_RINMUTE_WIDTH 1 /* RINMUTE */
+#define WM8961_RIZC 0x0040 /* RIZC */
+#define WM8961_RIZC_MASK 0x0040 /* RIZC */
+#define WM8961_RIZC_SHIFT 6 /* RIZC */
+#define WM8961_RIZC_WIDTH 1 /* RIZC */
+#define WM8961_RINVOL_MASK 0x003F /* RINVOL - [5:0] */
+#define WM8961_RINVOL_SHIFT 0 /* RINVOL - [5:0] */
+#define WM8961_RINVOL_WIDTH 6 /* RINVOL - [5:0] */
+
+/*
+ * R2 (0x02) - LOUT1 volume
+ */
+#define WM8961_OUT1VU 0x0100 /* OUT1VU */
+#define WM8961_OUT1VU_MASK 0x0100 /* OUT1VU */
+#define WM8961_OUT1VU_SHIFT 8 /* OUT1VU */
+#define WM8961_OUT1VU_WIDTH 1 /* OUT1VU */
+#define WM8961_LO1ZC 0x0080 /* LO1ZC */
+#define WM8961_LO1ZC_MASK 0x0080 /* LO1ZC */
+#define WM8961_LO1ZC_SHIFT 7 /* LO1ZC */
+#define WM8961_LO1ZC_WIDTH 1 /* LO1ZC */
+#define WM8961_LOUT1VOL_MASK 0x007F /* LOUT1VOL - [6:0] */
+#define WM8961_LOUT1VOL_SHIFT 0 /* LOUT1VOL - [6:0] */
+#define WM8961_LOUT1VOL_WIDTH 7 /* LOUT1VOL - [6:0] */
+
+/*
+ * R3 (0x03) - ROUT1 volume
+ */
+#define WM8961_OUT1VU 0x0100 /* OUT1VU */
+#define WM8961_OUT1VU_MASK 0x0100 /* OUT1VU */
+#define WM8961_OUT1VU_SHIFT 8 /* OUT1VU */
+#define WM8961_OUT1VU_WIDTH 1 /* OUT1VU */
+#define WM8961_RO1ZC 0x0080 /* RO1ZC */
+#define WM8961_RO1ZC_MASK 0x0080 /* RO1ZC */
+#define WM8961_RO1ZC_SHIFT 7 /* RO1ZC */
+#define WM8961_RO1ZC_WIDTH 1 /* RO1ZC */
+#define WM8961_ROUT1VOL_MASK 0x007F /* ROUT1VOL - [6:0] */
+#define WM8961_ROUT1VOL_SHIFT 0 /* ROUT1VOL - [6:0] */
+#define WM8961_ROUT1VOL_WIDTH 7 /* ROUT1VOL - [6:0] */
+
+/*
+ * R4 (0x04) - Clocking1
+ */
+#define WM8961_ADCDIV_MASK 0x01C0 /* ADCDIV - [8:6] */
+#define WM8961_ADCDIV_SHIFT 6 /* ADCDIV - [8:6] */
+#define WM8961_ADCDIV_WIDTH 3 /* ADCDIV - [8:6] */
+#define WM8961_DACDIV_MASK 0x0038 /* DACDIV - [5:3] */
+#define WM8961_DACDIV_SHIFT 3 /* DACDIV - [5:3] */
+#define WM8961_DACDIV_WIDTH 3 /* DACDIV - [5:3] */
+#define WM8961_MCLKDIV 0x0004 /* MCLKDIV */
+#define WM8961_MCLKDIV_MASK 0x0004 /* MCLKDIV */
+#define WM8961_MCLKDIV_SHIFT 2 /* MCLKDIV */
+#define WM8961_MCLKDIV_WIDTH 1 /* MCLKDIV */
+
+/*
+ * R5 (0x05) - ADC & DAC Control 1
+ */
+#define WM8961_ADCPOL_MASK 0x0060 /* ADCPOL - [6:5] */
+#define WM8961_ADCPOL_SHIFT 5 /* ADCPOL - [6:5] */
+#define WM8961_ADCPOL_WIDTH 2 /* ADCPOL - [6:5] */
+#define WM8961_DACMU 0x0008 /* DACMU */
+#define WM8961_DACMU_MASK 0x0008 /* DACMU */
+#define WM8961_DACMU_SHIFT 3 /* DACMU */
+#define WM8961_DACMU_WIDTH 1 /* DACMU */
+#define WM8961_DEEMPH_MASK 0x0006 /* DEEMPH - [2:1] */
+#define WM8961_DEEMPH_SHIFT 1 /* DEEMPH - [2:1] */
+#define WM8961_DEEMPH_WIDTH 2 /* DEEMPH - [2:1] */
+#define WM8961_ADCHPD 0x0001 /* ADCHPD */
+#define WM8961_ADCHPD_MASK 0x0001 /* ADCHPD */
+#define WM8961_ADCHPD_SHIFT 0 /* ADCHPD */
+#define WM8961_ADCHPD_WIDTH 1 /* ADCHPD */
+
+/*
+ * R6 (0x06) - ADC & DAC Control 2
+ */
+#define WM8961_ADC_HPF_CUT_MASK 0x0180 /* ADC_HPF_CUT - [8:7] */
+#define WM8961_ADC_HPF_CUT_SHIFT 7 /* ADC_HPF_CUT - [8:7] */
+#define WM8961_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [8:7] */
+#define WM8961_DACPOL_MASK 0x0060 /* DACPOL - [6:5] */
+#define WM8961_DACPOL_SHIFT 5 /* DACPOL - [6:5] */
+#define WM8961_DACPOL_WIDTH 2 /* DACPOL - [6:5] */
+#define WM8961_DACSMM 0x0008 /* DACSMM */
+#define WM8961_DACSMM_MASK 0x0008 /* DACSMM */
+#define WM8961_DACSMM_SHIFT 3 /* DACSMM */
+#define WM8961_DACSMM_WIDTH 1 /* DACSMM */
+#define WM8961_DACMR 0x0004 /* DACMR */
+#define WM8961_DACMR_MASK 0x0004 /* DACMR */
+#define WM8961_DACMR_SHIFT 2 /* DACMR */
+#define WM8961_DACMR_WIDTH 1 /* DACMR */
+#define WM8961_DACSLOPE 0x0002 /* DACSLOPE */
+#define WM8961_DACSLOPE_MASK 0x0002 /* DACSLOPE */
+#define WM8961_DACSLOPE_SHIFT 1 /* DACSLOPE */
+#define WM8961_DACSLOPE_WIDTH 1 /* DACSLOPE */
+#define WM8961_DAC_OSR128 0x0001 /* DAC_OSR128 */
+#define WM8961_DAC_OSR128_MASK 0x0001 /* DAC_OSR128 */
+#define WM8961_DAC_OSR128_SHIFT 0 /* DAC_OSR128 */
+#define WM8961_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */
+
+/*
+ * R7 (0x07) - Audio Interface 0
+ */
+#define WM8961_ALRSWAP 0x0100 /* ALRSWAP */
+#define WM8961_ALRSWAP_MASK 0x0100 /* ALRSWAP */
+#define WM8961_ALRSWAP_SHIFT 8 /* ALRSWAP */
+#define WM8961_ALRSWAP_WIDTH 1 /* ALRSWAP */
+#define WM8961_BCLKINV 0x0080 /* BCLKINV */
+#define WM8961_BCLKINV_MASK 0x0080 /* BCLKINV */
+#define WM8961_BCLKINV_SHIFT 7 /* BCLKINV */
+#define WM8961_BCLKINV_WIDTH 1 /* BCLKINV */
+#define WM8961_MS 0x0040 /* MS */
+#define WM8961_MS_MASK 0x0040 /* MS */
+#define WM8961_MS_SHIFT 6 /* MS */
+#define WM8961_MS_WIDTH 1 /* MS */
+#define WM8961_DLRSWAP 0x0020 /* DLRSWAP */
+#define WM8961_DLRSWAP_MASK 0x0020 /* DLRSWAP */
+#define WM8961_DLRSWAP_SHIFT 5 /* DLRSWAP */
+#define WM8961_DLRSWAP_WIDTH 1 /* DLRSWAP */
+#define WM8961_LRP 0x0010 /* LRP */
+#define WM8961_LRP_MASK 0x0010 /* LRP */
+#define WM8961_LRP_SHIFT 4 /* LRP */
+#define WM8961_LRP_WIDTH 1 /* LRP */
+#define WM8961_WL_MASK 0x000C /* WL - [3:2] */
+#define WM8961_WL_SHIFT 2 /* WL - [3:2] */
+#define WM8961_WL_WIDTH 2 /* WL - [3:2] */
+#define WM8961_FORMAT_MASK 0x0003 /* FORMAT - [1:0] */
+#define WM8961_FORMAT_SHIFT 0 /* FORMAT - [1:0] */
+#define WM8961_FORMAT_WIDTH 2 /* FORMAT - [1:0] */
+
+/*
+ * R8 (0x08) - Clocking2
+ */
+#define WM8961_DCLKDIV_MASK 0x01C0 /* DCLKDIV - [8:6] */
+#define WM8961_DCLKDIV_SHIFT 6 /* DCLKDIV - [8:6] */
+#define WM8961_DCLKDIV_WIDTH 3 /* DCLKDIV - [8:6] */
+#define WM8961_CLK_SYS_ENA 0x0020 /* CLK_SYS_ENA */
+#define WM8961_CLK_SYS_ENA_MASK 0x0020 /* CLK_SYS_ENA */
+#define WM8961_CLK_SYS_ENA_SHIFT 5 /* CLK_SYS_ENA */
+#define WM8961_CLK_SYS_ENA_WIDTH 1 /* CLK_SYS_ENA */
+#define WM8961_CLK_DSP_ENA 0x0010 /* CLK_DSP_ENA */
+#define WM8961_CLK_DSP_ENA_MASK 0x0010 /* CLK_DSP_ENA */
+#define WM8961_CLK_DSP_ENA_SHIFT 4 /* CLK_DSP_ENA */
+#define WM8961_CLK_DSP_ENA_WIDTH 1 /* CLK_DSP_ENA */
+#define WM8961_BCLKDIV_MASK 0x000F /* BCLKDIV - [3:0] */
+#define WM8961_BCLKDIV_SHIFT 0 /* BCLKDIV - [3:0] */
+#define WM8961_BCLKDIV_WIDTH 4 /* BCLKDIV - [3:0] */
+
+/*
+ * R9 (0x09) - Audio Interface 1
+ */
+#define WM8961_DACCOMP_MASK 0x0018 /* DACCOMP - [4:3] */
+#define WM8961_DACCOMP_SHIFT 3 /* DACCOMP - [4:3] */
+#define WM8961_DACCOMP_WIDTH 2 /* DACCOMP - [4:3] */
+#define WM8961_ADCCOMP_MASK 0x0006 /* ADCCOMP - [2:1] */
+#define WM8961_ADCCOMP_SHIFT 1 /* ADCCOMP - [2:1] */
+#define WM8961_ADCCOMP_WIDTH 2 /* ADCCOMP - [2:1] */
+#define WM8961_LOOPBACK 0x0001 /* LOOPBACK */
+#define WM8961_LOOPBACK_MASK 0x0001 /* LOOPBACK */
+#define WM8961_LOOPBACK_SHIFT 0 /* LOOPBACK */
+#define WM8961_LOOPBACK_WIDTH 1 /* LOOPBACK */
+
+/*
+ * R10 (0x0A) - Left DAC volume
+ */
+#define WM8961_DACVU 0x0100 /* DACVU */
+#define WM8961_DACVU_MASK 0x0100 /* DACVU */
+#define WM8961_DACVU_SHIFT 8 /* DACVU */
+#define WM8961_DACVU_WIDTH 1 /* DACVU */
+#define WM8961_LDACVOL_MASK 0x00FF /* LDACVOL - [7:0] */
+#define WM8961_LDACVOL_SHIFT 0 /* LDACVOL - [7:0] */
+#define WM8961_LDACVOL_WIDTH 8 /* LDACVOL - [7:0] */
+
+/*
+ * R11 (0x0B) - Right DAC volume
+ */
+#define WM8961_DACVU 0x0100 /* DACVU */
+#define WM8961_DACVU_MASK 0x0100 /* DACVU */
+#define WM8961_DACVU_SHIFT 8 /* DACVU */
+#define WM8961_DACVU_WIDTH 1 /* DACVU */
+#define WM8961_RDACVOL_MASK 0x00FF /* RDACVOL - [7:0] */
+#define WM8961_RDACVOL_SHIFT 0 /* RDACVOL - [7:0] */
+#define WM8961_RDACVOL_WIDTH 8 /* RDACVOL - [7:0] */
+
+/*
+ * R14 (0x0E) - Audio Interface 2
+ */
+#define WM8961_LRCLK_RATE_MASK 0x01FF /* LRCLK_RATE - [8:0] */
+#define WM8961_LRCLK_RATE_SHIFT 0 /* LRCLK_RATE - [8:0] */
+#define WM8961_LRCLK_RATE_WIDTH 9 /* LRCLK_RATE - [8:0] */
+
+/*
+ * R15 (0x0F) - Software Reset
+ */
+#define WM8961_SW_RST_DEV_ID1_MASK 0xFFFF /* SW_RST_DEV_ID1 - [15:0] */
+#define WM8961_SW_RST_DEV_ID1_SHIFT 0 /* SW_RST_DEV_ID1 - [15:0] */
+#define WM8961_SW_RST_DEV_ID1_WIDTH 16 /* SW_RST_DEV_ID1 - [15:0] */
+
+/*
+ * R17 (0x11) - ALC1
+ */
+#define WM8961_ALCSEL_MASK 0x0180 /* ALCSEL - [8:7] */
+#define WM8961_ALCSEL_SHIFT 7 /* ALCSEL - [8:7] */
+#define WM8961_ALCSEL_WIDTH 2 /* ALCSEL - [8:7] */
+#define WM8961_MAXGAIN_MASK 0x0070 /* MAXGAIN - [6:4] */
+#define WM8961_MAXGAIN_SHIFT 4 /* MAXGAIN - [6:4] */
+#define WM8961_MAXGAIN_WIDTH 3 /* MAXGAIN - [6:4] */
+#define WM8961_ALCL_MASK 0x000F /* ALCL - [3:0] */
+#define WM8961_ALCL_SHIFT 0 /* ALCL - [3:0] */
+#define WM8961_ALCL_WIDTH 4 /* ALCL - [3:0] */
+
+/*
+ * R18 (0x12) - ALC2
+ */
+#define WM8961_ALCZC 0x0080 /* ALCZC */
+#define WM8961_ALCZC_MASK 0x0080 /* ALCZC */
+#define WM8961_ALCZC_SHIFT 7 /* ALCZC */
+#define WM8961_ALCZC_WIDTH 1 /* ALCZC */
+#define WM8961_MINGAIN_MASK 0x0070 /* MINGAIN - [6:4] */
+#define WM8961_MINGAIN_SHIFT 4 /* MINGAIN - [6:4] */
+#define WM8961_MINGAIN_WIDTH 3 /* MINGAIN - [6:4] */
+#define WM8961_HLD_MASK 0x000F /* HLD - [3:0] */
+#define WM8961_HLD_SHIFT 0 /* HLD - [3:0] */
+#define WM8961_HLD_WIDTH 4 /* HLD - [3:0] */
+
+/*
+ * R19 (0x13) - ALC3
+ */
+#define WM8961_ALCMODE 0x0100 /* ALCMODE */
+#define WM8961_ALCMODE_MASK 0x0100 /* ALCMODE */
+#define WM8961_ALCMODE_SHIFT 8 /* ALCMODE */
+#define WM8961_ALCMODE_WIDTH 1 /* ALCMODE */
+#define WM8961_DCY_MASK 0x00F0 /* DCY - [7:4] */
+#define WM8961_DCY_SHIFT 4 /* DCY - [7:4] */
+#define WM8961_DCY_WIDTH 4 /* DCY - [7:4] */
+#define WM8961_ATK_MASK 0x000F /* ATK - [3:0] */
+#define WM8961_ATK_SHIFT 0 /* ATK - [3:0] */
+#define WM8961_ATK_WIDTH 4 /* ATK - [3:0] */
+
+/*
+ * R20 (0x14) - Noise Gate
+ */
+#define WM8961_NGTH_MASK 0x00F8 /* NGTH - [7:3] */
+#define WM8961_NGTH_SHIFT 3 /* NGTH - [7:3] */
+#define WM8961_NGTH_WIDTH 5 /* NGTH - [7:3] */
+#define WM8961_NGG 0x0002 /* NGG */
+#define WM8961_NGG_MASK 0x0002 /* NGG */
+#define WM8961_NGG_SHIFT 1 /* NGG */
+#define WM8961_NGG_WIDTH 1 /* NGG */
+#define WM8961_NGAT 0x0001 /* NGAT */
+#define WM8961_NGAT_MASK 0x0001 /* NGAT */
+#define WM8961_NGAT_SHIFT 0 /* NGAT */
+#define WM8961_NGAT_WIDTH 1 /* NGAT */
+
+/*
+ * R21 (0x15) - Left ADC volume
+ */
+#define WM8961_ADCVU 0x0100 /* ADCVU */
+#define WM8961_ADCVU_MASK 0x0100 /* ADCVU */
+#define WM8961_ADCVU_SHIFT 8 /* ADCVU */
+#define WM8961_ADCVU_WIDTH 1 /* ADCVU */
+#define WM8961_LADCVOL_MASK 0x00FF /* LADCVOL - [7:0] */
+#define WM8961_LADCVOL_SHIFT 0 /* LADCVOL - [7:0] */
+#define WM8961_LADCVOL_WIDTH 8 /* LADCVOL - [7:0] */
+
+/*
+ * R22 (0x16) - Right ADC volume
+ */
+#define WM8961_ADCVU 0x0100 /* ADCVU */
+#define WM8961_ADCVU_MASK 0x0100 /* ADCVU */
+#define WM8961_ADCVU_SHIFT 8 /* ADCVU */
+#define WM8961_ADCVU_WIDTH 1 /* ADCVU */
+#define WM8961_RADCVOL_MASK 0x00FF /* RADCVOL - [7:0] */
+#define WM8961_RADCVOL_SHIFT 0 /* RADCVOL - [7:0] */
+#define WM8961_RADCVOL_WIDTH 8 /* RADCVOL - [7:0] */
+
+/*
+ * R23 (0x17) - Additional control(1)
+ */
+#define WM8961_TSDEN 0x0100 /* TSDEN */
+#define WM8961_TSDEN_MASK 0x0100 /* TSDEN */
+#define WM8961_TSDEN_SHIFT 8 /* TSDEN */
+#define WM8961_TSDEN_WIDTH 1 /* TSDEN */
+#define WM8961_DMONOMIX 0x0010 /* DMONOMIX */
+#define WM8961_DMONOMIX_MASK 0x0010 /* DMONOMIX */
+#define WM8961_DMONOMIX_SHIFT 4 /* DMONOMIX */
+#define WM8961_DMONOMIX_WIDTH 1 /* DMONOMIX */
+#define WM8961_TOEN 0x0001 /* TOEN */
+#define WM8961_TOEN_MASK 0x0001 /* TOEN */
+#define WM8961_TOEN_SHIFT 0 /* TOEN */
+#define WM8961_TOEN_WIDTH 1 /* TOEN */
+
+/*
+ * R24 (0x18) - Additional control(2)
+ */
+#define WM8961_TRIS 0x0008 /* TRIS */
+#define WM8961_TRIS_MASK 0x0008 /* TRIS */
+#define WM8961_TRIS_SHIFT 3 /* TRIS */
+#define WM8961_TRIS_WIDTH 1 /* TRIS */
+
+/*
+ * R25 (0x19) - Pwr Mgmt (1)
+ */
+#define WM8961_VMIDSEL_MASK 0x0180 /* VMIDSEL - [8:7] */
+#define WM8961_VMIDSEL_SHIFT 7 /* VMIDSEL - [8:7] */
+#define WM8961_VMIDSEL_WIDTH 2 /* VMIDSEL - [8:7] */
+#define WM8961_VREF 0x0040 /* VREF */
+#define WM8961_VREF_MASK 0x0040 /* VREF */
+#define WM8961_VREF_SHIFT 6 /* VREF */
+#define WM8961_VREF_WIDTH 1 /* VREF */
+#define WM8961_AINL 0x0020 /* AINL */
+#define WM8961_AINL_MASK 0x0020 /* AINL */
+#define WM8961_AINL_SHIFT 5 /* AINL */
+#define WM8961_AINL_WIDTH 1 /* AINL */
+#define WM8961_AINR 0x0010 /* AINR */
+#define WM8961_AINR_MASK 0x0010 /* AINR */
+#define WM8961_AINR_SHIFT 4 /* AINR */
+#define WM8961_AINR_WIDTH 1 /* AINR */
+#define WM8961_ADCL 0x0008 /* ADCL */
+#define WM8961_ADCL_MASK 0x0008 /* ADCL */
+#define WM8961_ADCL_SHIFT 3 /* ADCL */
+#define WM8961_ADCL_WIDTH 1 /* ADCL */
+#define WM8961_ADCR 0x0004 /* ADCR */
+#define WM8961_ADCR_MASK 0x0004 /* ADCR */
+#define WM8961_ADCR_SHIFT 2 /* ADCR */
+#define WM8961_ADCR_WIDTH 1 /* ADCR */
+#define WM8961_MICB 0x0002 /* MICB */
+#define WM8961_MICB_MASK 0x0002 /* MICB */
+#define WM8961_MICB_SHIFT 1 /* MICB */
+#define WM8961_MICB_WIDTH 1 /* MICB */
+
+/*
+ * R26 (0x1A) - Pwr Mgmt (2)
+ */
+#define WM8961_DACL 0x0100 /* DACL */
+#define WM8961_DACL_MASK 0x0100 /* DACL */
+#define WM8961_DACL_SHIFT 8 /* DACL */
+#define WM8961_DACL_WIDTH 1 /* DACL */
+#define WM8961_DACR 0x0080 /* DACR */
+#define WM8961_DACR_MASK 0x0080 /* DACR */
+#define WM8961_DACR_SHIFT 7 /* DACR */
+#define WM8961_DACR_WIDTH 1 /* DACR */
+#define WM8961_LOUT1_PGA 0x0040 /* LOUT1_PGA */
+#define WM8961_LOUT1_PGA_MASK 0x0040 /* LOUT1_PGA */
+#define WM8961_LOUT1_PGA_SHIFT 6 /* LOUT1_PGA */
+#define WM8961_LOUT1_PGA_WIDTH 1 /* LOUT1_PGA */
+#define WM8961_ROUT1_PGA 0x0020 /* ROUT1_PGA */
+#define WM8961_ROUT1_PGA_MASK 0x0020 /* ROUT1_PGA */
+#define WM8961_ROUT1_PGA_SHIFT 5 /* ROUT1_PGA */
+#define WM8961_ROUT1_PGA_WIDTH 1 /* ROUT1_PGA */
+#define WM8961_SPKL_PGA 0x0010 /* SPKL_PGA */
+#define WM8961_SPKL_PGA_MASK 0x0010 /* SPKL_PGA */
+#define WM8961_SPKL_PGA_SHIFT 4 /* SPKL_PGA */
+#define WM8961_SPKL_PGA_WIDTH 1 /* SPKL_PGA */
+#define WM8961_SPKR_PGA 0x0008 /* SPKR_PGA */
+#define WM8961_SPKR_PGA_MASK 0x0008 /* SPKR_PGA */
+#define WM8961_SPKR_PGA_SHIFT 3 /* SPKR_PGA */
+#define WM8961_SPKR_PGA_WIDTH 1 /* SPKR_PGA */
+
+/*
+ * R27 (0x1B) - Additional Control (3)
+ */
+#define WM8961_SAMPLE_RATE_MASK 0x0007 /* SAMPLE_RATE - [2:0] */
+#define WM8961_SAMPLE_RATE_SHIFT 0 /* SAMPLE_RATE - [2:0] */
+#define WM8961_SAMPLE_RATE_WIDTH 3 /* SAMPLE_RATE - [2:0] */
+
+/*
+ * R28 (0x1C) - Anti-pop
+ */
+#define WM8961_BUFDCOPEN 0x0010 /* BUFDCOPEN */
+#define WM8961_BUFDCOPEN_MASK 0x0010 /* BUFDCOPEN */
+#define WM8961_BUFDCOPEN_SHIFT 4 /* BUFDCOPEN */
+#define WM8961_BUFDCOPEN_WIDTH 1 /* BUFDCOPEN */
+#define WM8961_BUFIOEN 0x0008 /* BUFIOEN */
+#define WM8961_BUFIOEN_MASK 0x0008 /* BUFIOEN */
+#define WM8961_BUFIOEN_SHIFT 3 /* BUFIOEN */
+#define WM8961_BUFIOEN_WIDTH 1 /* BUFIOEN */
+#define WM8961_SOFT_ST 0x0004 /* SOFT_ST */
+#define WM8961_SOFT_ST_MASK 0x0004 /* SOFT_ST */
+#define WM8961_SOFT_ST_SHIFT 2 /* SOFT_ST */
+#define WM8961_SOFT_ST_WIDTH 1 /* SOFT_ST */
+
+/*
+ * R30 (0x1E) - Clocking 3
+ */
+#define WM8961_CLK_TO_DIV_MASK 0x0180 /* CLK_TO_DIV - [8:7] */
+#define WM8961_CLK_TO_DIV_SHIFT 7 /* CLK_TO_DIV - [8:7] */
+#define WM8961_CLK_TO_DIV_WIDTH 2 /* CLK_TO_DIV - [8:7] */
+#define WM8961_CLK_256K_DIV_MASK 0x007E /* CLK_256K_DIV - [6:1] */
+#define WM8961_CLK_256K_DIV_SHIFT 1 /* CLK_256K_DIV - [6:1] */
+#define WM8961_CLK_256K_DIV_WIDTH 6 /* CLK_256K_DIV - [6:1] */
+#define WM8961_MANUAL_MODE 0x0001 /* MANUAL_MODE */
+#define WM8961_MANUAL_MODE_MASK 0x0001 /* MANUAL_MODE */
+#define WM8961_MANUAL_MODE_SHIFT 0 /* MANUAL_MODE */
+#define WM8961_MANUAL_MODE_WIDTH 1 /* MANUAL_MODE */
+
+/*
+ * R32 (0x20) - ADCL signal path
+ */
+#define WM8961_LMICBOOST_MASK 0x0030 /* LMICBOOST - [5:4] */
+#define WM8961_LMICBOOST_SHIFT 4 /* LMICBOOST - [5:4] */
+#define WM8961_LMICBOOST_WIDTH 2 /* LMICBOOST - [5:4] */
+
+/*
+ * R33 (0x21) - ADCR signal path
+ */
+#define WM8961_RMICBOOST_MASK 0x0030 /* RMICBOOST - [5:4] */
+#define WM8961_RMICBOOST_SHIFT 4 /* RMICBOOST - [5:4] */
+#define WM8961_RMICBOOST_WIDTH 2 /* RMICBOOST - [5:4] */
+
+/*
+ * R40 (0x28) - LOUT2 volume
+ */
+#define WM8961_SPKVU 0x0100 /* SPKVU */
+#define WM8961_SPKVU_MASK 0x0100 /* SPKVU */
+#define WM8961_SPKVU_SHIFT 8 /* SPKVU */
+#define WM8961_SPKVU_WIDTH 1 /* SPKVU */
+#define WM8961_SPKLZC 0x0080 /* SPKLZC */
+#define WM8961_SPKLZC_MASK 0x0080 /* SPKLZC */
+#define WM8961_SPKLZC_SHIFT 7 /* SPKLZC */
+#define WM8961_SPKLZC_WIDTH 1 /* SPKLZC */
+#define WM8961_SPKLVOL_MASK 0x007F /* SPKLVOL - [6:0] */
+#define WM8961_SPKLVOL_SHIFT 0 /* SPKLVOL - [6:0] */
+#define WM8961_SPKLVOL_WIDTH 7 /* SPKLVOL - [6:0] */
+
+/*
+ * R41 (0x29) - ROUT2 volume
+ */
+#define WM8961_SPKVU 0x0100 /* SPKVU */
+#define WM8961_SPKVU_MASK 0x0100 /* SPKVU */
+#define WM8961_SPKVU_SHIFT 8 /* SPKVU */
+#define WM8961_SPKVU_WIDTH 1 /* SPKVU */
+#define WM8961_SPKRZC 0x0080 /* SPKRZC */
+#define WM8961_SPKRZC_MASK 0x0080 /* SPKRZC */
+#define WM8961_SPKRZC_SHIFT 7 /* SPKRZC */
+#define WM8961_SPKRZC_WIDTH 1 /* SPKRZC */
+#define WM8961_SPKRVOL_MASK 0x007F /* SPKRVOL - [6:0] */
+#define WM8961_SPKRVOL_SHIFT 0 /* SPKRVOL - [6:0] */
+#define WM8961_SPKRVOL_WIDTH 7 /* SPKRVOL - [6:0] */
+
+/*
+ * R47 (0x2F) - Pwr Mgmt (3)
+ */
+#define WM8961_TEMP_SHUT 0x0002 /* TEMP_SHUT */
+#define WM8961_TEMP_SHUT_MASK 0x0002 /* TEMP_SHUT */
+#define WM8961_TEMP_SHUT_SHIFT 1 /* TEMP_SHUT */
+#define WM8961_TEMP_SHUT_WIDTH 1 /* TEMP_SHUT */
+#define WM8961_TEMP_WARN 0x0001 /* TEMP_WARN */
+#define WM8961_TEMP_WARN_MASK 0x0001 /* TEMP_WARN */
+#define WM8961_TEMP_WARN_SHIFT 0 /* TEMP_WARN */
+#define WM8961_TEMP_WARN_WIDTH 1 /* TEMP_WARN */
+
+/*
+ * R48 (0x30) - Additional Control (4)
+ */
+#define WM8961_TSENSEN 0x0002 /* TSENSEN */
+#define WM8961_TSENSEN_MASK 0x0002 /* TSENSEN */
+#define WM8961_TSENSEN_SHIFT 1 /* TSENSEN */
+#define WM8961_TSENSEN_WIDTH 1 /* TSENSEN */
+#define WM8961_MBSEL 0x0001 /* MBSEL */
+#define WM8961_MBSEL_MASK 0x0001 /* MBSEL */
+#define WM8961_MBSEL_SHIFT 0 /* MBSEL */
+#define WM8961_MBSEL_WIDTH 1 /* MBSEL */
+
+/*
+ * R49 (0x31) - Class D Control 1
+ */
+#define WM8961_SPKR_ENA 0x0080 /* SPKR_ENA */
+#define WM8961_SPKR_ENA_MASK 0x0080 /* SPKR_ENA */
+#define WM8961_SPKR_ENA_SHIFT 7 /* SPKR_ENA */
+#define WM8961_SPKR_ENA_WIDTH 1 /* SPKR_ENA */
+#define WM8961_SPKL_ENA 0x0040 /* SPKL_ENA */
+#define WM8961_SPKL_ENA_MASK 0x0040 /* SPKL_ENA */
+#define WM8961_SPKL_ENA_SHIFT 6 /* SPKL_ENA */
+#define WM8961_SPKL_ENA_WIDTH 1 /* SPKL_ENA */
+
+/*
+ * R51 (0x33) - Class D Control 2
+ */
+#define WM8961_CLASSD_ACGAIN_MASK 0x0007 /* CLASSD_ACGAIN - [2:0] */
+#define WM8961_CLASSD_ACGAIN_SHIFT 0 /* CLASSD_ACGAIN - [2:0] */
+#define WM8961_CLASSD_ACGAIN_WIDTH 3 /* CLASSD_ACGAIN - [2:0] */
+
+/*
+ * R56 (0x38) - Clocking 4
+ */
+#define WM8961_CLK_DCS_DIV_MASK 0x01E0 /* CLK_DCS_DIV - [8:5] */
+#define WM8961_CLK_DCS_DIV_SHIFT 5 /* CLK_DCS_DIV - [8:5] */
+#define WM8961_CLK_DCS_DIV_WIDTH 4 /* CLK_DCS_DIV - [8:5] */
+#define WM8961_CLK_SYS_RATE_MASK 0x001E /* CLK_SYS_RATE - [4:1] */
+#define WM8961_CLK_SYS_RATE_SHIFT 1 /* CLK_SYS_RATE - [4:1] */
+#define WM8961_CLK_SYS_RATE_WIDTH 4 /* CLK_SYS_RATE - [4:1] */
+
+/*
+ * R57 (0x39) - DSP Sidetone 0
+ */
+#define WM8961_ADCR_DAC_SVOL_MASK 0x00F0 /* ADCR_DAC_SVOL - [7:4] */
+#define WM8961_ADCR_DAC_SVOL_SHIFT 4 /* ADCR_DAC_SVOL - [7:4] */
+#define WM8961_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [7:4] */
+#define WM8961_ADC_TO_DACR_MASK 0x000C /* ADC_TO_DACR - [3:2] */
+#define WM8961_ADC_TO_DACR_SHIFT 2 /* ADC_TO_DACR - [3:2] */
+#define WM8961_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [3:2] */
+
+/*
+ * R58 (0x3A) - DSP Sidetone 1
+ */
+#define WM8961_ADCL_DAC_SVOL_MASK 0x00F0 /* ADCL_DAC_SVOL - [7:4] */
+#define WM8961_ADCL_DAC_SVOL_SHIFT 4 /* ADCL_DAC_SVOL - [7:4] */
+#define WM8961_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [7:4] */
+#define WM8961_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */
+#define WM8961_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */
+#define WM8961_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */
+
+/*
+ * R60 (0x3C) - DC Servo 0
+ */
+#define WM8961_DCS_ENA_CHAN_INL 0x0080 /* DCS_ENA_CHAN_INL */
+#define WM8961_DCS_ENA_CHAN_INL_MASK 0x0080 /* DCS_ENA_CHAN_INL */
+#define WM8961_DCS_ENA_CHAN_INL_SHIFT 7 /* DCS_ENA_CHAN_INL */
+#define WM8961_DCS_ENA_CHAN_INL_WIDTH 1 /* DCS_ENA_CHAN_INL */
+#define WM8961_DCS_TRIG_STARTUP_INL 0x0040 /* DCS_TRIG_STARTUP_INL */
+#define WM8961_DCS_TRIG_STARTUP_INL_MASK 0x0040 /* DCS_TRIG_STARTUP_INL */
+#define WM8961_DCS_TRIG_STARTUP_INL_SHIFT 6 /* DCS_TRIG_STARTUP_INL */
+#define WM8961_DCS_TRIG_STARTUP_INL_WIDTH 1 /* DCS_TRIG_STARTUP_INL */
+#define WM8961_DCS_TRIG_SERIES_INL 0x0010 /* DCS_TRIG_SERIES_INL */
+#define WM8961_DCS_TRIG_SERIES_INL_MASK 0x0010 /* DCS_TRIG_SERIES_INL */
+#define WM8961_DCS_TRIG_SERIES_INL_SHIFT 4 /* DCS_TRIG_SERIES_INL */
+#define WM8961_DCS_TRIG_SERIES_INL_WIDTH 1 /* DCS_TRIG_SERIES_INL */
+#define WM8961_DCS_ENA_CHAN_INR 0x0008 /* DCS_ENA_CHAN_INR */
+#define WM8961_DCS_ENA_CHAN_INR_MASK 0x0008 /* DCS_ENA_CHAN_INR */
+#define WM8961_DCS_ENA_CHAN_INR_SHIFT 3 /* DCS_ENA_CHAN_INR */
+#define WM8961_DCS_ENA_CHAN_INR_WIDTH 1 /* DCS_ENA_CHAN_INR */
+#define WM8961_DCS_TRIG_STARTUP_INR 0x0004 /* DCS_TRIG_STARTUP_INR */
+#define WM8961_DCS_TRIG_STARTUP_INR_MASK 0x0004 /* DCS_TRIG_STARTUP_INR */
+#define WM8961_DCS_TRIG_STARTUP_INR_SHIFT 2 /* DCS_TRIG_STARTUP_INR */
+#define WM8961_DCS_TRIG_STARTUP_INR_WIDTH 1 /* DCS_TRIG_STARTUP_INR */
+#define WM8961_DCS_TRIG_SERIES_INR 0x0001 /* DCS_TRIG_SERIES_INR */
+#define WM8961_DCS_TRIG_SERIES_INR_MASK 0x0001 /* DCS_TRIG_SERIES_INR */
+#define WM8961_DCS_TRIG_SERIES_INR_SHIFT 0 /* DCS_TRIG_SERIES_INR */
+#define WM8961_DCS_TRIG_SERIES_INR_WIDTH 1 /* DCS_TRIG_SERIES_INR */
+
+/*
+ * R61 (0x3D) - DC Servo 1
+ */
+#define WM8961_DCS_ENA_CHAN_HPL 0x0080 /* DCS_ENA_CHAN_HPL */
+#define WM8961_DCS_ENA_CHAN_HPL_MASK 0x0080 /* DCS_ENA_CHAN_HPL */
+#define WM8961_DCS_ENA_CHAN_HPL_SHIFT 7 /* DCS_ENA_CHAN_HPL */
+#define WM8961_DCS_ENA_CHAN_HPL_WIDTH 1 /* DCS_ENA_CHAN_HPL */
+#define WM8961_DCS_TRIG_STARTUP_HPL 0x0040 /* DCS_TRIG_STARTUP_HPL */
+#define WM8961_DCS_TRIG_STARTUP_HPL_MASK 0x0040 /* DCS_TRIG_STARTUP_HPL */
+#define WM8961_DCS_TRIG_STARTUP_HPL_SHIFT 6 /* DCS_TRIG_STARTUP_HPL */
+#define WM8961_DCS_TRIG_STARTUP_HPL_WIDTH 1 /* DCS_TRIG_STARTUP_HPL */
+#define WM8961_DCS_TRIG_SERIES_HPL 0x0010 /* DCS_TRIG_SERIES_HPL */
+#define WM8961_DCS_TRIG_SERIES_HPL_MASK 0x0010 /* DCS_TRIG_SERIES_HPL */
+#define WM8961_DCS_TRIG_SERIES_HPL_SHIFT 4 /* DCS_TRIG_SERIES_HPL */
+#define WM8961_DCS_TRIG_SERIES_HPL_WIDTH 1 /* DCS_TRIG_SERIES_HPL */
+#define WM8961_DCS_ENA_CHAN_HPR 0x0008 /* DCS_ENA_CHAN_HPR */
+#define WM8961_DCS_ENA_CHAN_HPR_MASK 0x0008 /* DCS_ENA_CHAN_HPR */
+#define WM8961_DCS_ENA_CHAN_HPR_SHIFT 3 /* DCS_ENA_CHAN_HPR */
+#define WM8961_DCS_ENA_CHAN_HPR_WIDTH 1 /* DCS_ENA_CHAN_HPR */
+#define WM8961_DCS_TRIG_STARTUP_HPR 0x0004 /* DCS_TRIG_STARTUP_HPR */
+#define WM8961_DCS_TRIG_STARTUP_HPR_MASK 0x0004 /* DCS_TRIG_STARTUP_HPR */
+#define WM8961_DCS_TRIG_STARTUP_HPR_SHIFT 2 /* DCS_TRIG_STARTUP_HPR */
+#define WM8961_DCS_TRIG_STARTUP_HPR_WIDTH 1 /* DCS_TRIG_STARTUP_HPR */
+#define WM8961_DCS_TRIG_SERIES_HPR 0x0001 /* DCS_TRIG_SERIES_HPR */
+#define WM8961_DCS_TRIG_SERIES_HPR_MASK 0x0001 /* DCS_TRIG_SERIES_HPR */
+#define WM8961_DCS_TRIG_SERIES_HPR_SHIFT 0 /* DCS_TRIG_SERIES_HPR */
+#define WM8961_DCS_TRIG_SERIES_HPR_WIDTH 1 /* DCS_TRIG_SERIES_HPR */
+
+/*
+ * R63 (0x3F) - DC Servo 3
+ */
+#define WM8961_DCS_FILT_BW_SERIES_MASK 0x0030 /* DCS_FILT_BW_SERIES - [5:4] */
+#define WM8961_DCS_FILT_BW_SERIES_SHIFT 4 /* DCS_FILT_BW_SERIES - [5:4] */
+#define WM8961_DCS_FILT_BW_SERIES_WIDTH 2 /* DCS_FILT_BW_SERIES - [5:4] */
+
+/*
+ * R65 (0x41) - DC Servo 5
+ */
+#define WM8961_DCS_SERIES_NO_HP_MASK 0x007F /* DCS_SERIES_NO_HP - [6:0] */
+#define WM8961_DCS_SERIES_NO_HP_SHIFT 0 /* DCS_SERIES_NO_HP - [6:0] */
+#define WM8961_DCS_SERIES_NO_HP_WIDTH 7 /* DCS_SERIES_NO_HP - [6:0] */
+
+/*
+ * R68 (0x44) - Analogue PGA Bias
+ */
+#define WM8961_HP_PGAS_BIAS_MASK 0x0007 /* HP_PGAS_BIAS - [2:0] */
+#define WM8961_HP_PGAS_BIAS_SHIFT 0 /* HP_PGAS_BIAS - [2:0] */
+#define WM8961_HP_PGAS_BIAS_WIDTH 3 /* HP_PGAS_BIAS - [2:0] */
+
+/*
+ * R69 (0x45) - Analogue HP 0
+ */
+#define WM8961_HPL_RMV_SHORT 0x0080 /* HPL_RMV_SHORT */
+#define WM8961_HPL_RMV_SHORT_MASK 0x0080 /* HPL_RMV_SHORT */
+#define WM8961_HPL_RMV_SHORT_SHIFT 7 /* HPL_RMV_SHORT */
+#define WM8961_HPL_RMV_SHORT_WIDTH 1 /* HPL_RMV_SHORT */
+#define WM8961_HPL_ENA_OUTP 0x0040 /* HPL_ENA_OUTP */
+#define WM8961_HPL_ENA_OUTP_MASK 0x0040 /* HPL_ENA_OUTP */
+#define WM8961_HPL_ENA_OUTP_SHIFT 6 /* HPL_ENA_OUTP */
+#define WM8961_HPL_ENA_OUTP_WIDTH 1 /* HPL_ENA_OUTP */
+#define WM8961_HPL_ENA_DLY 0x0020 /* HPL_ENA_DLY */
+#define WM8961_HPL_ENA_DLY_MASK 0x0020 /* HPL_ENA_DLY */
+#define WM8961_HPL_ENA_DLY_SHIFT 5 /* HPL_ENA_DLY */
+#define WM8961_HPL_ENA_DLY_WIDTH 1 /* HPL_ENA_DLY */
+#define WM8961_HPL_ENA 0x0010 /* HPL_ENA */
+#define WM8961_HPL_ENA_MASK 0x0010 /* HPL_ENA */
+#define WM8961_HPL_ENA_SHIFT 4 /* HPL_ENA */
+#define WM8961_HPL_ENA_WIDTH 1 /* HPL_ENA */
+#define WM8961_HPR_RMV_SHORT 0x0008 /* HPR_RMV_SHORT */
+#define WM8961_HPR_RMV_SHORT_MASK 0x0008 /* HPR_RMV_SHORT */
+#define WM8961_HPR_RMV_SHORT_SHIFT 3 /* HPR_RMV_SHORT */
+#define WM8961_HPR_RMV_SHORT_WIDTH 1 /* HPR_RMV_SHORT */
+#define WM8961_HPR_ENA_OUTP 0x0004 /* HPR_ENA_OUTP */
+#define WM8961_HPR_ENA_OUTP_MASK 0x0004 /* HPR_ENA_OUTP */
+#define WM8961_HPR_ENA_OUTP_SHIFT 2 /* HPR_ENA_OUTP */
+#define WM8961_HPR_ENA_OUTP_WIDTH 1 /* HPR_ENA_OUTP */
+#define WM8961_HPR_ENA_DLY 0x0002 /* HPR_ENA_DLY */
+#define WM8961_HPR_ENA_DLY_MASK 0x0002 /* HPR_ENA_DLY */
+#define WM8961_HPR_ENA_DLY_SHIFT 1 /* HPR_ENA_DLY */
+#define WM8961_HPR_ENA_DLY_WIDTH 1 /* HPR_ENA_DLY */
+#define WM8961_HPR_ENA 0x0001 /* HPR_ENA */
+#define WM8961_HPR_ENA_MASK 0x0001 /* HPR_ENA */
+#define WM8961_HPR_ENA_SHIFT 0 /* HPR_ENA */
+#define WM8961_HPR_ENA_WIDTH 1 /* HPR_ENA */
+
+/*
+ * R71 (0x47) - Analogue HP 2
+ */
+#define WM8961_HPL_VOL_MASK 0x01C0 /* HPL_VOL - [8:6] */
+#define WM8961_HPL_VOL_SHIFT 6 /* HPL_VOL - [8:6] */
+#define WM8961_HPL_VOL_WIDTH 3 /* HPL_VOL - [8:6] */
+#define WM8961_HPR_VOL_MASK 0x0038 /* HPR_VOL - [5:3] */
+#define WM8961_HPR_VOL_SHIFT 3 /* HPR_VOL - [5:3] */
+#define WM8961_HPR_VOL_WIDTH 3 /* HPR_VOL - [5:3] */
+#define WM8961_HP_BIAS_BOOST_MASK 0x0007 /* HP_BIAS_BOOST - [2:0] */
+#define WM8961_HP_BIAS_BOOST_SHIFT 0 /* HP_BIAS_BOOST - [2:0] */
+#define WM8961_HP_BIAS_BOOST_WIDTH 3 /* HP_BIAS_BOOST - [2:0] */
+
+/*
+ * R72 (0x48) - Charge Pump 1
+ */
+#define WM8961_CP_ENA 0x0001 /* CP_ENA */
+#define WM8961_CP_ENA_MASK 0x0001 /* CP_ENA */
+#define WM8961_CP_ENA_SHIFT 0 /* CP_ENA */
+#define WM8961_CP_ENA_WIDTH 1 /* CP_ENA */
+
+/*
+ * R82 (0x52) - Charge Pump B
+ */
+#define WM8961_CP_DYN_PWR_MASK 0x0003 /* CP_DYN_PWR - [1:0] */
+#define WM8961_CP_DYN_PWR_SHIFT 0 /* CP_DYN_PWR - [1:0] */
+#define WM8961_CP_DYN_PWR_WIDTH 2 /* CP_DYN_PWR - [1:0] */
+
+/*
+ * R87 (0x57) - Write Sequencer 1
+ */
+#define WM8961_WSEQ_ENA 0x0020 /* WSEQ_ENA */
+#define WM8961_WSEQ_ENA_MASK 0x0020 /* WSEQ_ENA */
+#define WM8961_WSEQ_ENA_SHIFT 5 /* WSEQ_ENA */
+#define WM8961_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
+#define WM8961_WSEQ_WRITE_INDEX_MASK 0x001F /* WSEQ_WRITE_INDEX - [4:0] */
+#define WM8961_WSEQ_WRITE_INDEX_SHIFT 0 /* WSEQ_WRITE_INDEX - [4:0] */
+#define WM8961_WSEQ_WRITE_INDEX_WIDTH 5 /* WSEQ_WRITE_INDEX - [4:0] */
+
+/*
+ * R88 (0x58) - Write Sequencer 2
+ */
+#define WM8961_WSEQ_EOS 0x0100 /* WSEQ_EOS */
+#define WM8961_WSEQ_EOS_MASK 0x0100 /* WSEQ_EOS */
+#define WM8961_WSEQ_EOS_SHIFT 8 /* WSEQ_EOS */
+#define WM8961_WSEQ_EOS_WIDTH 1 /* WSEQ_EOS */
+#define WM8961_WSEQ_ADDR_MASK 0x00FF /* WSEQ_ADDR - [7:0] */
+#define WM8961_WSEQ_ADDR_SHIFT 0 /* WSEQ_ADDR - [7:0] */
+#define WM8961_WSEQ_ADDR_WIDTH 8 /* WSEQ_ADDR - [7:0] */
+
+/*
+ * R89 (0x59) - Write Sequencer 3
+ */
+#define WM8961_WSEQ_DATA_MASK 0x00FF /* WSEQ_DATA - [7:0] */
+#define WM8961_WSEQ_DATA_SHIFT 0 /* WSEQ_DATA - [7:0] */
+#define WM8961_WSEQ_DATA_WIDTH 8 /* WSEQ_DATA - [7:0] */
+
+/*
+ * R90 (0x5A) - Write Sequencer 4
+ */
+#define WM8961_WSEQ_ABORT 0x0100 /* WSEQ_ABORT */
+#define WM8961_WSEQ_ABORT_MASK 0x0100 /* WSEQ_ABORT */
+#define WM8961_WSEQ_ABORT_SHIFT 8 /* WSEQ_ABORT */
+#define WM8961_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
+#define WM8961_WSEQ_START 0x0080 /* WSEQ_START */
+#define WM8961_WSEQ_START_MASK 0x0080 /* WSEQ_START */
+#define WM8961_WSEQ_START_SHIFT 7 /* WSEQ_START */
+#define WM8961_WSEQ_START_WIDTH 1 /* WSEQ_START */
+#define WM8961_WSEQ_START_INDEX_MASK 0x003F /* WSEQ_START_INDEX - [5:0] */
+#define WM8961_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [5:0] */
+#define WM8961_WSEQ_START_INDEX_WIDTH 6 /* WSEQ_START_INDEX - [5:0] */
+
+/*
+ * R91 (0x5B) - Write Sequencer 5
+ */
+#define WM8961_WSEQ_DATA_WIDTH_MASK 0x0070 /* WSEQ_DATA_WIDTH - [6:4] */
+#define WM8961_WSEQ_DATA_WIDTH_SHIFT 4 /* WSEQ_DATA_WIDTH - [6:4] */
+#define WM8961_WSEQ_DATA_WIDTH_WIDTH 3 /* WSEQ_DATA_WIDTH - [6:4] */
+#define WM8961_WSEQ_DATA_START_MASK 0x000F /* WSEQ_DATA_START - [3:0] */
+#define WM8961_WSEQ_DATA_START_SHIFT 0 /* WSEQ_DATA_START - [3:0] */
+#define WM8961_WSEQ_DATA_START_WIDTH 4 /* WSEQ_DATA_START - [3:0] */
+
+/*
+ * R92 (0x5C) - Write Sequencer 6
+ */
+#define WM8961_WSEQ_DELAY_MASK 0x000F /* WSEQ_DELAY - [3:0] */
+#define WM8961_WSEQ_DELAY_SHIFT 0 /* WSEQ_DELAY - [3:0] */
+#define WM8961_WSEQ_DELAY_WIDTH 4 /* WSEQ_DELAY - [3:0] */
+
+/*
+ * R93 (0x5D) - Write Sequencer 7
+ */
+#define WM8961_WSEQ_BUSY 0x0001 /* WSEQ_BUSY */
+#define WM8961_WSEQ_BUSY_MASK 0x0001 /* WSEQ_BUSY */
+#define WM8961_WSEQ_BUSY_SHIFT 0 /* WSEQ_BUSY */
+#define WM8961_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
+
+/*
+ * R252 (0xFC) - General test 1
+ */
+#define WM8961_ARA_ENA 0x0002 /* ARA_ENA */
+#define WM8961_ARA_ENA_MASK 0x0002 /* ARA_ENA */
+#define WM8961_ARA_ENA_SHIFT 1 /* ARA_ENA */
+#define WM8961_ARA_ENA_WIDTH 1 /* ARA_ENA */
+#define WM8961_AUTO_INC 0x0001 /* AUTO_INC */
+#define WM8961_AUTO_INC_MASK 0x0001 /* AUTO_INC */
+#define WM8961_AUTO_INC_SHIFT 0 /* AUTO_INC */
+#define WM8961_AUTO_INC_WIDTH 1 /* AUTO_INC */
+
+#endif
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index 8c0fdf84aac3..5385e69d283a 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -981,6 +981,21 @@ static int wm8988_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8988_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8988_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8988_i2c_suspend NULL
+#define wm8988_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8988_i2c_id[] = {
{ "wm8988", 0 },
{ }
@@ -994,6 +1009,8 @@ static struct i2c_driver wm8988_i2c_driver = {
},
.probe = wm8988_i2c_probe,
.remove = wm8988_i2c_remove,
+ .suspend = wm8988_i2c_suspend,
+ .resume = wm8988_i2c_resume,
.id_table = wm8988_i2c_id,
};
#endif
@@ -1051,6 +1068,21 @@ static int __devexit wm8988_spi_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8988_spi_suspend(struct spi_device *spi, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&spi->dev);
+}
+
+static int wm8988_spi_resume(struct spi_device *spi)
+{
+ return snd_soc_resume_device(&spi->dev);
+}
+#else
+#define wm8988_spi_suspend NULL
+#define wm8988_spi_resume NULL
+#endif
+
static struct spi_driver wm8988_spi_driver = {
.driver = {
.name = "wm8988",
@@ -1059,6 +1091,8 @@ static struct spi_driver wm8988_spi_driver = {
},
.probe = wm8988_spi_probe,
.remove = __devexit_p(wm8988_spi_remove),
+ .suspend = wm8988_spi_suspend,
+ .resume = wm8988_spi_resume,
};
#endif
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 86fc57e25f97..dbe20597d872 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -1492,6 +1492,21 @@ static __devexit int wm9081_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm9081_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm9081_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm9081_i2c_suspend NULL
+#define wm9081_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm9081_i2c_id[] = {
{ "wm9081", 0 },
{ }
@@ -1505,6 +1520,8 @@ static struct i2c_driver wm9081_i2c_driver = {
},
.probe = wm9081_i2c_probe,
.remove = __devexit_p(wm9081_i2c_remove),
+ .suspend = wm9081_i2c_suspend,
+ .resume = wm9081_i2c_resume,
.id_table = wm9081_i2c_id,
};
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 5dbebf82249c..8cb65ccad35f 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -33,7 +33,7 @@ config SND_SOC_MPC5200_I2S
config SND_SOC_MPC5200_AC97
tristate "Freescale MPC5200 PSC in AC97 mode driver"
depends on PPC_MPC52xx && PPC_BESTCOMM
- select AC97_BUS
+ select SND_SOC_AC97_BUS
select SND_MPC52xx_DMA
select PPC_BESTCOMM_GEN_BD
help
@@ -41,7 +41,7 @@ config SND_SOC_MPC5200_AC97
config SND_MPC52xx_SOC_PCM030
tristate "SoC AC97 Audio support for Phytec pcm030 and WM9712"
- depends on PPC_MPC5200_SIMPLE && BROKEN
+ depends on PPC_MPC5200_SIMPLE
select SND_SOC_MPC5200_AC97
select SND_SOC_WM9712
help
@@ -50,7 +50,7 @@ config SND_MPC52xx_SOC_PCM030
config SND_MPC52xx_SOC_EFIKA
tristate "SoC AC97 Audio support for bbplan Efika and STAC9766"
- depends on PPC_EFIKA && BROKEN
+ depends on PPC_EFIKA
select SND_SOC_MPC5200_AC97
select SND_SOC_STAC9766
help
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index b771238662b6..a5a90e594535 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -72,4 +72,11 @@ config SND_OMAP_SOC_OMAP3_BEAGLE
help
Say Y if you want to add support for SoC audio on the Beagleboard.
+config SND_OMAP_SOC_ZOOM2
+ tristate "SoC Audio support for Zoom2"
+ depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP_ZOOM2
+ select SND_OMAP_SOC_MCBSP
+ select SND_SOC_TWL4030
+ help
+ Say Y if you want to add support for SoC audio on the Zoom2 board.
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index a37f49862389..fefc48f02bd3 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -14,6 +14,7 @@ snd-soc-omap3evm-objs := omap3evm.o
snd-soc-sdp3430-objs := sdp3430.o
snd-soc-omap3pandora-objs := omap3pandora.o
snd-soc-omap3beagle-objs := omap3beagle.o
+snd-soc-zoom2-objs := zoom2.o
obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o
@@ -23,3 +24,4 @@ obj-$(CONFIG_MACH_OMAP3EVM) += snd-soc-omap3evm.o
obj-$(CONFIG_SND_OMAP_SOC_SDP3430) += snd-soc-sdp3430.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
+obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 6454e15f7d28..84a1950880eb 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -216,12 +216,15 @@ static snd_pcm_uframes_t omap_pcm_pointer(struct snd_pcm_substream *substream)
dma_addr_t ptr;
snd_pcm_uframes_t offset;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- ptr = omap_get_dma_src_pos(prtd->dma_ch);
- else
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
ptr = omap_get_dma_dst_pos(prtd->dma_ch);
+ offset = bytes_to_frames(runtime, ptr - runtime->dma_addr);
+ } else if (!(cpu_is_omap1510())) {
+ ptr = omap_get_dma_src_pos(prtd->dma_ch);
+ offset = bytes_to_frames(runtime, ptr - runtime->dma_addr);
+ } else
+ offset = prtd->period_index * runtime->period_size;
- offset = bytes_to_frames(runtime, ptr - runtime->dma_addr);
if (offset >= runtime->buffer_size)
offset = 0;
diff --git a/sound/soc/omap/sdp3430.c b/sound/soc/omap/sdp3430.c
index b719e5db4f57..c51594d8fd13 100644
--- a/sound/soc/omap/sdp3430.c
+++ b/sound/soc/omap/sdp3430.c
@@ -96,7 +96,7 @@ static int sdp3430_hw_voice_params(struct snd_pcm_substream *substream,
ret = snd_soc_dai_set_fmt(codec_dai,
SND_SOC_DAIFMT_DSP_A |
SND_SOC_DAIFMT_IB_NF |
- SND_SOC_DAIFMT_CBS_CFM);
+ SND_SOC_DAIFMT_CBM_CFM);
if (ret) {
printk(KERN_ERR "can't set codec DAI configuration\n");
return ret;
diff --git a/sound/soc/omap/zoom2.c b/sound/soc/omap/zoom2.c
new file mode 100644
index 000000000000..3de6d2bd3903
--- /dev/null
+++ b/sound/soc/omap/zoom2.c
@@ -0,0 +1,301 @@
+/*
+ * zoom2.c -- SoC audio for Zoom2
+ *
+ * Author: Misael Lopez Cruz <x0052729@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+#include <mach/mcbsp.h>
+
+#include "omap-mcbsp.h"
+#include "omap-pcm.h"
+#include "../codecs/twl4030.h"
+
+#define ZOOM2_HEADSET_MUX_GPIO (OMAP_MAX_GPIO_LINES + 15)
+
+static int zoom2_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ int ret;
+
+ /* Set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set codec DAI configuration\n");
+ return ret;
+ }
+
+ /* Set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set cpu DAI configuration\n");
+ return ret;
+ }
+
+ /* Set the codec system clock for DAC and ADC */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set codec system clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops zoom2_ops = {
+ .hw_params = zoom2_hw_params,
+};
+
+static int zoom2_hw_voice_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ int ret;
+
+ /* Set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret) {
+ printk(KERN_ERR "can't set codec DAI configuration\n");
+ return ret;
+ }
+
+ /* Set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set cpu DAI configuration\n");
+ return ret;
+ }
+
+ /* Set the codec system clock for DAC and ADC */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set codec system clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops zoom2_voice_ops = {
+ .hw_params = zoom2_hw_voice_params,
+};
+
+/* Zoom2 machine DAPM */
+static const struct snd_soc_dapm_widget zoom2_twl4030_dapm_widgets[] = {
+ SND_SOC_DAPM_MIC("Ext Mic", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_HP("Headset Stereophone", NULL),
+ SND_SOC_DAPM_LINE("Aux In", NULL),
+};
+
+static const struct snd_soc_dapm_route audio_map[] = {
+	/* External Mics: MAINMIC, SUBMIC with bias */
+ {"MAINMIC", NULL, "Mic Bias 1"},
+ {"SUBMIC", NULL, "Mic Bias 2"},
+ {"Mic Bias 1", NULL, "Ext Mic"},
+ {"Mic Bias 2", NULL, "Ext Mic"},
+
+ /* External Speakers: HFL, HFR */
+ {"Ext Spk", NULL, "HFL"},
+ {"Ext Spk", NULL, "HFR"},
+
+ /* Headset Stereophone: HSOL, HSOR */
+ {"Headset Stereophone", NULL, "HSOL"},
+ {"Headset Stereophone", NULL, "HSOR"},
+
+ /* Headset Mic: HSMIC with bias */
+ {"HSMIC", NULL, "Headset Mic Bias"},
+ {"Headset Mic Bias", NULL, "Headset Mic"},
+
+ /* Aux In: AUXL, AUXR */
+ {"Aux In", NULL, "AUXL"},
+ {"Aux In", NULL, "AUXR"},
+};
+
+static int zoom2_twl4030_init(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ /* Add Zoom2 specific widgets */
+ ret = snd_soc_dapm_new_controls(codec, zoom2_twl4030_dapm_widgets,
+ ARRAY_SIZE(zoom2_twl4030_dapm_widgets));
+ if (ret)
+ return ret;
+
+ /* Set up Zoom2 specific audio path audio_map */
+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+
+ /* Zoom2 connected pins */
+ snd_soc_dapm_enable_pin(codec, "Ext Mic");
+ snd_soc_dapm_enable_pin(codec, "Ext Spk");
+ snd_soc_dapm_enable_pin(codec, "Headset Mic");
+ snd_soc_dapm_enable_pin(codec, "Headset Stereophone");
+ snd_soc_dapm_enable_pin(codec, "Aux In");
+
+ /* TWL4030 not connected pins */
+ snd_soc_dapm_nc_pin(codec, "CARKITMIC");
+ snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
+ snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
+
+ snd_soc_dapm_nc_pin(codec, "OUTL");
+ snd_soc_dapm_nc_pin(codec, "OUTR");
+ snd_soc_dapm_nc_pin(codec, "EARPIECE");
+ snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
+ snd_soc_dapm_nc_pin(codec, "PREDRIVER");
+ snd_soc_dapm_nc_pin(codec, "CARKITL");
+ snd_soc_dapm_nc_pin(codec, "CARKITR");
+
+ ret = snd_soc_dapm_sync(codec);
+
+ return ret;
+}
+
+static int zoom2_twl4030_voice_init(struct snd_soc_codec *codec)
+{
+ unsigned short reg;
+
+ /* Enable voice interface */
+ reg = codec->read(codec, TWL4030_REG_VOICE_IF);
+ reg |= TWL4030_VIF_DIN_EN | TWL4030_VIF_DOUT_EN | TWL4030_VIF_EN;
+ codec->write(codec, TWL4030_REG_VOICE_IF, reg);
+
+ return 0;
+}
+
+/* Digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link zoom2_dai[] = {
+ {
+ .name = "TWL4030 I2S",
+ .stream_name = "TWL4030 Audio",
+ .cpu_dai = &omap_mcbsp_dai[0],
+ .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
+ .init = zoom2_twl4030_init,
+ .ops = &zoom2_ops,
+ },
+ {
+ .name = "TWL4030 PCM",
+ .stream_name = "TWL4030 Voice",
+ .cpu_dai = &omap_mcbsp_dai[1],
+ .codec_dai = &twl4030_dai[TWL4030_DAI_VOICE],
+ .init = zoom2_twl4030_voice_init,
+ .ops = &zoom2_voice_ops,
+ },
+};
+
+/* Audio machine driver */
+static struct snd_soc_card snd_soc_zoom2 = {
+ .name = "Zoom2",
+ .platform = &omap_soc_platform,
+ .dai_link = zoom2_dai,
+ .num_links = ARRAY_SIZE(zoom2_dai),
+};
+
+/* twl4030 setup */
+static struct twl4030_setup_data twl4030_setup = {
+ .ramp_delay_value = 2, /* 81 ms */
+ .sysclk = 26000,
+};
+
+/* Audio subsystem */
+static struct snd_soc_device zoom2_snd_devdata = {
+ .card = &snd_soc_zoom2,
+ .codec_dev = &soc_codec_dev_twl4030,
+ .codec_data = &twl4030_setup,
+};
+
+static struct platform_device *zoom2_snd_device;
+
+static int __init zoom2_soc_init(void)
+{
+ int ret;
+
+ if (!machine_is_omap_zoom2()) {
+ pr_debug("Not Zoom2!\n");
+ return -ENODEV;
+ }
+ printk(KERN_INFO "Zoom2 SoC init\n");
+
+ zoom2_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!zoom2_snd_device) {
+ printk(KERN_ERR "Platform device allocation failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(zoom2_snd_device, &zoom2_snd_devdata);
+ zoom2_snd_devdata.dev = &zoom2_snd_device->dev;
+ *(unsigned int *)zoom2_dai[0].cpu_dai->private_data = 1; /* McBSP2 */
+ *(unsigned int *)zoom2_dai[1].cpu_dai->private_data = 2; /* McBSP3 */
+
+ ret = platform_device_add(zoom2_snd_device);
+ if (ret)
+ goto err1;
+
+ BUG_ON(gpio_request(ZOOM2_HEADSET_MUX_GPIO, "hs_mux") < 0);
+ gpio_direction_output(ZOOM2_HEADSET_MUX_GPIO, 0);
+
+ return 0;
+
+err1:
+ printk(KERN_ERR "Unable to add platform device\n");
+ platform_device_put(zoom2_snd_device);
+
+ return ret;
+}
+module_init(zoom2_soc_init);
+
+static void __exit zoom2_soc_exit(void)
+{
+ gpio_free(ZOOM2_HEADSET_MUX_GPIO);
+
+ platform_device_unregister(zoom2_snd_device);
+}
+module_exit(zoom2_soc_exit);
+
+MODULE_AUTHOR("Misael Lopez Cruz <x0052729@ti.com>");
+MODULE_DESCRIPTION("ALSA SoC Zoom2");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 326955dea36c..8889cd371608 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -20,12 +20,14 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
+#include <sound/uda1380.h>
#include <mach/magician.h>
#include <asm/mach-types.h>
@@ -447,34 +449,47 @@ static struct snd_soc_card snd_soc_card_magician = {
.platform = &pxa2xx_soc_platform,
};
-/* magician audio private data */
-static struct uda1380_setup_data magician_uda1380_setup = {
- .i2c_address = 0x18,
- .dac_clk = UDA1380_DAC_CLK_WSPLL,
-};
-
/* magician audio subsystem */
static struct snd_soc_device magician_snd_devdata = {
.card = &snd_soc_card_magician,
.codec_dev = &soc_codec_dev_uda1380,
- .codec_data = &magician_uda1380_setup,
};
static struct platform_device *magician_snd_device;
+/*
+ * FIXME: move into magician board file once merged into the pxa tree
+ */
+static struct uda1380_platform_data uda1380_info = {
+ .gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
+ .gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
+ .dac_clk = UDA1380_DAC_CLK_WSPLL,
+};
+
+static struct i2c_board_info i2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("uda1380", 0x18),
+ .platform_data = &uda1380_info,
+ },
+};
+
static int __init magician_init(void)
{
int ret;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
if (!machine_is_magician())
return -ENODEV;
- ret = gpio_request(EGPIO_MAGICIAN_CODEC_POWER, "CODEC_POWER");
- if (ret)
- goto err_request_power;
- ret = gpio_request(EGPIO_MAGICIAN_CODEC_RESET, "CODEC_RESET");
- if (ret)
- goto err_request_reset;
+ adapter = i2c_get_adapter(0);
+ if (!adapter)
+ return -ENODEV;
+ client = i2c_new_device(adapter, i2c_board_info);
+ i2c_put_adapter(adapter);
+ if (!client)
+ return -ENODEV;
+
ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
if (ret)
goto err_request_spk;
@@ -491,14 +506,8 @@ static int __init magician_init(void)
if (ret)
goto err_request_in_sel1;
- gpio_set_value(EGPIO_MAGICIAN_CODEC_POWER, 1);
gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0);
- /* we may need to have the clock running here - pH5 */
- gpio_set_value(EGPIO_MAGICIAN_CODEC_RESET, 1);
- udelay(5);
- gpio_set_value(EGPIO_MAGICIAN_CODEC_RESET, 0);
-
magician_snd_device = platform_device_alloc("soc-audio", -1);
if (!magician_snd_device) {
ret = -ENOMEM;
@@ -526,10 +535,6 @@ err_request_mic:
err_request_ep:
gpio_free(EGPIO_MAGICIAN_SPK_POWER);
err_request_spk:
- gpio_free(EGPIO_MAGICIAN_CODEC_RESET);
-err_request_reset:
- gpio_free(EGPIO_MAGICIAN_CODEC_POWER);
-err_request_power:
return ret;
}
@@ -540,15 +545,12 @@ static void __exit magician_exit(void)
gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0);
gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0);
gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0);
- gpio_set_value(EGPIO_MAGICIAN_CODEC_POWER, 0);
gpio_free(EGPIO_MAGICIAN_IN_SEL1);
gpio_free(EGPIO_MAGICIAN_IN_SEL0);
gpio_free(EGPIO_MAGICIAN_MIC_POWER);
gpio_free(EGPIO_MAGICIAN_EP_POWER);
gpio_free(EGPIO_MAGICIAN_SPK_POWER);
- gpio_free(EGPIO_MAGICIAN_CODEC_RESET);
- gpio_free(EGPIO_MAGICIAN_CODEC_POWER);
}
module_init(magician_init);
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 19c45409d94c..e22c5cef8fec 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -457,31 +457,27 @@ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
return -EINVAL;
}
- ssp_write_reg(ssp, SSCR0, sscr0);
- ssp_write_reg(ssp, SSCR1, sscr1);
- ssp_write_reg(ssp, SSPSP, sspsp);
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ sspsp |= SSPSP_SFRMP;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ sspsp |= SSPSP_SCMODE(2);
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
+ break;
+ default:
+ return -EINVAL;
+ }
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
sscr0 |= SSCR0_PSP;
sscr1 |= SSCR1_RWOT | SSCR1_TRAIL;
-
/* See hw_params() */
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- sspsp |= SSPSP_SFRMP;
- break;
- case SND_SOC_DAIFMT_NB_IF:
- break;
- case SND_SOC_DAIFMT_IB_IF:
- sspsp |= SSPSP_SCMODE(2);
- break;
- case SND_SOC_DAIFMT_IB_NF:
- sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
- break;
- default:
- return -EINVAL;
- }
break;
case SND_SOC_DAIFMT_DSP_A:
@@ -489,22 +485,6 @@ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
case SND_SOC_DAIFMT_DSP_B:
sscr0 |= SSCR0_MOD | SSCR0_PSP;
sscr1 |= SSCR1_TRAIL | SSCR1_RWOT;
-
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- sspsp |= SSPSP_SFRMP;
- break;
- case SND_SOC_DAIFMT_NB_IF:
- break;
- case SND_SOC_DAIFMT_IB_IF:
- sspsp |= SSPSP_SCMODE(2);
- break;
- case SND_SOC_DAIFMT_IB_NF:
- sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
- break;
- default:
- return -EINVAL;
- }
break;
default:
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 4743e262895d..6b8f655d1ad8 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -167,6 +167,7 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream,
BUG_ON(IS_ERR(clk_i2s));
clk_enable(clk_i2s);
+ dai->private_data = dai;
pxa_i2s_wait();
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -255,7 +256,10 @@ static void pxa2xx_i2s_shutdown(struct snd_pcm_substream *substream,
if ((SACR1 & (SACR1_DREC | SACR1_DRPL)) == (SACR1_DREC | SACR1_DRPL)) {
SACR0 &= ~SACR0_ENB;
pxa_i2s_wait();
- clk_disable(clk_i2s);
+ if (dai->private_data != NULL) {
+ clk_disable(clk_i2s);
+ dai->private_data = NULL;
+ }
}
}
@@ -336,6 +340,7 @@ static int pxa2xx_i2s_probe(struct platform_device *dev)
return PTR_ERR(clk_i2s);
pxa_i2s_dai.dev = &dev->dev;
+ pxa_i2s_dai.private_data = NULL;
ret = snd_soc_register_dai(&pxa_i2s_dai);
if (ret != 0)
clk_put(clk_i2s);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1d70829464ef..dfc03c0bacb6 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -619,8 +619,9 @@ static struct snd_pcm_ops soc_pcm_ops = {
#ifdef CONFIG_PM
/* powers down audio subsystem for suspend */
-static int soc_suspend(struct platform_device *pdev, pm_message_t state)
+static int soc_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_card *card = socdev->card;
struct snd_soc_platform *platform = card->platform;
@@ -656,7 +657,7 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
snd_pcm_suspend_all(card->dai_link[i].pcm);
if (card->suspend_pre)
- card->suspend_pre(pdev, state);
+ card->suspend_pre(pdev, PMSG_SUSPEND);
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
@@ -682,7 +683,7 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
}
if (codec_dev->suspend)
- codec_dev->suspend(pdev, state);
+ codec_dev->suspend(pdev, PMSG_SUSPEND);
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
@@ -691,7 +692,7 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
}
if (card->suspend_post)
- card->suspend_post(pdev, state);
+ card->suspend_post(pdev, PMSG_SUSPEND);
return 0;
}
@@ -765,8 +766,9 @@ static void soc_resume_deferred(struct work_struct *work)
}
/* powers up audio subsystem after a suspend */
-static int soc_resume(struct platform_device *pdev)
+static int soc_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_card *card = socdev->card;
struct snd_soc_dai *cpu_dai = card->dai_link[0].cpu_dai;
@@ -788,6 +790,44 @@ static int soc_resume(struct platform_device *pdev)
return 0;
}
+/**
+ * snd_soc_suspend_device: Notify core of device suspend
+ *
+ * @dev: Device being suspended.
+ *
+ * In order to ensure that the entire audio subsystem is suspended in a
+ * coordinated fashion, ASoC devices should suspend themselves when
+ * called by ASoC. When the standard kernel suspend process asks the
+ * device to suspend, it should call this function to initiate a suspend
+ * of the entire ASoC card.
+ *
+ * Note: this function is currently stubbed out.
+ */
+int snd_soc_suspend_device(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_suspend_device);
+
+/**
+ * snd_soc_resume_device: Notify core of device resume
+ *
+ * @dev: Device being resumed.
+ *
+ * In order to ensure that the entire audio subsystem is resumed in a
+ * coordinated fashion, ASoC devices should resume themselves when called
+ * by ASoC. When the standard kernel resume process asks the device
+ * to resume, it should call this function. Once all the components of
+ * the card have notified that they are ready to be resumed, the card
+ * will be resumed.
+ *
+ * Note: this function is currently stubbed out.
+ */
+int snd_soc_resume_device(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_resume_device);
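(Editorial illustration, not part of the patch: a sketch of how a driver is meant to use these helpers. The my_codec_* names are hypothetical, the declarations are assumed to live in <sound/soc.h>, and both helpers are still stubs at this point in the series.)

#include <linux/device.h>
#include <linux/pm.h>
#include <sound/soc.h>	/* assumed to declare snd_soc_{suspend,resume}_device() */

static int my_codec_suspend(struct device *dev)
{
	/* hand off to the ASoC core so the whole card suspends in one pass */
	return snd_soc_suspend_device(dev);
}

static int my_codec_resume(struct device *dev)
{
	/* the card is resumed once every component has reported in */
	return snd_soc_resume_device(dev);
}

static const struct dev_pm_ops my_codec_pm_ops = {
	.suspend	= my_codec_suspend,
	.resume		= my_codec_resume,
};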
#else
#define soc_suspend NULL
#define soc_resume NULL
@@ -981,16 +1021,39 @@ static int soc_remove(struct platform_device *pdev)
return 0;
}
+static int soc_poweroff(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_card *card = socdev->card;
+
+ if (!card->instantiated)
+ return 0;
+
+ /* Flush out pmdown_time work - we actually do want to run it
+ * now, we're shutting down so no imminent restart. */
+ run_delayed_work(&card->delayed_work);
+
+ snd_soc_dapm_shutdown(socdev);
+
+ return 0;
+}
+
+static struct dev_pm_ops soc_pm_ops = {
+ .suspend = soc_suspend,
+ .resume = soc_resume,
+ .poweroff = soc_poweroff,
+};
+
/* ASoC platform driver */
static struct platform_driver soc_driver = {
.driver = {
.name = "soc-audio",
.owner = THIS_MODULE,
+ .pm = &soc_pm_ops,
},
.probe = soc_probe,
.remove = soc_remove,
- .suspend = soc_suspend,
- .resume = soc_resume,
};
/* create a new pcm */
@@ -1264,10 +1327,10 @@ EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
* Returns 1 for change else 0.
*/
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
- unsigned short mask, unsigned short value)
+ unsigned int mask, unsigned int value)
{
int change;
- unsigned short old, new;
+ unsigned int old, new;
mutex_lock(&io_mutex);
old = snd_soc_read(codec, reg);
@@ -1294,10 +1357,10 @@ EXPORT_SYMBOL_GPL(snd_soc_update_bits);
* Returns 1 for change else 0.
*/
int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg,
- unsigned short mask, unsigned short value)
+ unsigned int mask, unsigned int value)
{
int change;
- unsigned short old, new;
+ unsigned int old, new;
mutex_lock(&io_mutex);
old = snd_soc_read(codec, reg);
@@ -1586,7 +1649,7 @@ int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val, bitmask;
+ unsigned int val, bitmask;
for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
;
@@ -1615,8 +1678,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val;
- unsigned short mask, bitmask;
+ unsigned int val;
+ unsigned int mask, bitmask;
for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
;
@@ -1652,7 +1715,7 @@ int snd_soc_get_value_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short reg_val, val, mux;
+ unsigned int reg_val, val, mux;
reg_val = snd_soc_read(codec, e->reg);
val = (reg_val >> e->shift_l) & e->mask;
@@ -1691,8 +1754,8 @@ int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val;
- unsigned short mask;
+ unsigned int val;
+ unsigned int mask;
if (ucontrol->value.enumerated.item[0] > e->max - 1)
return -EINVAL;
@@ -1852,7 +1915,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- unsigned short val, val2, val_mask;
+ unsigned int val, val2, val_mask;
val = (ucontrol->value.integer.value[0] & mask);
if (invert)
@@ -1918,7 +1981,7 @@ int snd_soc_get_volsw_2r(struct snd_kcontrol *kcontrol,
unsigned int reg2 = mc->rreg;
unsigned int shift = mc->shift;
int max = mc->max;
- unsigned int mask = (1<<fls(max))-1;
+ unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
ucontrol->value.integer.value[0] =
@@ -1958,7 +2021,7 @@ int snd_soc_put_volsw_2r(struct snd_kcontrol *kcontrol,
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
int err;
- unsigned short val, val2, val_mask;
+ unsigned int val, val2, val_mask;
val_mask = mask << shift;
val = (ucontrol->value.integer.value[0] & mask);
@@ -2050,7 +2113,7 @@ int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
unsigned int reg = mc->reg;
int min = mc->min;
- unsigned short val;
+ unsigned int val;
val = (ucontrol->value.integer.value[0]+min) & 0xff;
val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 21c69074aa17..5157ec110cfa 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -52,19 +52,37 @@
/* dapm power sequences - make this per codec in the future */
static int dapm_up_seq[] = {
- snd_soc_dapm_pre, snd_soc_dapm_supply, snd_soc_dapm_micbias,
- snd_soc_dapm_mic, snd_soc_dapm_mux, snd_soc_dapm_value_mux,
- snd_soc_dapm_dac, snd_soc_dapm_mixer, snd_soc_dapm_mixer_named_ctl,
- snd_soc_dapm_pga, snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk,
- snd_soc_dapm_post
+ [snd_soc_dapm_pre] = 0,
+ [snd_soc_dapm_supply] = 1,
+ [snd_soc_dapm_micbias] = 2,
+ [snd_soc_dapm_mic] = 3,
+ [snd_soc_dapm_mux] = 4,
+ [snd_soc_dapm_value_mux] = 4,
+ [snd_soc_dapm_dac] = 5,
+ [snd_soc_dapm_mixer] = 6,
+ [snd_soc_dapm_mixer_named_ctl] = 6,
+ [snd_soc_dapm_pga] = 7,
+ [snd_soc_dapm_adc] = 8,
+ [snd_soc_dapm_hp] = 9,
+ [snd_soc_dapm_spk] = 10,
+ [snd_soc_dapm_post] = 11,
};
static int dapm_down_seq[] = {
- snd_soc_dapm_pre, snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk,
- snd_soc_dapm_pga, snd_soc_dapm_mixer_named_ctl, snd_soc_dapm_mixer,
- snd_soc_dapm_dac, snd_soc_dapm_mic, snd_soc_dapm_micbias,
- snd_soc_dapm_mux, snd_soc_dapm_value_mux, snd_soc_dapm_supply,
- snd_soc_dapm_post
+ [snd_soc_dapm_pre] = 0,
+ [snd_soc_dapm_adc] = 1,
+ [snd_soc_dapm_hp] = 2,
+ [snd_soc_dapm_spk] = 3,
+ [snd_soc_dapm_pga] = 4,
+ [snd_soc_dapm_mixer_named_ctl] = 5,
+ [snd_soc_dapm_mixer] = 5,
+ [snd_soc_dapm_dac] = 6,
+ [snd_soc_dapm_mic] = 7,
+ [snd_soc_dapm_micbias] = 8,
+ [snd_soc_dapm_mux] = 9,
+ [snd_soc_dapm_value_mux] = 9,
+ [snd_soc_dapm_supply] = 10,
+ [snd_soc_dapm_post] = 11,
};
static void pop_wait(u32 pop_time)
@@ -268,7 +286,7 @@ static int dapm_connect_mixer(struct snd_soc_codec *codec,
static int dapm_update_bits(struct snd_soc_dapm_widget *widget)
{
int change, power;
- unsigned short old, new;
+ unsigned int old, new;
struct snd_soc_codec *codec = widget->codec;
/* check for valid widgets */
@@ -689,53 +707,211 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
return power;
}
-/*
- * Scan a single DAPM widget for a complete audio path and update the
- * power status appropriately.
- */
-static int dapm_power_widget(struct snd_soc_codec *codec, int event,
- struct snd_soc_dapm_widget *w)
+static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
+ struct snd_soc_dapm_widget *b,
+ int sort[])
{
- int ret;
+ if (sort[a->id] != sort[b->id])
+ return sort[a->id] - sort[b->id];
+ if (a->reg != b->reg)
+ return a->reg - b->reg;
- switch (w->id) {
- case snd_soc_dapm_pre:
- if (!w->event)
- return 0;
+ return 0;
+}
- if (event == SND_SOC_DAPM_STREAM_START) {
- ret = w->event(w,
- NULL, SND_SOC_DAPM_PRE_PMU);
+/* Insert a widget in order into a DAPM power sequence. */
+static void dapm_seq_insert(struct snd_soc_dapm_widget *new_widget,
+ struct list_head *list,
+ int sort[])
+{
+ struct snd_soc_dapm_widget *w;
+
+ list_for_each_entry(w, list, power_list)
+ if (dapm_seq_compare(new_widget, w, sort) < 0) {
+ list_add_tail(&new_widget->power_list, &w->power_list);
+ return;
+ }
+
+ list_add_tail(&new_widget->power_list, list);
+}
+
+/* Apply the coalesced changes from a DAPM sequence */
+static void dapm_seq_run_coalesced(struct snd_soc_codec *codec,
+ struct list_head *pending)
+{
+ struct snd_soc_dapm_widget *w;
+ int reg, power, ret;
+ unsigned int value = 0;
+ unsigned int mask = 0;
+ unsigned int cur_mask;
+
+ reg = list_first_entry(pending, struct snd_soc_dapm_widget,
+ power_list)->reg;
+
+ list_for_each_entry(w, pending, power_list) {
+ cur_mask = 1 << w->shift;
+ BUG_ON(reg != w->reg);
+
+ if (w->invert)
+ power = !w->power;
+ else
+ power = w->power;
+
+ mask |= cur_mask;
+ if (power)
+ value |= cur_mask;
+
+ pop_dbg(codec->pop_time,
+ "pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n",
+ w->name, reg, value, mask);
+
+ /* power up pre event */
+ if (w->power && w->event &&
+ (w->event_flags & SND_SOC_DAPM_PRE_PMU)) {
+ pop_dbg(codec->pop_time, "pop test : %s PRE_PMU\n",
+ w->name);
+ ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU);
if (ret < 0)
- return ret;
- } else if (event == SND_SOC_DAPM_STREAM_STOP) {
- ret = w->event(w,
- NULL, SND_SOC_DAPM_PRE_PMD);
+ pr_err("%s: pre event failed: %d\n",
+ w->name, ret);
+ }
+
+ /* power down pre event */
+ if (!w->power && w->event &&
+ (w->event_flags & SND_SOC_DAPM_PRE_PMD)) {
+ pop_dbg(codec->pop_time, "pop test : %s PRE_PMD\n",
+ w->name);
+ ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD);
if (ret < 0)
- return ret;
+ pr_err("%s: pre event failed: %d\n",
+ w->name, ret);
}
- return 0;
- case snd_soc_dapm_post:
- if (!w->event)
- return 0;
+ /* Lower PGA volume to reduce pops */
+ if (w->id == snd_soc_dapm_pga && !w->power)
+ dapm_set_pga(w, w->power);
+ }
- if (event == SND_SOC_DAPM_STREAM_START) {
+ if (reg >= 0) {
+ pop_dbg(codec->pop_time,
+ "pop test : Applying 0x%x/0x%x to %x in %dms\n",
+ value, mask, reg, codec->pop_time);
+ pop_wait(codec->pop_time);
+ snd_soc_update_bits(codec, reg, mask, value);
+ }
+
+ list_for_each_entry(w, pending, power_list) {
+ /* Raise PGA volume to reduce pops */
+ if (w->id == snd_soc_dapm_pga && w->power)
+ dapm_set_pga(w, w->power);
+
+ /* power up post event */
+ if (w->power && w->event &&
+ (w->event_flags & SND_SOC_DAPM_POST_PMU)) {
+ pop_dbg(codec->pop_time, "pop test : %s POST_PMU\n",
+ w->name);
ret = w->event(w,
NULL, SND_SOC_DAPM_POST_PMU);
if (ret < 0)
- return ret;
- } else if (event == SND_SOC_DAPM_STREAM_STOP) {
- ret = w->event(w,
- NULL, SND_SOC_DAPM_POST_PMD);
+ pr_err("%s: post event failed: %d\n",
+ w->name, ret);
+ }
+
+ /* power down post event */
+ if (!w->power && w->event &&
+ (w->event_flags & SND_SOC_DAPM_POST_PMD)) {
+ pop_dbg(codec->pop_time, "pop test : %s POST_PMD\n",
+ w->name);
+ ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD);
if (ret < 0)
- return ret;
+ pr_err("%s: post event failed: %d\n",
+ w->name, ret);
}
- return 0;
+ }
+}
- default:
- return dapm_generic_apply_power(w);
+/* Apply a DAPM power sequence.
+ *
+ * We walk over a pre-sorted list of widgets to apply power to. In
+ * order to minimise the number of device writes required, multiple
+ * widgets are updated in a single write where possible. Currently
+ * anything that requires more than a single write is not handled.
+ */
+static void dapm_seq_run(struct snd_soc_codec *codec, struct list_head *list,
+ int event, int sort[])
+{
+ struct snd_soc_dapm_widget *w, *n;
+ LIST_HEAD(pending);
+ int cur_sort = -1;
+ int cur_reg = SND_SOC_NOPM;
+ int ret;
+
+ list_for_each_entry_safe(w, n, list, power_list) {
+ ret = 0;
+
+ /* Do we need to apply any queued changes? */
+ if (sort[w->id] != cur_sort || w->reg != cur_reg) {
+ if (!list_empty(&pending))
+ dapm_seq_run_coalesced(codec, &pending);
+
+ INIT_LIST_HEAD(&pending);
+ cur_sort = -1;
+ cur_reg = SND_SOC_NOPM;
+ }
+
+ switch (w->id) {
+ case snd_soc_dapm_pre:
+ if (!w->event)
+ list_for_each_entry_safe_continue(w, n, list,
+ power_list);
+
+ if (event == SND_SOC_DAPM_STREAM_START)
+ ret = w->event(w,
+ NULL, SND_SOC_DAPM_PRE_PMU);
+ else if (event == SND_SOC_DAPM_STREAM_STOP)
+ ret = w->event(w,
+ NULL, SND_SOC_DAPM_PRE_PMD);
+ break;
+
+ case snd_soc_dapm_post:
+ if (!w->event)
+ list_for_each_entry_safe_continue(w, n, list,
+ power_list);
+
+ if (event == SND_SOC_DAPM_STREAM_START)
+ ret = w->event(w,
+ NULL, SND_SOC_DAPM_POST_PMU);
+ else if (event == SND_SOC_DAPM_STREAM_STOP)
+ ret = w->event(w,
+ NULL, SND_SOC_DAPM_POST_PMD);
+ break;
+
+ case snd_soc_dapm_input:
+ case snd_soc_dapm_output:
+ case snd_soc_dapm_hp:
+ case snd_soc_dapm_mic:
+ case snd_soc_dapm_line:
+ case snd_soc_dapm_spk:
+ /* No register support currently */
+ ret = dapm_generic_apply_power(w);
+ break;
+
+ default:
+ /* Queue it up for application */
+ cur_sort = sort[w->id];
+ cur_reg = w->reg;
+ list_move(&w->power_list, &pending);
+ break;
+ }
+
+ if (ret < 0)
+ pr_err("Failed to apply widget power: %d\n",
+ ret);
}
+
+ if (!list_empty(&pending))
+ dapm_seq_run_coalesced(codec, &pending);
}
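(Editorial illustration of the coalescing above, not part of the patch; the register number and shifts are invented, and neither widget is inverted.)

/*
 * widget A: reg = 0x04, shift = 0, powering up
 * widget B: reg = 0x04, shift = 1, powering down
 *
 * dapm_seq_run_coalesced() accumulates
 *     mask  = (1 << 0) | (1 << 1) = 0x3
 *     value = (1 << 0)            = 0x1
 * and issues a single register update
 *     snd_soc_update_bits(codec, 0x04, 0x3, 0x1);
 * instead of one write per widget.
 */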
/*
@@ -751,23 +927,22 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
{
struct snd_soc_device *socdev = codec->socdev;
struct snd_soc_dapm_widget *w;
+ LIST_HEAD(up_list);
+ LIST_HEAD(down_list);
int ret = 0;
- int i, power;
+ int power;
int sys_power = 0;
- INIT_LIST_HEAD(&codec->up_list);
- INIT_LIST_HEAD(&codec->down_list);
-
/* Check which widgets we need to power and store them in
* lists indicating if they should be powered up or down.
*/
list_for_each_entry(w, &codec->dapm_widgets, list) {
switch (w->id) {
case snd_soc_dapm_pre:
- list_add_tail(&codec->down_list, &w->power_list);
+ dapm_seq_insert(w, &down_list, dapm_down_seq);
break;
case snd_soc_dapm_post:
- list_add_tail(&codec->up_list, &w->power_list);
+ dapm_seq_insert(w, &up_list, dapm_up_seq);
break;
default:
@@ -782,10 +957,9 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
continue;
if (power)
- list_add_tail(&w->power_list, &codec->up_list);
+ dapm_seq_insert(w, &up_list, dapm_up_seq);
else
- list_add_tail(&w->power_list,
- &codec->down_list);
+ dapm_seq_insert(w, &down_list, dapm_down_seq);
w->power = power;
break;
@@ -802,32 +976,10 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
}
/* Power down widgets first; try to avoid amplifying pops. */
- for (i = 0; i < ARRAY_SIZE(dapm_down_seq); i++) {
- list_for_each_entry(w, &codec->down_list, power_list) {
- /* is widget in stream order */
- if (w->id != dapm_down_seq[i])
- continue;
-
- ret = dapm_power_widget(codec, event, w);
- if (ret != 0)
- pr_err("Failed to power down %s: %d\n",
- w->name, ret);
- }
- }
+ dapm_seq_run(codec, &down_list, event, dapm_down_seq);
/* Now power up. */
- for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++) {
- list_for_each_entry(w, &codec->up_list, power_list) {
- /* is widget in stream order */
- if (w->id != dapm_up_seq[i])
- continue;
-
- ret = dapm_power_widget(codec, event, w);
- if (ret != 0)
- pr_err("Failed to power up %s: %d\n",
- w->name, ret);
- }
- }
+ dapm_seq_run(codec, &up_list, event, dapm_up_seq);
/* If we just powered the last thing off drop to standby bias */
if (codec->bias_level == SND_SOC_BIAS_PREPARE && !sys_power) {
@@ -1372,7 +1524,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- unsigned short val, val2, val_mask;
+ unsigned int val, val2, val_mask;
int ret;
val = (ucontrol->value.integer.value[0] & mask);
@@ -1436,7 +1588,7 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val, bitmask;
+ unsigned int val, bitmask;
for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
;
@@ -1464,8 +1616,8 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val, mux;
- unsigned short mask, bitmask;
+ unsigned int val, mux;
+ unsigned int mask, bitmask;
int ret = 0;
for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
@@ -1523,7 +1675,7 @@ int snd_soc_dapm_get_value_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short reg_val, val, mux;
+ unsigned int reg_val, val, mux;
reg_val = snd_soc_read(widget->codec, e->reg);
val = (reg_val >> e->shift_l) & e->mask;
@@ -1563,8 +1715,8 @@ int snd_soc_dapm_put_value_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val, mux;
- unsigned short mask;
+ unsigned int val, mux;
+ unsigned int mask;
int ret = 0;
if (ucontrol->value.enumerated.item[0] > e->max - 1)
@@ -1880,6 +2032,36 @@ void snd_soc_dapm_free(struct snd_soc_device *socdev)
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_free);
+/*
+ * snd_soc_dapm_shutdown - callback for system shutdown
+ */
+void snd_soc_dapm_shutdown(struct snd_soc_device *socdev)
+{
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct snd_soc_dapm_widget *w;
+ LIST_HEAD(down_list);
+ int powerdown = 0;
+
+ list_for_each_entry(w, &codec->dapm_widgets, list) {
+ if (w->power) {
+ dapm_seq_insert(w, &down_list, dapm_down_seq);
+ w->power = 0;
+ powerdown = 1;
+ }
+ }
+
+	/* If there were no widgets to power down, we're already in
+ * standby.
+ */
+ if (powerdown) {
+ snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_PREPARE);
+ dapm_seq_run(codec, &down_list, 0, dapm_down_seq);
+ snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_STANDBY);
+ }
+
+ snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_OFF);
+}
+
/* Module information */
MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk");
MODULE_DESCRIPTION("Dynamic Audio Power Management core for ALSA SoC");
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 938a58a5a244..efed64b8b026 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -297,15 +297,17 @@ static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
static bool filter(struct dma_chan *chan, void *param)
{
struct txx9aclc_dmadata *dmadata = param;
- char devname[20 + 2]; /* FIXME: old BUS_ID_SIZE + 2 */
+ char *devname;
+ bool found = false;
- snprintf(devname, sizeof(devname), "%s.%d", dmadata->dma_res->name,
+ devname = kasprintf(GFP_KERNEL, "%s.%d", dmadata->dma_res->name,
(int)dmadata->dma_res->start);
if (strcmp(dev_name(chan->device->dev), devname) == 0) {
chan->private = &dmadata->dma_slave;
- return true;
+ found = true;
}
- return false;
+ kfree(devname);
+ return found;
}
static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 12522e6913d9..a41f8b127f49 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -10,6 +10,8 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
#include <sound/core.h>
#ifdef CONFIG_SOUND_OSS_CORE
@@ -29,6 +31,8 @@ MODULE_LICENSE("GPL");
static char *sound_nodename(struct device *dev)
{
+ if (MAJOR(dev->devt) == SOUND_MAJOR)
+ return NULL;
return kasprintf(GFP_KERNEL, "snd/%s", dev_name(dev));
}
@@ -104,7 +108,6 @@ module_exit(cleanup_soundcore);
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sound.h>
-#include <linux/major.h>
#include <linux/kmod.h>
#define SOUND_STEP 16
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 0e5db719de24..de38108f0b28 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -35,7 +35,7 @@
#include "input.h"
MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
-MODULE_DESCRIPTION("caiaq USB audio, version 1.3.17");
+MODULE_DESCRIPTION("caiaq USB audio, version 1.3.18");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
"{Native Instruments, RigKontrol3},"
@@ -349,7 +349,9 @@ static void __devinit setup_card(struct snd_usb_caiaqdev *dev)
log("Unable to set up control system (ret=%d)\n", ret);
}
-static int create_card(struct usb_device* usb_dev, struct snd_card **cardp)
+static int create_card(struct usb_device *usb_dev,
+ struct usb_interface *intf,
+ struct snd_card **cardp)
{
int devnum;
int err;
@@ -374,7 +376,7 @@ static int create_card(struct usb_device* usb_dev, struct snd_card **cardp)
dev->chip.usb_id = USB_ID(le16_to_cpu(usb_dev->descriptor.idVendor),
le16_to_cpu(usb_dev->descriptor.idProduct));
spin_lock_init(&dev->spinlock);
- snd_card_set_dev(card, &usb_dev->dev);
+ snd_card_set_dev(card, &intf->dev);
*cardp = card;
return 0;
@@ -461,7 +463,7 @@ static int __devinit snd_probe(struct usb_interface *intf,
struct snd_card *card;
struct usb_device *device = interface_to_usbdev(intf);
- ret = create_card(device, &card);
+ ret = create_card(device, intf, &card);
if (ret < 0)
return ret;
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c
index 4bd3a7a0edc1..b81ed298c15a 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/usbmixer.c
@@ -461,7 +461,7 @@ static int mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int size, unsigned int __user *_tlv)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
- DECLARE_TLV_DB_SCALE(scale, 0, 0, 0);
+ DECLARE_TLV_DB_MINMAX(scale, 0, 0);
if (size < sizeof(scale))
return -ENOMEM;
@@ -469,7 +469,16 @@ static int mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
* while ALSA TLV contains in 1/100 dB unit
*/
scale[2] = (convert_signed_value(cval, cval->min) * 100) / 256;
- scale[3] = (convert_signed_value(cval, cval->res) * 100) / 256;
+ scale[3] = (convert_signed_value(cval, cval->max) * 100) / 256;
+ if (scale[3] <= scale[2]) {
+ /* something is wrong; assume it's either from/to 0dB */
+ if (scale[2] < 0)
+ scale[3] = 0;
+ else if (scale[2] > 0)
+ scale[2] = 0;
+ else /* totally crap, return an error */
+ return -EINVAL;
+ }
if (copy_to_user(_tlv, scale, sizeof(scale)))
return -EFAULT;
return 0;
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index a5aae9d67f31..fd44946ce4b3 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -514,7 +514,6 @@ static int usx2y_create_card(struct usb_device *device, struct snd_card **cardp)
US122L(card)->chip.dev->bus->busnum,
US122L(card)->chip.dev->devnum
);
- snd_card_set_dev(card, &device->dev);
*cardp = card;
return 0;
}
@@ -531,6 +530,7 @@ static int us122l_usb_probe(struct usb_interface *intf,
if (err < 0)
return err;
+ snd_card_set_dev(card, &intf->dev);
if (!us122l_create_card(card)) {
snd_card_free(card);
return -EINVAL;
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
index 5ce0da23ee96..cb4bb8373ca2 100644
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -364,7 +364,6 @@ static int usX2Y_create_card(struct usb_device *device, struct snd_card **cardp)
0,//us428(card)->usbmidi.ifnum,
usX2Y(card)->chip.dev->bus->busnum, usX2Y(card)->chip.dev->devnum
);
- snd_card_set_dev(card, &device->dev);
*cardp = card;
return 0;
}
@@ -388,6 +387,7 @@ static int usX2Y_usb_probe(struct usb_device *device,
err = usX2Y_create_card(device, &card);
if (err < 0)
return err;
+ snd_card_set_dev(card, &intf->dev);
if ((err = usX2Y_hwdep_new(card, device)) < 0 ||
(err = snd_card_register(card)) < 0) {
snd_card_free(card);
diff --git a/tools/perf/CREDITS b/tools/perf/CREDITS
new file mode 100644
index 000000000000..c2ddcb3acbd0
--- /dev/null
+++ b/tools/perf/CREDITS
@@ -0,0 +1,30 @@
+Most of the infrastructure that 'perf' uses here has been reused
+from the Git project, as of version:
+
+ 66996ec: Sync with 1.6.2.4
+
+Here is an (incomplete!) list of main contributors to those files
+in util/* and elsewhere:
+
+ Alex Riesen
+ Christian Couder
+ Dmitry Potapov
+ Jeff King
+ Johannes Schindelin
+ Johannes Sixt
+ Junio C Hamano
+ Linus Torvalds
+ Matthias Kestenholz
+ Michal Ostrowski
+ Miklos Vajna
+ Petr Baudis
+ Pierre Habouzit
+ René Scharfe
+ Samuel Tardieu
+ Shawn O. Pearce
+ Steffen Prohaska
+ Steve Haslam
+
+Thanks guys!
+
+The full history of the files can be found in the upstream Git commits.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 52d3fc6846a9..8aa3f8c88707 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -13,13 +13,25 @@ SYNOPSIS
DESCRIPTION
-----------
This command displays the performance counter profile information recorded
-via perf report.
+via perf record.
OPTIONS
-------
-i::
--input=::
Input file name. (default: perf.data)
+-d::
+--dsos=::
+	Only consider symbols in these dsos. Accepts a comma-separated
+	list; an entry of the form file://filename reads the list from
+	that file.
+-C::
+--comms=::
+	Only consider symbols in these comms. Accepts a comma-separated
+	list; an entry of the form file://filename reads the list from
+	that file.
+-S::
+--symbols=::
+	Only consider these symbols. Accepts a comma-separated list; an
+	entry of the form file://filename reads the list from that file.
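(Editorial illustration, not part of the patch; the comm, dso and file names are hypothetical.) A restricted report could then be requested with:

	perf report --comms=gcc,as --dsos=/lib/libc-2.9.so --symbols=file://hot-syms.txt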
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index c368a72721d7..0d74346d21ab 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -8,8 +8,8 @@ perf-stat - Run a command and gather performance counter statistics
SYNOPSIS
--------
[verse]
-'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
-'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] <command>
+'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] -- <command> [<options>]
DESCRIPTION
-----------
@@ -40,7 +40,7 @@ OPTIONS
-a::
system-wide collection
--l::
+-S::
scale counter values
EXAMPLES
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 36d7eef49913..7822b3d6baca 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -164,7 +164,7 @@ endif
# CFLAGS and LDFLAGS are for the users to override from the command line.
-CFLAGS = $(M64) -ggdb3 -Wall -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -Werror -O6
+CFLAGS = $(M64) -ggdb3 -Wall -Wextra -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -Werror -O6
LDFLAGS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS)
ALL_LDFLAGS = $(LDFLAGS)
@@ -223,7 +223,7 @@ SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
# Those must not be GNU-specific; they are shared with perl/ which may
# be built by a different compiler. (Note that this is an artifact now
# but it still might be nice to keep that distinction.)
-BASIC_CFLAGS =
+BASIC_CFLAGS = -Iutil/include
BASIC_LDFLAGS =
# Guard against environment variables
@@ -289,10 +289,11 @@ export PERL_PATH
LIB_FILE=libperf.a
LIB_H += ../../include/linux/perf_counter.h
+LIB_H += ../../include/linux/rbtree.h
+LIB_H += ../../include/linux/list.h
+LIB_H += util/include/linux/list.h
LIB_H += perf.h
-LIB_H += types.h
-LIB_H += util/list.h
-LIB_H += util/rbtree.h
+LIB_H += util/types.h
LIB_H += util/levenshtein.h
LIB_H += util/parse-options.h
LIB_H += util/parse-events.h
@@ -301,9 +302,11 @@ LIB_H += util/util.h
LIB_H += util/help.h
LIB_H += util/strbuf.h
LIB_H += util/string.h
+LIB_H += util/strlist.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
LIB_H += util/symbol.h
+LIB_H += util/module.h
LIB_H += util/color.h
LIB_OBJS += util/abspath.o
@@ -322,12 +325,16 @@ LIB_OBJS += util/run-command.o
LIB_OBJS += util/quote.o
LIB_OBJS += util/strbuf.o
LIB_OBJS += util/string.o
+LIB_OBJS += util/strlist.o
LIB_OBJS += util/usage.o
LIB_OBJS += util/wrapper.o
LIB_OBJS += util/sigchain.o
LIB_OBJS += util/symbol.o
+LIB_OBJS += util/module.o
LIB_OBJS += util/color.o
LIB_OBJS += util/pager.o
+LIB_OBJS += util/header.o
+LIB_OBJS += util/callchain.o
BUILTIN_OBJS += builtin-annotate.o
BUILTIN_OBJS += builtin-help.o
@@ -377,12 +384,6 @@ ifndef CC_LD_DYNPATH
endif
endif
-ifdef ZLIB_PATH
- BASIC_CFLAGS += -I$(ZLIB_PATH)/include
- EXTLIBS += -L$(ZLIB_PATH)/$(lib) $(CC_LD_DYNPATH)$(ZLIB_PATH)/$(lib)
-endif
-EXTLIBS += -lz
-
ifdef NEEDS_SOCKET
EXTLIBS += -lsocket
endif
@@ -693,6 +694,9 @@ builtin-init-db.o: builtin-init-db.c PERF-CFLAGS
util/config.o: util/config.c PERF-CFLAGS
$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
perf-%$X: %.o $(PERFLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 7e58e3ad1508..5f9eefecc574 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -10,9 +10,9 @@
#include "util/util.h"
#include "util/color.h"
-#include "util/list.h"
+#include <linux/list.h>
#include "util/cache.h"
-#include "util/rbtree.h"
+#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/string.h"
@@ -25,10 +25,6 @@
#define SHOW_USER 2
#define SHOW_HV 4
-#define MIN_GREEN 0.5
-#define MIN_RED 5.0
-
-
static char const *input_name = "perf.data";
static char *vmlinux = "vmlinux";
@@ -43,6 +39,10 @@ static int dump_trace = 0;
static int verbose;
+static int modules;
+
+static int full_paths;
+
static int print_line;
static unsigned long page_size;
@@ -160,7 +160,7 @@ static void dsos__fprintf(FILE *fp)
static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
{
- return dso__find_symbol(kernel_dso, ip);
+ return dso__find_symbol(dso, ip);
}
static int load_kernel(void)
@@ -171,8 +171,8 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
- if (err) {
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
+ if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
} else
@@ -203,7 +203,7 @@ static u64 map__map_ip(struct map *map, u64 ip)
return ip - map->start + map->pgoff;
}
-static u64 vdso__map_ip(struct map *map, u64 ip)
+static u64 vdso__map_ip(struct map *map __used, u64 ip)
{
return ip;
}
@@ -600,7 +600,7 @@ static LIST_HEAD(hist_entry__sort_list);
static int sort_dimension__add(char *tok)
{
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
struct sort_dimension *sd = &sort_dimensions[i];
@@ -855,7 +855,7 @@ static unsigned long total = 0,
total_unknown = 0;
static int
-process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
char level;
int show = 0;
@@ -1013,10 +1013,10 @@ process_period_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
- if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
- return process_overflow_event(event, offset, head);
-
switch (event->header.type) {
+ case PERF_EVENT_SAMPLE:
+ return process_sample_event(event, offset, head);
+
case PERF_EVENT_MMAP:
return process_mmap_event(event, offset, head);
@@ -1043,24 +1043,6 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
}
-static char *get_color(double percent)
-{
- char *color = PERF_COLOR_NORMAL;
-
- /*
- * We color high-overhead entries in red, mid-overhead
- * entries in green - and keep the low overhead places
- * normal:
- */
- if (percent >= MIN_RED)
- color = PERF_COLOR_RED;
- else {
- if (percent > MIN_GREEN)
- color = PERF_COLOR_GREEN;
- }
- return color;
-}
-
static int
parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
{
@@ -1069,7 +1051,7 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
static const char *prev_color;
unsigned int offset;
size_t line_len;
- u64 line_ip;
+ s64 line_ip;
int ret;
char *c;
@@ -1122,7 +1104,7 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
} else if (sym->hist_sum)
percent = 100.0 * hits / sym->hist_sum;
- color = get_color(percent);
+ color = get_percent_color(percent);
/*
* Also color the filename and line if needed, with
@@ -1258,7 +1240,7 @@ static void print_summary(char *filename)
sym_ext = rb_entry(node, struct sym_ext, node);
percent = sym_ext->percent;
- color = get_color(percent);
+ color = get_percent_color(percent);
path = sym_ext->path;
color_fprintf(stdout, color, " %7.2f %s", percent, path);
@@ -1268,19 +1250,25 @@ static void print_summary(char *filename)
static void annotate_sym(struct dso *dso, struct symbol *sym)
{
- char *filename = dso->name;
+ char *filename = dso->name, *d_filename;
u64 start, end, len;
char command[PATH_MAX*2];
FILE *file;
if (!filename)
return;
- if (dso == kernel_dso)
+ if (sym->module)
+ filename = sym->module->path;
+ else if (dso == kernel_dso)
filename = vmlinux;
start = sym->obj_start;
if (!start)
start = sym->start;
+ if (full_paths)
+ d_filename = filename;
+ else
+ d_filename = basename(filename);
end = start + sym->end - sym->start + 1;
len = sym->end - sym->start;
@@ -1291,13 +1279,14 @@ static void annotate_sym(struct dso *dso, struct symbol *sym)
}
printf("\n\n------------------------------------------------\n");
- printf(" Percent | Source code & Disassembly of %s\n", filename);
+ printf(" Percent | Source code & Disassembly of %s\n", d_filename);
printf("------------------------------------------------\n");
if (verbose >= 2)
printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);
- sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", (u64)start, (u64)end, filename);
+ sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
+ (u64)start, (u64)end, filename, filename);
if (verbose >= 3)
printf("doing: %s\n", command);
@@ -1428,7 +1417,7 @@ more:
head += size;
- if (offset + head < stat.st_size)
+ if (offset + head < (unsigned long)stat.st_size)
goto more;
rc = EXIT_SUCCESS;
@@ -1472,8 +1461,12 @@ static const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_BOOLEAN('m', "modules", &modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('l', "print-line", &print_line,
"print matching source lines (may be slow)"),
+ OPT_BOOLEAN('P', "full-paths", &full_paths,
+ "Don't shorten the displayed pathnames"),
OPT_END()
};
@@ -1492,7 +1485,7 @@ static void setup_sorting(void)
free(str);
}
-int cmd_annotate(int argc, const char **argv, const char *prefix)
+int cmd_annotate(int argc, const char **argv, const char *prefix __used)
{
symbol__init();
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 0f32dc3f3c4c..2599d86a733b 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -3,6 +3,7 @@
*
* Builtin help command
*/
+#include "perf.h"
#include "util/cache.h"
#include "builtin.h"
#include "util/exec_cmd.h"
@@ -277,7 +278,7 @@ static struct cmdnames main_cmds, other_cmds;
void list_common_cmds_help(void)
{
- int i, longest = 0;
+ unsigned int i, longest = 0;
for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
if (longest < strlen(common_cmds[i].name))
@@ -415,9 +416,10 @@ static void show_html_page(const char *perf_cmd)
open_html(page_path.buf);
}
-int cmd_help(int argc, const char **argv, const char *prefix)
+int cmd_help(int argc, const char **argv, const char *prefix __used)
{
const char *alias;
+
load_command_list("perf-", &main_cmds, &other_cmds);
perf_config(perf_help_config, NULL);
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index fe60e37c96ef..f990fa8a35c9 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -13,7 +13,7 @@
#include "util/parse-options.h"
#include "util/parse-events.h"
-int cmd_list(int argc, const char **argv, const char *prefix)
+int cmd_list(int argc __used, const char **argv __used, const char *prefix __used)
{
print_events();
return 0;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index d7ebbd757543..4ef78a5e6f32 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -14,6 +14,8 @@
#include "util/parse-events.h"
#include "util/string.h"
+#include "util/header.h"
+
#include <unistd.h>
#include <sched.h>
@@ -39,6 +41,8 @@ static int force = 0;
static int append_file = 0;
static int call_graph = 0;
static int verbose = 0;
+static int inherit_stat = 0;
+static int no_samples = 0;
static long samples;
static struct timeval last_read;
@@ -52,7 +56,8 @@ static int nr_poll;
static int nr_cpu;
static int file_new = 1;
-static struct perf_file_header file_header;
+
+struct perf_header *header;
struct mmap_event {
struct perf_event_header header;
@@ -289,7 +294,7 @@ static void pid_synthesize_mmap_samples(pid_t pid)
while (1) {
char bf[BUFSIZ], *pbf = bf;
struct mmap_event mmap_ev = {
- .header.type = PERF_EVENT_MMAP,
+ .header = { .type = PERF_EVENT_MMAP },
};
int n;
size_t size;
@@ -306,12 +311,11 @@ static void pid_synthesize_mmap_samples(pid_t pid)
continue;
pbf += n + 3;
if (*pbf == 'x') { /* vm_exec */
- char *execname = strrchr(bf, ' ');
+ char *execname = strchr(bf, '/');
- if (execname == NULL || execname[1] != '/')
+ if (execname == NULL)
continue;
- execname += 1;
size = strlen(execname);
execname[size - 1] = '\0'; /* Remove \n */
memcpy(mmap_ev.filename, execname, size);
@@ -329,7 +333,7 @@ static void pid_synthesize_mmap_samples(pid_t pid)
fclose(fp);
}
-static void synthesize_samples(void)
+static void synthesize_all(void)
{
DIR *proc;
struct dirent dirent, *next;
@@ -353,10 +357,35 @@ static void synthesize_samples(void)
static int group_fd;
+static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr)
+{
+ struct perf_header_attr *h_attr;
+
+ if (nr < header->attrs) {
+ h_attr = header->attr[nr];
+ } else {
+ h_attr = perf_header_attr__new(a);
+ perf_header__add_attr(header, h_attr);
+ }
+
+ return h_attr;
+}
+
static void create_counter(int counter, int cpu, pid_t pid)
{
struct perf_counter_attr *attr = attrs + counter;
- int track = 1;
+ struct perf_header_attr *h_attr;
+ int track = !counter; /* only the first counter needs these */
+ struct {
+ u64 count;
+ u64 time_enabled;
+ u64 time_running;
+ u64 id;
+ } read_data;
+
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING |
+ PERF_FORMAT_ID;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
@@ -366,25 +395,20 @@ static void create_counter(int counter, int cpu, pid_t pid)
attr->sample_freq = freq;
}
+ if (no_samples)
+ attr->sample_freq = 0;
+
+ if (inherit_stat)
+ attr->inherit_stat = 1;
+
if (call_graph)
attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
- if (file_new) {
- file_header.sample_type = attr->sample_type;
- } else {
- if (file_header.sample_type != attr->sample_type) {
- fprintf(stderr, "incompatible append\n");
- exit(-1);
- }
- }
-
attr->mmap = track;
attr->comm = track;
attr->inherit = (cpu < 0) && inherit;
attr->disabled = 1;
- track = 0; /* only the first counter needs these */
-
try_again:
fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0);
@@ -415,6 +439,22 @@ try_again:
exit(-1);
}
+ h_attr = get_header_attr(attr, counter);
+
+ if (!file_new) {
+ if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
+ fprintf(stderr, "incompatible append\n");
+ exit(-1);
+ }
+ }
+
+ if (read(fd[nr_cpu][counter], &read_data, sizeof(read_data)) == -1) {
+ perror("Unable to read perf file descriptor\n");
+ exit(-1);
+ }
+
+ perf_header_attr__add_id(h_attr, read_data.id);
+
assert(fd[nr_cpu][counter] >= 0);
fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK);
@@ -445,11 +485,6 @@ static void open_counters(int cpu, pid_t pid)
{
int counter;
- if (pid > 0) {
- pid_synthesize_comm_event(pid, 0);
- pid_synthesize_mmap_samples(pid);
- }
-
group_fd = -1;
for (counter = 0; counter < nr_counters; counter++)
create_counter(counter, cpu, pid);
@@ -459,17 +494,16 @@ static void open_counters(int cpu, pid_t pid)
static void atexit_header(void)
{
- file_header.data_size += bytes_written;
+ header->data_size += bytes_written;
- if (pwrite(output, &file_header, sizeof(file_header), 0) == -1)
- perror("failed to write on file headers");
+ perf_header__write(header, output);
}
static int __cmd_record(int argc, const char **argv)
{
int i, counter;
struct stat st;
- pid_t pid;
+ pid_t pid = 0;
int flags;
int ret;
@@ -500,22 +534,31 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
- if (!file_new) {
- if (read(output, &file_header, sizeof(file_header)) == -1) {
- perror("failed to read file headers");
- exit(-1);
- }
-
- lseek(output, file_header.data_size, SEEK_CUR);
- }
+ if (!file_new)
+ header = perf_header__read(output);
+ else
+ header = perf_header__new();
atexit(atexit_header);
if (!system_wide) {
- open_counters(-1, target_pid != -1 ? target_pid : getpid());
+ pid = target_pid;
+ if (pid == -1)
+ pid = getpid();
+
+ open_counters(-1, pid);
} else for (i = 0; i < nr_cpus; i++)
open_counters(i, target_pid);
+ if (file_new)
+ perf_header__write(header, output);
+
+ if (!system_wide) {
+ pid_synthesize_comm_event(pid, 0);
+ pid_synthesize_mmap_samples(pid);
+ } else
+ synthesize_all();
+
if (target_pid == -1 && argc) {
pid = fork();
if (pid < 0)
@@ -539,10 +582,7 @@ static int __cmd_record(int argc, const char **argv)
}
}
- if (system_wide)
- synthesize_samples();
-
- while (!done) {
+ for (;;) {
int hits = samples;
for (i = 0; i < nr_cpu; i++) {
@@ -550,8 +590,11 @@ static int __cmd_record(int argc, const char **argv)
mmap_read(&mmap_array[i][counter]);
}
- if (hits == samples)
+ if (hits == samples) {
+ if (done)
+ break;
ret = poll(event_array, nr_poll, 100);
+ }
}
/*
@@ -600,10 +643,14 @@ static const struct option options[] = {
"do call-graph (stack chain/backtrace) recording"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
+ OPT_BOOLEAN('s', "stat", &inherit_stat,
+ "per thread counts"),
+ OPT_BOOLEAN('n', "no-samples", &no_samples,
+ "don't sample"),
OPT_END()
};
-int cmd_record(int argc, const char **argv, const char *prefix)
+int cmd_record(int argc, const char **argv, const char *prefix __used)
{
int counter;
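The read_data struct introduced in create_counter() above mirrors, field for field, the read_format bits set on the counter. A sketch of that correspondence for a single (non-group) counter, using only names that appear in the hunks above:

    attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                        PERF_FORMAT_TOTAL_TIME_RUNNING |
                        PERF_FORMAT_ID;

    /* a read() on the counter fd then returns, in this order: */
    struct {
        u64 count;          /* the event count itself */
        u64 time_enabled;   /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        u64 time_running;   /* PERF_FORMAT_TOTAL_TIME_RUNNING */
        u64 id;             /* PERF_FORMAT_ID, handed to perf_header_attr__add_id() */
    } read_data;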
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 5eb5566f0c95..4e5cc266311e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -10,13 +10,16 @@
#include "util/util.h"
#include "util/color.h"
-#include "util/list.h"
+#include <linux/list.h>
#include "util/cache.h"
-#include "util/rbtree.h"
+#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/string.h"
+#include "util/callchain.h"
+#include "util/strlist.h"
#include "perf.h"
+#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -30,6 +33,8 @@ static char *vmlinux = NULL;
static char default_sort_order[] = "comm,dso";
static char *sort_order = default_sort_order;
+static char *dso_list_str, *comm_list_str, *sym_list_str;
+static struct strlist *dso_list, *comm_list, *sym_list;
static int input;
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
@@ -41,6 +46,8 @@ static int dump_trace = 0;
static int verbose;
#define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
+static int modules;
+
static int full_paths;
static unsigned long page_size;
@@ -52,6 +59,18 @@ static regex_t parent_regex;
static int exclude_other = 1;
+static char callchain_default_opt[] = "fractal,0.5";
+
+static int callchain;
+
+static
+struct callchain_param callchain_param = {
+ .mode = CHAIN_GRAPH_ABS,
+ .min_percent = 0.5
+};
+
+static u64 sample_type;
+
struct ip_event {
struct perf_event_header header;
u64 ip;
@@ -59,11 +78,6 @@ struct ip_event {
unsigned char __more_data[];
};
-struct ip_callchain {
- u64 nr;
- u64 ips[0];
-};
-
struct mmap_event {
struct perf_event_header header;
u32 pid, tid;
@@ -97,6 +111,13 @@ struct lost_event {
u64 lost;
};
+struct read_event {
+ struct perf_event_header header;
+ u32 pid,tid;
+ u64 value;
+ u64 format[3];
+};
+
typedef union event_union {
struct perf_event_header header;
struct ip_event ip;
@@ -105,11 +126,13 @@ typedef union event_union {
struct fork_event fork;
struct period_event period;
struct lost_event lost;
+ struct read_event read;
} event_t;
static LIST_HEAD(dsos);
static struct dso *kernel_dso;
static struct dso *vdso;
+static struct dso *hypervisor_dso;
static void dsos__add(struct dso *dso)
{
@@ -165,7 +188,7 @@ static void dsos__fprintf(FILE *fp)
static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
{
- return dso__find_symbol(kernel_dso, ip);
+ return dso__find_symbol(dso, ip);
}
static int load_kernel(void)
@@ -176,8 +199,8 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
- if (err) {
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
+ if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
} else
@@ -191,6 +214,11 @@ static int load_kernel(void)
dsos__add(vdso);
+ hypervisor_dso = dso__new("[hypervisor]", 0);
+ if (!hypervisor_dso)
+ return -1;
+ dsos__add(hypervisor_dso);
+
return err;
}
@@ -222,14 +250,14 @@ static u64 map__map_ip(struct map *map, u64 ip)
return ip - map->start + map->pgoff;
}
-static u64 vdso__map_ip(struct map *map, u64 ip)
+static u64 vdso__map_ip(struct map *map __used, u64 ip)
{
return ip;
}
static inline int is_anon_memory(const char *filename)
{
- return strcmp(filename, "//anon") == 0;
+ return strcmp(filename, "//anon") == 0;
}
static struct map *map__new(struct mmap_event *event)
@@ -400,9 +428,27 @@ static void thread__insert_map(struct thread *self, struct map *map)
list_for_each_entry_safe(pos, tmp, &self->maps, node) {
if (map__overlap(pos, map)) {
- list_del_init(&pos->node);
- /* XXX leaks dsos */
- free(pos);
+ if (verbose >= 2) {
+ printf("overlapping maps:\n");
+ map__fprintf(map, stdout);
+ map__fprintf(pos, stdout);
+ }
+
+ if (map->start <= pos->start && map->end > pos->start)
+ pos->start = map->end;
+
+ if (map->end >= pos->end && map->start < pos->end)
+ pos->end = map->start;
+
+ if (verbose >= 2) {
+ printf("after collision:\n");
+ map__fprintf(pos, stdout);
+ }
+
+ if (pos->start >= pos->end) {
+ list_del_init(&pos->node);
+ free(pos);
+ }
}
}
@@ -464,17 +510,19 @@ static size_t threads__fprintf(FILE *fp)
static struct rb_root hist;
struct hist_entry {
- struct rb_node rb_node;
-
- struct thread *thread;
- struct map *map;
- struct dso *dso;
- struct symbol *sym;
- struct symbol *parent;
- u64 ip;
- char level;
-
- u64 count;
+ struct rb_node rb_node;
+
+ struct thread *thread;
+ struct map *map;
+ struct dso *dso;
+ struct symbol *sym;
+ struct symbol *parent;
+ u64 ip;
+ char level;
+ struct callchain_node callchain;
+ struct rb_root sorted_chain;
+
+ u64 count;
};
/*
@@ -609,7 +657,11 @@ sort__sym_print(FILE *fp, struct hist_entry *self)
if (self->sym) {
ret += fprintf(fp, "[%c] %s",
- self->dso == kernel_dso ? 'k' : '.', self->sym->name);
+ self->dso == kernel_dso ? 'k' :
+ self->dso == hypervisor_dso ? 'h' : '.', self->sym->name);
+
+ if (self->sym->module)
+ ret += fprintf(fp, "\t[%s]", self->sym->module->name);
} else {
ret += fprintf(fp, "%#016llx", (u64)self->ip);
}
@@ -674,7 +726,7 @@ static LIST_HEAD(hist_entry__sort_list);
static int sort_dimension__add(char *tok)
{
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
struct sort_dimension *sd = &sort_dimensions[i];
@@ -744,34 +796,180 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
return cmp;
}
+static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
+{
+ int i;
+ size_t ret = 0;
+
+ ret += fprintf(fp, "%s", " ");
+
+ for (i = 0; i < depth; i++)
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "| ");
+ else
+ ret += fprintf(fp, " ");
+
+ ret += fprintf(fp, "\n");
+
+ return ret;
+}
static size_t
-hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
+ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
+ int depth_mask, int count, u64 total_samples,
+ int hits)
{
- struct sort_entry *se;
- size_t ret;
+ int i;
+ size_t ret = 0;
- if (exclude_other && !self->parent)
- return 0;
+ ret += fprintf(fp, "%s", " ");
+ for (i = 0; i < depth; i++) {
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "|");
+ else
+ ret += fprintf(fp, " ");
+ if (!count && i == depth - 1) {
+ double percent;
+
+ percent = hits * 100.0 / total_samples;
+ ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
+ } else
+ ret += fprintf(fp, "%s", " ");
+ }
+ if (chain->sym)
+ ret += fprintf(fp, "%s\n", chain->sym->name);
+ else
+ ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
+
+ return ret;
+}
+
+static size_t
+callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+ u64 total_samples, int depth, int depth_mask)
+{
+ struct rb_node *node, *next;
+ struct callchain_node *child;
+ struct callchain_list *chain;
+ int new_depth_mask = depth_mask;
+ u64 new_total;
+ size_t ret = 0;
+ int i;
- if (total_samples) {
- double percent = self->count * 100.0 / total_samples;
- char *color = PERF_COLOR_NORMAL;
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = self->cumul_hit;
+ else
+ new_total = total_samples;
+
+ node = rb_first(&self->rb_root);
+ while (node) {
+ child = rb_entry(node, struct callchain_node, rb_node);
/*
- * We color high-overhead entries in red, mid-overhead
- * entries in green - and keep the low overhead places
- * normal:
+ * The depth mask manages the output of pipes that show
+ * the depth. We don't want to keep the pipes of the current
+ * level for the last child of this depth
*/
- if (percent >= 5.0) {
- color = PERF_COLOR_RED;
- } else {
- if (percent >= 0.5)
- color = PERF_COLOR_GREEN;
+ next = rb_next(node);
+ if (!next)
+ new_depth_mask &= ~(1 << (depth - 1));
+
+ /*
+ * But we keep the older depth mask for the line separator
+ * to keep the level link until we reach the last child
+ */
+ ret += ipchain__fprintf_graph_line(fp, depth, depth_mask);
+ i = 0;
+ list_for_each_entry(chain, &child->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+ ret += ipchain__fprintf_graph(fp, chain, depth,
+ new_depth_mask, i++,
+ new_total,
+ child->cumul_hit);
+ }
+ ret += callchain__fprintf_graph(fp, child, new_total,
+ depth + 1,
+ new_depth_mask | (1 << depth));
+ node = next;
+ }
+
+ return ret;
+}
+
+static size_t
+callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
+ u64 total_samples)
+{
+ struct callchain_list *chain;
+ size_t ret = 0;
+
+ if (!self)
+ return 0;
+
+ ret += callchain__fprintf_flat(fp, self->parent, total_samples);
+
+
+ list_for_each_entry(chain, &self->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+ if (chain->sym)
+ ret += fprintf(fp, " %s\n", chain->sym->name);
+ else
+ ret += fprintf(fp, " %p\n",
+ (void *)(long)chain->ip);
+ }
+
+ return ret;
+}
+
+static size_t
+hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
+ u64 total_samples)
+{
+ struct rb_node *rb_node;
+ struct callchain_node *chain;
+ size_t ret = 0;
+
+ rb_node = rb_first(&self->sorted_chain);
+ while (rb_node) {
+ double percent;
+
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+ percent = chain->hit * 100.0 / total_samples;
+ switch (callchain_param.mode) {
+ case CHAIN_FLAT:
+ ret += percent_color_fprintf(fp, " %6.2f%%\n",
+ percent);
+ ret += callchain__fprintf_flat(fp, chain, total_samples);
+ break;
+ case CHAIN_GRAPH_ABS: /* Fall through */
+ case CHAIN_GRAPH_REL:
+ ret += callchain__fprintf_graph(fp, chain,
+ total_samples, 1, 1);
+ default:
+ break;
}
+ ret += fprintf(fp, "\n");
+ rb_node = rb_next(rb_node);
+ }
+
+ return ret;
+}
+
- ret = color_fprintf(fp, color, " %6.2f%%",
+static size_t
+hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
+{
+ struct sort_entry *se;
+ size_t ret;
+
+ if (exclude_other && !self->parent)
+ return 0;
+
+ if (total_samples)
+ ret = percent_color_fprintf(fp, " %6.2f%%",
(self->count * 100.0) / total_samples);
- } else
+ else
ret = fprintf(fp, "%12Ld ", self->count);
list_for_each_entry(se, &hist_entry__sort_list, list) {
@@ -784,6 +982,9 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
ret += fprintf(fp, "\n");
+ if (callchain)
+ hist_entry_callchain__fprintf(fp, self, total_samples);
+
return ret;
}
@@ -797,7 +998,7 @@ resolve_symbol(struct thread *thread, struct map **mapp,
{
struct dso *dso = dsop ? *dsop : NULL;
struct map *map = mapp ? *mapp : NULL;
- uint64_t ip = *ipp;
+ u64 ip = *ipp;
if (!thread)
return NULL;
@@ -814,7 +1015,6 @@ resolve_symbol(struct thread *thread, struct map **mapp,
*mapp = map;
got_map:
ip = map->map_ip(map, ip);
- *ipp = ip;
dso = map->dso;
} else {
@@ -828,6 +1028,8 @@ got_map:
dso = kernel_dso;
}
dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
+ dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
+ *ipp = ip;
if (dsop)
*dsop = dso;
@@ -846,6 +1048,58 @@ static int call__match(struct symbol *sym)
return 0;
}
+static struct symbol **
+resolve_callchain(struct thread *thread, struct map *map __used,
+ struct ip_callchain *chain, struct hist_entry *entry)
+{
+ u64 context = PERF_CONTEXT_MAX;
+ struct symbol **syms = NULL;
+ unsigned int i;
+
+ if (callchain) {
+ syms = calloc(chain->nr, sizeof(*syms));
+ if (!syms) {
+ fprintf(stderr, "Can't allocate memory for symbols\n");
+ exit(-1);
+ }
+ }
+
+ for (i = 0; i < chain->nr; i++) {
+ u64 ip = chain->ips[i];
+ struct dso *dso = NULL;
+ struct symbol *sym;
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ context = ip;
+ continue;
+ }
+
+ switch (context) {
+ case PERF_CONTEXT_HV:
+ dso = hypervisor_dso;
+ break;
+ case PERF_CONTEXT_KERNEL:
+ dso = kernel_dso;
+ break;
+ default:
+ break;
+ }
+
+ sym = resolve_symbol(thread, NULL, &dso, &ip);
+
+ if (sym) {
+ if (sort__has_parent && call__match(sym) &&
+ !entry->parent)
+ entry->parent = sym;
+ if (!callchain)
+ break;
+ syms[i] = sym;
+ }
+ }
+
+ return syms;
+}
+
/*
* collect histogram counts
*/
@@ -858,6 +1112,7 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
struct rb_node **p = &hist.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *he;
+ struct symbol **syms = NULL;
struct hist_entry entry = {
.thread = thread,
.map = map,
@@ -867,39 +1122,12 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
.level = level,
.count = count,
.parent = NULL,
+ .sorted_chain = RB_ROOT
};
int cmp;
- if (sort__has_parent && chain) {
- u64 context = PERF_CONTEXT_MAX;
- int i;
-
- for (i = 0; i < chain->nr; i++) {
- u64 ip = chain->ips[i];
- struct dso *dso = NULL;
- struct symbol *sym;
-
- if (ip >= PERF_CONTEXT_MAX) {
- context = ip;
- continue;
- }
-
- switch (context) {
- case PERF_CONTEXT_KERNEL:
- dso = kernel_dso;
- break;
- default:
- break;
- }
-
- sym = resolve_symbol(thread, NULL, &dso, &ip);
-
- if (sym && call__match(sym)) {
- entry.parent = sym;
- break;
- }
- }
- }
+ if ((sort__has_parent || callchain) && chain)
+ syms = resolve_callchain(thread, map, chain, &entry);
while (*p != NULL) {
parent = *p;
@@ -909,6 +1137,10 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
if (!cmp) {
he->count += count;
+ if (callchain) {
+ append_chain(&he->callchain, chain, syms);
+ free(syms);
+ }
return 0;
}
@@ -922,6 +1154,11 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
if (!he)
return -ENOMEM;
*he = entry;
+ if (callchain) {
+ callchain_init(&he->callchain);
+ append_chain(&he->callchain, chain, syms);
+ free(syms);
+ }
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, &hist);
@@ -992,12 +1229,16 @@ static void collapse__resort(void)
static struct rb_root output_hists;
-static void output__insert_entry(struct hist_entry *he)
+static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
{
struct rb_node **p = &output_hists.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
+ if (callchain)
+ callchain_param.sort(&he->sorted_chain, &he->callchain,
+ min_callchain_hits, &callchain_param);
+
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
@@ -1012,11 +1253,14 @@ static void output__insert_entry(struct hist_entry *he)
rb_insert_color(&he->rb_node, &output_hists);
}
-static void output__resort(void)
+static void output__resort(u64 total_samples)
{
struct rb_node *next;
struct hist_entry *n;
struct rb_root *tree = &hist;
+ u64 min_callchain_hits;
+
+ min_callchain_hits = total_samples * (callchain_param.min_percent / 100);
if (sort__need_collapse)
tree = &collapse_hists;
@@ -1028,7 +1272,7 @@ static void output__resort(void)
next = rb_next(&n->rb_node);
rb_erase(&n->rb_node, tree);
- output__insert_entry(n);
+ output__insert_entry(n, min_callchain_hits);
}
}
@@ -1054,7 +1298,7 @@ static size_t output__fprintf(FILE *fp, u64 total_samples)
fprintf(fp, "# ........");
list_for_each_entry(se, &hist_entry__sort_list, list) {
- int i;
+ unsigned int i;
if (exclude_other && (se == &sort_parent))
continue;
@@ -1115,7 +1359,7 @@ static int validate_chain(struct ip_callchain *chain, event_t *event)
}
static int
-process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
char level;
int show = 0;
@@ -1126,13 +1370,14 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
struct map *map = NULL;
void *more_data = event->ip.__more_data;
struct ip_callchain *chain = NULL;
+ int cpumode;
- if (event->header.type & PERF_SAMPLE_PERIOD) {
+ if (sample_type & PERF_SAMPLE_PERIOD) {
period = *(u64 *)more_data;
more_data += sizeof(u64);
}
- dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n",
+ dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.misc,
@@ -1140,8 +1385,8 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
(void *)(long)ip,
(long long)period);
- if (event->header.type & PERF_SAMPLE_CALLCHAIN) {
- int i;
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+ unsigned int i;
chain = (void *)more_data;
@@ -1166,7 +1411,12 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
return -1;
}
- if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
+ if (comm_list && !strlist__has_entry(comm_list, thread->comm))
+ return 0;
+
+ cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
+
+ if (cpumode == PERF_EVENT_MISC_KERNEL) {
show = SHOW_KERNEL;
level = 'k';
@@ -1174,7 +1424,7 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
dprintf(" ...... dso: %s\n", dso->name);
- } else if (event->header.misc & PERF_EVENT_MISC_USER) {
+ } else if (cpumode == PERF_EVENT_MISC_USER) {
show = SHOW_USER;
level = '.';
@@ -1182,12 +1432,21 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
} else {
show = SHOW_HV;
level = 'H';
+
+ dso = hypervisor_dso;
+
dprintf(" ...... dso: [hypervisor]\n");
}
if (show & show_mask) {
struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
+ if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
+ return 0;
+
+ if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
+ return 0;
+
if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
eprintf("problem incrementing symbol count, skipping event\n");
return -1;
@@ -1328,14 +1587,27 @@ static void trace_event(event_t *event)
}
static int
+process_read_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->read.pid,
+ event->read.tid,
+ event->read.value);
+
+ return 0;
+}
+
+static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
trace_event(event);
- if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
- return process_overflow_event(event, offset, head);
-
switch (event->header.type) {
+ case PERF_EVENT_SAMPLE:
+ return process_sample_event(event, offset, head);
+
case PERF_EVENT_MMAP:
return process_mmap_event(event, offset, head);
@@ -1351,6 +1623,9 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
case PERF_EVENT_LOST:
return process_lost_event(event, offset, head);
+ case PERF_EVENT_READ:
+ return process_read_event(event, offset, head);
+
/*
* We don't process them right now but they are fine:
*/
@@ -1366,13 +1641,30 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
}
-static struct perf_file_header file_header;
+static struct perf_header *header;
+
+static u64 perf_header__sample_type(void)
+{
+ u64 sample_type = 0;
+ int i;
+
+ for (i = 0; i < header->attrs; i++) {
+ struct perf_header_attr *attr = header->attr[i];
+
+ if (!sample_type)
+ sample_type = attr->attr.sample_type;
+ else if (sample_type != attr->attr.sample_type)
+ die("non matching sample_type");
+ }
+
+ return sample_type;
+}
static int __cmd_report(void)
{
int ret, rc = EXIT_FAILURE;
unsigned long offset = 0;
- unsigned long head = sizeof(file_header);
+ unsigned long head, shift;
struct stat stat;
event_t *event;
uint32_t size;
@@ -1400,15 +1692,24 @@ static int __cmd_report(void)
exit(0);
}
- if (read(input, &file_header, sizeof(file_header)) == -1) {
- perror("failed to read file headers");
- exit(-1);
- }
+ header = perf_header__read(input);
+ head = header->data_offset;
- if (sort__has_parent &&
- !(file_header.sample_type & PERF_SAMPLE_CALLCHAIN)) {
- fprintf(stderr, "selected --sort parent, but no callchain data\n");
- exit(-1);
+ sample_type = perf_header__sample_type();
+
+ if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (sort__has_parent) {
+ fprintf(stderr, "selected --sort parent, but no"
+ " callchain data. Did you call"
+ " perf record without -g?\n");
+ exit(-1);
+ }
+ if (callchain) {
+ fprintf(stderr, "selected -c but no callchain data."
+ " Did you call perf record without"
+ " -g?\n");
+ exit(-1);
+ }
}
if (load_kernel() < 0) {
@@ -1426,6 +1727,11 @@ static int __cmd_report(void)
cwd = NULL;
cwdlen = 0;
}
+
+ shift = page_size * (head / page_size);
+ offset += shift;
+ head -= shift;
+
remap:
buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
MAP_SHARED, input, offset);
@@ -1442,9 +1748,10 @@ more:
size = 8;
if (head + event->header.size >= page_size * mmap_window) {
- unsigned long shift = page_size * (head / page_size);
int ret;
+ shift = page_size * (head / page_size);
+
ret = munmap(buf, page_size * mmap_window);
assert(ret == 0);
@@ -1482,10 +1789,10 @@ more:
head += size;
- if (offset + head >= sizeof(file_header) + file_header.data_size)
+ if (offset + head >= header->data_offset + header->data_size)
goto done;
- if (offset + head < stat.st_size)
+ if (offset + head < (unsigned long)stat.st_size)
goto more;
done:
@@ -1509,12 +1816,58 @@ done:
dsos__fprintf(stdout);
collapse__resort();
- output__resort();
+ output__resort(total);
output__fprintf(stdout, total);
return rc;
}
+static int
+parse_callchain_opt(const struct option *opt __used, const char *arg,
+ int unset __used)
+{
+ char *tok;
+ char *endptr;
+
+ callchain = 1;
+
+ if (!arg)
+ return 0;
+
+ tok = strtok((char *)arg, ",");
+ if (!tok)
+ return -1;
+
+ /* get the output mode */
+ if (!strncmp(tok, "graph", strlen(arg)))
+ callchain_param.mode = CHAIN_GRAPH_ABS;
+
+ else if (!strncmp(tok, "flat", strlen(arg)))
+ callchain_param.mode = CHAIN_FLAT;
+
+ else if (!strncmp(tok, "fractal", strlen(arg)))
+ callchain_param.mode = CHAIN_GRAPH_REL;
+
+ else
+ return -1;
+
+ /* get the min percentage */
+ tok = strtok(NULL, ",");
+ if (!tok)
+ goto setup;
+
+ callchain_param.min_percent = strtod(tok, &endptr);
+ if (tok == endptr)
+ return -1;
+
+setup:
+ if (register_callchain_param(&callchain_param) < 0) {
+ fprintf(stderr, "Can't register callchain params\n");
+ return -1;
+ }
+ return 0;
+}
+
static const char * const report_usage[] = {
"perf report [<options>] <command>",
NULL
@@ -1528,6 +1881,8 @@ static const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_BOOLEAN('m', "modules", &modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('P', "full-paths", &full_paths,
@@ -1536,6 +1891,15 @@ static const struct option options[] = {
"regex filter to identify parent, see: '--sort parent'"),
OPT_BOOLEAN('x', "exclude-other", &exclude_other,
"Only display entries with parent-match"),
+ OPT_CALLBACK_DEFAULT('c', "callchain", NULL, "output_type,min_percent",
+ "Display callchains using output_type and min percent threshold. "
+ "Default: flat,0", &parse_callchain_opt, callchain_default_opt),
+ OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
+ "only consider symbols in these dsos"),
+ OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
+ "only consider symbols in these comms"),
+ OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
+ "only consider these symbols"),
OPT_END()
};
@@ -1554,7 +1918,20 @@ static void setup_sorting(void)
free(str);
}
-int cmd_report(int argc, const char **argv, const char *prefix)
+static void setup_list(struct strlist **list, const char *list_str,
+ const char *list_name)
+{
+ if (list_str) {
+ *list = strlist__new(true, list_str);
+ if (!*list) {
+ fprintf(stderr, "problems parsing %s list\n",
+ list_name);
+ exit(129);
+ }
+ }
+}
+
+int cmd_report(int argc, const char **argv, const char *prefix __used)
{
symbol__init();
@@ -1575,6 +1952,10 @@ int cmd_report(int argc, const char **argv, const char *prefix)
if (argc)
usage_with_options(report_usage, options);
+ setup_list(&dso_list, dso_list_str, "dso");
+ setup_list(&comm_list, comm_list_str, "comm");
+ setup_list(&sym_list, sym_list_str, "symbol");
+
setup_pager();
return __cmd_report();
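As a usage note for the new -c/--callchain option wired up above: parse_callchain_opt() takes "output_type,min_percent", so "perf report -c flat", "perf report -c graph,1.0" and "perf report -c fractal,0.5" select CHAIN_FLAT, CHAIN_GRAPH_ABS and CHAIN_GRAPH_REL respectively (the default string is "fractal,0.5"), and the data must have been recorded with call-graph sampling (perf record -g) or the report exits with an error.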
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 6d3eeac1ea25..27921a8ce1a9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -32,6 +32,7 @@
* Wu Fengguang <fengguang.wu@intel.com>
* Mike Galbraith <efault@gmx.de>
* Paul Mackerras <paulus@samba.org>
+ * Jaswinder Singh Rajput <jaswinder@kernel.org>
*
* Released under the GPL v2. (and only v2, not any later version)
*/
@@ -45,7 +46,7 @@
#include <sys/prctl.h>
#include <math.h>
-static struct perf_counter_attr default_attrs[MAX_COUNTERS] = {
+static struct perf_counter_attr default_attrs[] = {
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
@@ -59,42 +60,28 @@ static struct perf_counter_attr default_attrs[MAX_COUNTERS] = {
};
+#define MAX_RUN 100
+
static int system_wide = 0;
-static int inherit = 1;
static int verbose = 0;
+static unsigned int nr_cpus = 0;
+static int run_idx = 0;
-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
-
-static int target_pid = -1;
-static int nr_cpus = 0;
-static unsigned int page_size;
-
+static int run_count = 1;
+static int inherit = 1;
static int scale = 1;
+static int target_pid = -1;
+static int null_run = 0;
-static const unsigned int default_count[] = {
- 1000000,
- 1000000,
- 10000,
- 10000,
- 1000000,
- 10000,
-};
-
-#define MAX_RUN 100
-
-static int run_count = 1;
-static int run_idx = 0;
-
-static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
-static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
-
-//static u64 event_hist[MAX_RUN][MAX_COUNTERS][3];
-
+static int fd[MAX_NR_CPUS][MAX_COUNTERS];
static u64 runtime_nsecs[MAX_RUN];
static u64 walltime_nsecs[MAX_RUN];
static u64 runtime_cycles[MAX_RUN];
+static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
+static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
+
static u64 event_res_avg[MAX_COUNTERS][3];
static u64 event_res_noise[MAX_COUNTERS][3];
@@ -109,7 +96,14 @@ static u64 walltime_nsecs_noise;
static u64 runtime_cycles_avg;
static u64 runtime_cycles_noise;
-static void create_perf_stat_counter(int counter)
+#define MATCH_EVENT(t, c, counter) \
+ (attrs[counter].type == PERF_TYPE_##t && \
+ attrs[counter].config == PERF_COUNT_##c)
+
+#define ERR_PERF_OPEN \
+"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n"
+
+static void create_perf_stat_counter(int counter, int pid)
{
struct perf_counter_attr *attr = attrs + counter;
@@ -118,21 +112,23 @@ static void create_perf_stat_counter(int counter)
PERF_FORMAT_TOTAL_TIME_RUNNING;
if (system_wide) {
- int cpu;
- for (cpu = 0; cpu < nr_cpus; cpu ++) {
+ unsigned int cpu;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
- if (fd[cpu][counter] < 0 && verbose) {
- printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[cpu][counter], strerror(errno));
- }
+ if (fd[cpu][counter] < 0 && verbose)
+ fprintf(stderr, ERR_PERF_OPEN, counter,
+ fd[cpu][counter], strerror(errno));
}
} else {
- attr->inherit = inherit;
- attr->disabled = 1;
-
- fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0);
- if (fd[0][counter] < 0 && verbose) {
- printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[0][counter], strerror(errno));
- }
+ attr->inherit = inherit;
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+
+ fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0);
+ if (fd[0][counter] < 0 && verbose)
+ fprintf(stderr, ERR_PERF_OPEN, counter,
+ fd[0][counter], strerror(errno));
}
}
@@ -141,13 +137,8 @@ static void create_perf_stat_counter(int counter)
*/
static inline int nsec_counter(int counter)
{
- if (attrs[counter].type != PERF_TYPE_SOFTWARE)
- return 0;
-
- if (attrs[counter].config == PERF_COUNT_SW_CPU_CLOCK)
- return 1;
-
- if (attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK)
+ if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
+ MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
return 1;
return 0;
@@ -159,8 +150,8 @@ static inline int nsec_counter(int counter)
static void read_counter(int counter)
{
u64 *count, single_count[3];
- ssize_t res;
- int cpu, nv;
+ unsigned int cpu;
+ size_t res, nv;
int scaled;
count = event_res[run_idx][counter];
@@ -168,12 +159,13 @@ static void read_counter(int counter)
count[0] = count[1] = count[2] = 0;
nv = scale ? 3 : 1;
- for (cpu = 0; cpu < nr_cpus; cpu ++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
if (fd[cpu][counter] < 0)
continue;
res = read(fd[cpu][counter], single_count, nv * sizeof(u64));
assert(res == nv * sizeof(u64));
+
close(fd[cpu][counter]);
fd[cpu][counter] = -1;
@@ -201,46 +193,81 @@ static void read_counter(int counter)
/*
* Save the full runtime - to allow normalization during printout:
*/
- if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
- attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK)
+ if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
runtime_nsecs[run_idx] = count[0];
- if (attrs[counter].type == PERF_TYPE_HARDWARE &&
- attrs[counter].config == PERF_COUNT_HW_CPU_CYCLES)
+ if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
runtime_cycles[run_idx] = count[0];
}
-static int run_perf_stat(int argc, const char **argv)
+static int run_perf_stat(int argc __used, const char **argv)
{
unsigned long long t0, t1;
int status = 0;
int counter;
int pid;
+ int child_ready_pipe[2], go_pipe[2];
+ char buf;
if (!system_wide)
nr_cpus = 1;
- for (counter = 0; counter < nr_counters; counter++)
- create_perf_stat_counter(counter);
-
- /*
- * Enable counters and exec the command:
- */
- t0 = rdclock();
- prctl(PR_TASK_PERF_COUNTERS_ENABLE);
+ if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
+ perror("failed to create pipes");
+ exit(1);
+ }
if ((pid = fork()) < 0)
perror("failed to fork");
if (!pid) {
- if (execvp(argv[0], (char **)argv)) {
- perror(argv[0]);
- exit(-1);
- }
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ if (read(go_pipe[0], &buf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ exit(-1);
}
+ /*
+ * Wait for the child to be ready to exec.
+ */
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ if (read(child_ready_pipe[0], &buf, 1) == -1)
+ perror("unable to read pipe");
+ close(child_ready_pipe[0]);
+
+ for (counter = 0; counter < nr_counters; counter++)
+ create_perf_stat_counter(counter, pid);
+
+ /*
+ * Enable counters and exec the command:
+ */
+ t0 = rdclock();
+
+ close(go_pipe[1]);
wait(&status);
- prctl(PR_TASK_PERF_COUNTERS_DISABLE);
t1 = rdclock();
walltime_nsecs[run_idx] = t1 - t0;
@@ -262,11 +289,9 @@ static void nsec_printout(int counter, u64 *count, u64 *noise)
{
double msecs = (double)count[0] / 1000000;
- fprintf(stderr, " %14.6f %-20s", msecs, event_name(counter));
-
- if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
- attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) {
+ fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter));
+ if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
if (walltime_nsecs_avg)
fprintf(stderr, " # %10.3f CPUs ",
(double)count[0] / (double)walltime_nsecs_avg);
@@ -276,12 +301,10 @@ static void nsec_printout(int counter, u64 *count, u64 *noise)
static void abs_printout(int counter, u64 *count, u64 *noise)
{
- fprintf(stderr, " %14Ld %-20s", count[0], event_name(counter));
+ fprintf(stderr, " %14Ld %-24s", count[0], event_name(counter));
if (runtime_cycles_avg &&
- attrs[counter].type == PERF_TYPE_HARDWARE &&
- attrs[counter].config == PERF_COUNT_HW_INSTRUCTIONS) {
-
+ MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
fprintf(stderr, " # %10.3f IPC ",
(double)count[0] / (double)runtime_cycles_avg);
} else {
@@ -306,7 +329,7 @@ static void print_counter(int counter)
scaled = event_scaled_avg[counter];
if (scaled == -1) {
- fprintf(stderr, " %14s %-20s\n",
+ fprintf(stderr, " %14s %-24s\n",
"<not counted>", event_name(counter));
return;
}
@@ -364,8 +387,11 @@ static void calc_avg(void)
event_res_avg[j]+1, event_res[i][j]+1);
update_avg("counter/2", j,
event_res_avg[j]+2, event_res[i][j]+2);
- update_avg("scaled", j,
- event_scaled_avg + j, event_scaled[i]+j);
+ if (event_scaled[i][j] != (u64)-1)
+ update_avg("scaled", j,
+ event_scaled_avg + j, event_scaled[i]+j);
+ else
+ event_scaled_avg[j] = -1;
}
}
runtime_nsecs_avg /= run_count;
@@ -429,11 +455,14 @@ static void print_stat(int argc, const char **argv)
for (counter = 0; counter < nr_counters; counter++)
print_counter(counter);
-
fprintf(stderr, "\n");
- fprintf(stderr, " %14.9f seconds time elapsed.\n",
+ fprintf(stderr, " %14.9f seconds time elapsed",
(double)walltime_nsecs_avg/1e9);
- fprintf(stderr, "\n");
+ if (run_count > 1) {
+ fprintf(stderr, " ( +- %7.3f%% )",
+ 100.0*(double)walltime_nsecs_noise/(double)walltime_nsecs_avg);
+ }
+ fprintf(stderr, "\n\n");
}
static volatile int signr = -1;
@@ -466,36 +495,37 @@ static const struct option options[] = {
OPT_INTEGER('p', "pid", &target_pid,
"stat events on existing pid"),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
- "system-wide collection from all CPUs"),
+ "system-wide collection from all CPUs"),
OPT_BOOLEAN('S', "scale", &scale,
- "scale/normalize counters"),
+ "scale/normalize counters"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &run_count,
"repeat command and print average + stddev (max: 100)"),
+ OPT_BOOLEAN('n', "null", &null_run,
+ "null run - dont start any counters"),
OPT_END()
};
-int cmd_stat(int argc, const char **argv, const char *prefix)
+int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
int status;
- page_size = sysconf(_SC_PAGE_SIZE);
-
- memcpy(attrs, default_attrs, sizeof(attrs));
-
argc = parse_options(argc, argv, options, stat_usage, 0);
if (!argc)
usage_with_options(stat_usage, options);
if (run_count <= 0 || run_count > MAX_RUN)
usage_with_options(stat_usage, options);
- if (!nr_counters)
- nr_counters = 8;
+ /* Set attrs and nr_counters if no event is selected and !null_run */
+ if (!null_run && !nr_counters) {
+ memcpy(attrs, default_attrs, sizeof(default_attrs));
+ nr_counters = ARRAY_SIZE(default_attrs);
+ }
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
assert(nr_cpus <= MAX_NR_CPUS);
- assert(nr_cpus >= 0);
+ assert((int)nr_cpus >= 0);
/*
* We don't want to block the signals - that would cause
@@ -511,7 +541,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix)
status = 0;
for (run_idx = 0; run_idx < run_count; run_idx++) {
if (run_count != 1 && verbose)
- fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx+1);
+ fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
status = run_perf_stat(argc, argv);
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 5352b5e352ed..95d5c0ae375a 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -23,7 +23,7 @@
#include "util/symbol.h"
#include "util/color.h"
#include "util/util.h"
-#include "util/rbtree.h"
+#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -66,6 +66,7 @@ static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int freq = 0;
static int verbose = 0;
+static char *vmlinux = NULL;
static char *sym_filter;
static unsigned long filter_start;
@@ -238,7 +239,6 @@ static void print_sym_table(void)
for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
struct symbol *sym = (struct symbol *)(syme + 1);
- char *color = PERF_COLOR_NORMAL;
double pcnt;
if (++printed > print_entries || syme->snap_count < count_filter)
@@ -247,29 +247,20 @@ static void print_sym_table(void)
pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
sum_ksamples));
- /*
- * We color high-overhead entries in red, mid-overhead
- * entries in green - and keep the low overhead places
- * normal:
- */
- if (pcnt >= 5.0) {
- color = PERF_COLOR_RED;
- } else {
- if (pcnt >= 0.5)
- color = PERF_COLOR_GREEN;
- }
-
if (nr_counters == 1)
printf("%20.2f - ", syme->weight);
else
printf("%9.1f %10ld - ", syme->weight, syme->snap_count);
- color_fprintf(stdout, color, "%4.1f%%", pcnt);
- printf(" - %016llx : %s\n", sym->start, sym->name);
+ percent_color_fprintf(stdout, "%4.1f%%", pcnt);
+ printf(" - %016llx : %s", sym->start, sym->name);
+ if (sym->module)
+ printf("\t[%s]", sym->module->name);
+ printf("\n");
}
}
-static void *display_thread(void *arg)
+static void *display_thread(void *arg __used)
{
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
int delay_msecs = delay_secs * 1000;
@@ -286,11 +277,31 @@ static void *display_thread(void *arg)
return NULL;
}
+/* Tag samples to be skipped. */
+static const char *skip_symbols[] = {
+ "default_idle",
+ "cpu_idle",
+ "enter_idle",
+ "exit_idle",
+ "mwait_idle",
+ "ppc64_runlatch_off",
+ "pseries_dedicated_idle_sleep",
+ NULL
+};
+
static int symbol_filter(struct dso *self, struct symbol *sym)
{
static int filter_match;
struct sym_entry *syme;
const char *name = sym->name;
+ int i;
+
+ /*
+ * ppc64 uses function descriptors and appends a '.' to the
+ * start of every instruction address. Remove it.
+ */
+ if (name[0] == '.')
+ name++;
if (!strcmp(name, "_text") ||
!strcmp(name, "_etext") ||
@@ -302,13 +313,12 @@ static int symbol_filter(struct dso *self, struct symbol *sym)
return 1;
syme = dso__sym_priv(self, sym);
- /* Tag samples to be skipped. */
- if (!strcmp("default_idle", name) ||
- !strcmp("cpu_idle", name) ||
- !strcmp("enter_idle", name) ||
- !strcmp("exit_idle", name) ||
- !strcmp("mwait_idle", name))
- syme->skip = 1;
+ for (i = 0; skip_symbols[i]; i++) {
+ if (!strcmp(skip_symbols[i], name)) {
+ syme->skip = 1;
+ break;
+ }
+ }
if (filter_match == 1) {
filter_end = sym->start;
@@ -340,12 +350,13 @@ static int parse_symbols(void)
{
struct rb_node *node;
struct symbol *sym;
+ int modules = vmlinux ? 1 : 0;
kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
if (kernel_dso == NULL)
return -1;
- if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) != 0)
+ if (dso__load_kernel(kernel_dso, vmlinux, symbol_filter, verbose, modules) <= 0)
goto out_delete_dso;
node = rb_first(&kernel_dso->syms);
@@ -392,11 +403,11 @@ static void record_ip(u64 ip, int counter)
samples--;
}
-static void process_event(u64 ip, int counter)
+static void process_event(u64 ip, int counter, int user)
{
samples++;
- if (ip < min_ip || ip > max_ip) {
+ if (user) {
userspace_samples++;
return;
}
@@ -407,7 +418,7 @@ static void process_event(u64 ip, int counter)
struct mmap_data {
int counter;
void *base;
- unsigned int mask;
+ int mask;
unsigned int prev;
};
@@ -509,9 +520,10 @@ static void mmap_read_counter(struct mmap_data *md)
old += size;
- if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
- if (event->header.type & PERF_SAMPLE_IP)
- process_event(event->ip.ip, md->counter);
+ if (event->header.type == PERF_EVENT_SAMPLE) {
+ int user =
+ (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER;
+ process_event(event->ip.ip, md->counter, user);
}
}
@@ -660,6 +672,7 @@ static const struct option options[] = {
"system-wide collection from all CPUs"),
OPT_INTEGER('C', "CPU", &profile_cpu,
"CPU to profile on"),
+ OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
OPT_INTEGER('m', "mmap-pages", &mmap_pages,
"number of mmap data pages"),
OPT_INTEGER('r', "realtime", &realtime_prio,
@@ -674,7 +687,7 @@ static const struct option options[] = {
"put the counters into a counter group"),
OPT_STRING('s', "sym-filter", &sym_filter, "pattern",
"only display symbols matchig this pattern"),
- OPT_BOOLEAN('z', "zero", &group,
+ OPT_BOOLEAN('z', "zero", &zero,
"zero history across updates"),
OPT_INTEGER('F', "freq", &freq,
"profile at this frequency"),
@@ -685,10 +698,12 @@ static const struct option options[] = {
OPT_END()
};
-int cmd_top(int argc, const char **argv, const char *prefix)
+int cmd_top(int argc, const char **argv, const char *prefix __used)
{
int counter;
+ symbol__init();
+
page_size = sysconf(_SC_PAGE_SIZE);
argc = parse_options(argc, argv, options, top_usage, 0);
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 4eb725933703..c5656784c61d 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -229,9 +229,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
use_pager = 1;
commit_pager_choice();
- if (p->option & NEED_WORK_TREE)
- /* setup_work_tree() */;
-
status = p->fn(argc, argv, prefix);
if (status)
return status & 0xff;
@@ -266,7 +263,7 @@ static void handle_internal_command(int argc, const char **argv)
{ "annotate", cmd_annotate, 0 },
{ "version", cmd_version, 0 },
};
- int i;
+ unsigned int i;
static const char ext[] = STRIP_EXTENSION;
if (sizeof(ext) > 1) {
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index ceb68aa51f7f..63e67cc5487b 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -19,13 +19,29 @@
#define cpu_relax() asm volatile("" ::: "memory");
#endif
+#ifdef __sh__
+#include "../../arch/sh/include/asm/unistd.h"
+#if defined(__SH4A__) || defined(__SH5__)
+# define rmb() asm volatile("synco" ::: "memory")
+#else
+# define rmb() asm volatile("" ::: "memory")
+#endif
+#define cpu_relax() asm volatile("" ::: "memory")
+#endif
+
+#ifdef __hppa__
+#include "../../arch/parisc/include/asm/unistd.h"
+#define rmb() asm volatile("" ::: "memory")
+#define cpu_relax() asm volatile("" ::: "memory");
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include "../../include/linux/perf_counter.h"
-#include "types.h"
+#include "util/types.h"
/*
* prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
@@ -52,6 +68,8 @@ static inline unsigned long long rdclock(void)
#define __user
#define asmlinkage
+#define __used __attribute__((__unused__))
+
#define unlikely(x) __builtin_expect(!!(x), 0)
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
@@ -72,10 +90,9 @@ sys_perf_counter_open(struct perf_counter_attr *attr,
#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256
-struct perf_file_header {
- u64 version;
- u64 sample_type;
- u64 data_size;
+struct ip_callchain {
+ u64 nr;
+ u64 ips[0];
};
#endif
diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c
index 9b3dd2b428df..b8144e80bb1e 100644
--- a/tools/perf/util/alias.c
+++ b/tools/perf/util/alias.c
@@ -3,7 +3,7 @@
static const char *alias_key;
static char *alias_val;
-static int alias_lookup_cb(const char *k, const char *v, void *cb)
+static int alias_lookup_cb(const char *k, const char *v, void *cb __used)
{
if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) {
if (!v)
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 393d6146d13b..161d5f413e28 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -3,6 +3,7 @@
#include "util.h"
#include "strbuf.h"
+#include "../perf.h"
#define PERF_DIR_ENVIRONMENT "PERF_DIR"
#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE"
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
new file mode 100644
index 000000000000..9d3c8141b8c1
--- /dev/null
+++ b/tools/perf/util/callchain.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Handle the callchains from the stream in an ad-hoc radix tree and then
+ * sort them in an rbtree.
+ *
+ * Using a radix tree for the code paths provides fast retrieval and factorizes
+ * memory use. It also lets us use the paths in a hierarchical graph view.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+
+#include "callchain.h"
+
+#define chain_for_each_child(child, parent) \
+ list_for_each_entry(child, &parent->children, brothers)
+
+static void
+rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
+ enum chain_mode mode)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct callchain_node *rnode;
+
+ while (*p) {
+ parent = *p;
+ rnode = rb_entry(parent, struct callchain_node, rb_node);
+
+ switch (mode) {
+ case CHAIN_FLAT:
+ if (rnode->hit < chain->hit)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ break;
+ case CHAIN_GRAPH_ABS: /* Fall through */
+ case CHAIN_GRAPH_REL:
+ if (rnode->cumul_hit < chain->cumul_hit)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ break;
+ default:
+ break;
+ }
+ }
+
+ rb_link_node(&chain->rb_node, parent, p);
+ rb_insert_color(&chain->rb_node, root);
+}
+
+static void
+__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
+ u64 min_hit)
+{
+ struct callchain_node *child;
+
+ chain_for_each_child(child, node)
+ __sort_chain_flat(rb_root, child, min_hit);
+
+ if (node->hit && node->hit >= min_hit)
+ rb_insert_callchain(rb_root, node, CHAIN_FLAT);
+}
+
+/*
+ * Once we get all the callchains from the stream, we can
+ * sort them by hit
+ */
+static void
+sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
+ u64 min_hit, struct callchain_param *param __used)
+{
+ __sort_chain_flat(rb_root, node, min_hit);
+}
+
+static void __sort_chain_graph_abs(struct callchain_node *node,
+ u64 min_hit)
+{
+ struct callchain_node *child;
+
+ node->rb_root = RB_ROOT;
+
+ chain_for_each_child(child, node) {
+ __sort_chain_graph_abs(child, min_hit);
+ if (child->cumul_hit >= min_hit)
+ rb_insert_callchain(&node->rb_root, child,
+ CHAIN_GRAPH_ABS);
+ }
+}
+
+static void
+sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_node *chain_root,
+ u64 min_hit, struct callchain_param *param __used)
+{
+ __sort_chain_graph_abs(chain_root, min_hit);
+ rb_root->rb_node = chain_root->rb_root.rb_node;
+}
+
+static void __sort_chain_graph_rel(struct callchain_node *node,
+ double min_percent)
+{
+ struct callchain_node *child;
+ u64 min_hit;
+
+ node->rb_root = RB_ROOT;
+ min_hit = node->cumul_hit * min_percent / 100.0;
+
+ chain_for_each_child(child, node) {
+ __sort_chain_graph_rel(child, min_percent);
+ if (child->cumul_hit >= min_hit)
+ rb_insert_callchain(&node->rb_root, child,
+ CHAIN_GRAPH_REL);
+ }
+}
+
+static void
+sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_node *chain_root,
+ u64 min_hit __used, struct callchain_param *param)
+{
+ __sort_chain_graph_rel(chain_root, param->min_percent);
+ rb_root->rb_node = chain_root->rb_root.rb_node;
+}
+
+int register_callchain_param(struct callchain_param *param)
+{
+ switch (param->mode) {
+ case CHAIN_GRAPH_ABS:
+ param->sort = sort_chain_graph_abs;
+ break;
+ case CHAIN_GRAPH_REL:
+ param->sort = sort_chain_graph_rel;
+ break;
+ case CHAIN_FLAT:
+ param->sort = sort_chain_flat;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Create a child for a parent. If inherit_children, then the new child
+ * will become the new parent of its parent's children
+ */
+static struct callchain_node *
+create_child(struct callchain_node *parent, bool inherit_children)
+{
+ struct callchain_node *new;
+
+ new = malloc(sizeof(*new));
+ if (!new) {
+ perror("not enough memory to create child for code path tree");
+ return NULL;
+ }
+ new->parent = parent;
+ INIT_LIST_HEAD(&new->children);
+ INIT_LIST_HEAD(&new->val);
+
+ if (inherit_children) {
+ struct callchain_node *next;
+
+ list_splice(&parent->children, &new->children);
+ INIT_LIST_HEAD(&parent->children);
+
+ chain_for_each_child(next, new)
+ next->parent = new;
+ }
+ list_add_tail(&new->brothers, &parent->children);
+
+ return new;
+}
+
+/*
+ * Fill the node with callchain values
+ */
+static void
+fill_node(struct callchain_node *node, struct ip_callchain *chain,
+ int start, struct symbol **syms)
+{
+ unsigned int i;
+
+ for (i = start; i < chain->nr; i++) {
+ struct callchain_list *call;
+
+ call = malloc(sizeof(*call));
+ if (!call) {
+ perror("not enough memory for the code path tree");
+ return;
+ }
+ call->ip = chain->ips[i];
+ call->sym = syms[i];
+ list_add_tail(&call->list, &node->val);
+ }
+ node->val_nr = chain->nr - start;
+ if (!node->val_nr)
+ printf("Warning: empty node in callchain tree\n");
+}
+
+static void
+add_child(struct callchain_node *parent, struct ip_callchain *chain,
+ int start, struct symbol **syms)
+{
+ struct callchain_node *new;
+
+ new = create_child(parent, false);
+ fill_node(new, chain, start, syms);
+
+ new->cumul_hit = new->hit = 1;
+}
+
+/*
+ * Split the parent in two parts (a new child is created) and
+ * give a part of its callchain to the created child.
+ * Then create another child to host the given callchain of the new branch
+ */
+static void
+split_add_child(struct callchain_node *parent, struct ip_callchain *chain,
+ struct callchain_list *to_split, int idx_parents, int idx_local,
+ struct symbol **syms)
+{
+ struct callchain_node *new;
+ struct list_head *old_tail;
+ unsigned int idx_total = idx_parents + idx_local;
+
+ /* split */
+ new = create_child(parent, true);
+
+ /* split the callchain and move a part to the new child */
+ old_tail = parent->val.prev;
+ list_del_range(&to_split->list, old_tail);
+ new->val.next = &to_split->list;
+ new->val.prev = old_tail;
+ to_split->list.prev = &new->val;
+ old_tail->next = &new->val;
+
+ /* split the hits */
+ new->hit = parent->hit;
+ new->cumul_hit = parent->cumul_hit;
+ new->val_nr = parent->val_nr - idx_local;
+ parent->val_nr = idx_local;
+
+ /* create a new child for the new branch if any */
+ if (idx_total < chain->nr) {
+ parent->hit = 0;
+ add_child(parent, chain, idx_total, syms);
+ } else {
+ parent->hit = 1;
+ }
+}
+
+static int
+__append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ unsigned int start, struct symbol **syms);
+
+static void
+__append_chain_children(struct callchain_node *root, struct ip_callchain *chain,
+ struct symbol **syms, unsigned int start)
+{
+ struct callchain_node *rnode;
+
+ /* lookup in the children */
+ chain_for_each_child(rnode, root) {
+ unsigned int ret = __append_chain(rnode, chain, start, syms);
+
+ if (!ret)
+ goto cumul;
+ }
+ /* nothing in children, add to the current node */
+ add_child(root, chain, start, syms);
+
+cumul:
+ root->cumul_hit++;
+}
+
+static int
+__append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ unsigned int start, struct symbol **syms)
+{
+ struct callchain_list *cnode;
+ unsigned int i = start;
+ bool found = false;
+
+ /*
+ * Lookup in the current node
+ * If we have a symbol, then compare the start to match
+ * anywhere inside a function.
+ */
+ list_for_each_entry(cnode, &root->val, list) {
+ if (i == chain->nr)
+ break;
+ if (cnode->sym && syms[i]) {
+ if (cnode->sym->start != syms[i]->start)
+ break;
+ } else if (cnode->ip != chain->ips[i])
+ break;
+ if (!found)
+ found = true;
+ i++;
+ }
+
+ /* no match: rely on the parent */
+ if (!found)
+ return -1;
+
+ /* we match only a part of the node. Split it and add the new chain */
+ if (i - start < root->val_nr) {
+ split_add_child(root, chain, cnode, start, i - start, syms);
+ return 0;
+ }
+
+ /* we match 100% of the path, increment the hit */
+ if (i - start == root->val_nr && i == chain->nr) {
+ root->hit++;
+ root->cumul_hit++;
+
+ return 0;
+ }
+
+ /* We match the node and still have a part remaining */
+ __append_chain_children(root, chain, syms, i);
+
+ return 0;
+}
+
+void append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ struct symbol **syms)
+{
+ __append_chain_children(root, chain, syms, 0);
+}
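For orientation, a minimal sketch of how this new API is driven by its caller; the calls and field names below are taken from the builtin-report.c hunks earlier in this diff, with hist_entry filling and symbol resolution remaining the caller's responsibility:

    /* once, at option-parsing time: pick a sort mode for the output */
    callchain_param.mode = CHAIN_GRAPH_REL;   /* "fractal" */
    callchain_param.min_percent = 0.5;
    if (register_callchain_param(&callchain_param) < 0)
        die("Can't register callchain params");

    /* per hist_entry: feed each resolved sample chain into the radix tree */
    callchain_init(&he->callchain);
    append_chain(&he->callchain, chain, syms);

    /* at output time: sort the accumulated tree into he->sorted_chain */
    callchain_param.sort(&he->sorted_chain, &he->callchain,
                         min_callchain_hits, &callchain_param);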
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
new file mode 100644
index 000000000000..7812122bea1d
--- /dev/null
+++ b/tools/perf/util/callchain.h
@@ -0,0 +1,54 @@
+#ifndef __PERF_CALLCHAIN_H
+#define __PERF_CALLCHAIN_H
+
+#include "../perf.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include "symbol.h"
+
+enum chain_mode {
+ CHAIN_FLAT,
+ CHAIN_GRAPH_ABS,
+ CHAIN_GRAPH_REL
+};
+
+struct callchain_node {
+ struct callchain_node *parent;
+ struct list_head brothers;
+ struct list_head children;
+ struct list_head val;
+ struct rb_node rb_node; /* to sort nodes in an rbtree */
+ struct rb_root rb_root; /* sorted tree of children */
+ unsigned int val_nr;
+ u64 hit;
+ u64 cumul_hit; /* hit + hits of children */
+};
+
+struct callchain_param;
+
+typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_node *,
+ u64, struct callchain_param *);
+
+struct callchain_param {
+ enum chain_mode mode;
+ double min_percent;
+ sort_chain_func_t sort;
+};
+
+struct callchain_list {
+ u64 ip;
+ struct symbol *sym;
+ struct list_head list;
+};
+
+static inline void callchain_init(struct callchain_node *node)
+{
+ INIT_LIST_HEAD(&node->brothers);
+ INIT_LIST_HEAD(&node->children);
+ INIT_LIST_HEAD(&node->val);
+}
+
+int register_callchain_param(struct callchain_param *param);
+void append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ struct symbol **syms);
+#endif
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 9a8c20ccc53e..90a044d1fe7d 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -11,7 +11,8 @@ static int parse_color(const char *name, int len)
};
char *end;
int i;
- for (i = 0; i < ARRAY_SIZE(color_names); i++) {
+
+ for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
const char *str = color_names[i];
if (!strncasecmp(name, str, len) && !str[len])
return i - 1;
@@ -28,7 +29,8 @@ static int parse_attr(const char *name, int len)
static const char * const attr_names[] = {
"bold", "dim", "ul", "blink", "reverse"
};
- int i;
+ unsigned int i;
+
for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
const char *str = attr_names[i];
if (!strncasecmp(name, str, len) && !str[len])
@@ -222,10 +224,12 @@ int color_fwrite_lines(FILE *fp, const char *color,
{
if (!*color)
return fwrite(buf, count, 1, fp) != 1;
+
while (count) {
char *p = memchr(buf, '\n', count);
+
if (p != buf && (fputs(color, fp) < 0 ||
- fwrite(buf, p ? p - buf : count, 1, fp) != 1 ||
+ fwrite(buf, p ? (size_t)(p - buf) : count, 1, fp) != 1 ||
fputs(PERF_COLOR_RESET, fp) < 0))
return -1;
if (!p)
@@ -238,4 +242,31 @@ int color_fwrite_lines(FILE *fp, const char *color,
return 0;
}
+char *get_percent_color(double percent)
+{
+ char *color = PERF_COLOR_NORMAL;
+ /*
+ * We color high-overhead entries in red, mid-overhead
+ * entries in green - and keep the low overhead places
+ * normal:
+ */
+ if (percent >= MIN_RED)
+ color = PERF_COLOR_RED;
+ else {
+ if (percent > MIN_GREEN)
+ color = PERF_COLOR_GREEN;
+ }
+ return color;
+}
+
+int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
+{
+ int r;
+ char *color;
+
+ color = get_percent_color(percent);
+ r = color_fprintf(fp, color, fmt, percent);
+
+ return r;
+}
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 5abfd379582b..706cec50bd25 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -15,6 +15,9 @@
#define PERF_COLOR_CYAN "\033[36m"
#define PERF_COLOR_BG_RED "\033[41m"
+#define MIN_GREEN 0.5
+#define MIN_RED 5.0
+
/*
* This variable stores the value of color.ui
*/
@@ -32,5 +35,7 @@ void color_parse_mem(const char *value, int len, const char *var, char *dst);
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
+int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
+char *get_percent_color(double percent);
#endif /* COLOR_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 3dd13faa6a27..780df541006d 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -47,10 +47,12 @@ static int get_next_char(void)
static char *parse_value(void)
{
static char value[1024];
- int quote = 0, comment = 0, len = 0, space = 0;
+ int quote = 0, comment = 0, space = 0;
+ size_t len = 0;
for (;;) {
int c = get_next_char();
+
if (len >= sizeof(value) - 1)
return NULL;
if (c == '\n') {
@@ -353,13 +355,13 @@ int perf_config_string(const char **dest, const char *var, const char *value)
return 0;
}
-static int perf_default_core_config(const char *var, const char *value)
+static int perf_default_core_config(const char *var __used, const char *value __used)
{
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
-int perf_default_config(const char *var, const char *value, void *dummy)
+int perf_default_config(const char *var, const char *value, void *dummy __used)
{
if (!prefixcmp(var, "core."))
return perf_default_core_config(var, value);
@@ -471,10 +473,10 @@ static int matches(const char* key, const char* value)
!regexec(store.value_regex, value, 0, NULL, 0)));
}
-static int store_aux(const char* key, const char* value, void *cb)
+static int store_aux(const char* key, const char* value, void *cb __used)
{
+ int section_len;
const char *ep;
- size_t section_len;
switch (store.state) {
case KEY_SEEN:
@@ -551,7 +553,7 @@ static int store_write_section(int fd, const char* key)
strbuf_addf(&sb, "[%.*s]\n", store.baselen, key);
}
- success = write_in_full(fd, sb.buf, sb.len) == sb.len;
+ success = (write_in_full(fd, sb.buf, sb.len) == (ssize_t)sb.len);
strbuf_release(&sb);
return success;
@@ -599,7 +601,7 @@ static int store_write_pair(int fd, const char* key, const char* value)
}
strbuf_addf(&sb, "%s\n", quote);
- success = write_in_full(fd, sb.buf, sb.len) == sb.len;
+ success = (write_in_full(fd, sb.buf, sb.len) == (ssize_t)sb.len);
strbuf_release(&sb);
return success;
@@ -741,7 +743,7 @@ int perf_config_set_multivar(const char* key, const char* value,
} else {
struct stat st;
char* contents;
- size_t contents_sz, copy_begin, copy_end;
+ ssize_t contents_sz, copy_begin, copy_end;
int i, new_line = 0;
if (value_regex == NULL)
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
index d39292263153..34a352867382 100644
--- a/tools/perf/util/exec_cmd.c
+++ b/tools/perf/util/exec_cmd.c
@@ -1,6 +1,9 @@
#include "cache.h"
#include "exec_cmd.h"
#include "quote.h"
+
+#include <string.h>
+
#define MAX_ARGS 32
extern char **environ;
@@ -51,7 +54,7 @@ const char *perf_extract_argv0_path(const char *argv0)
slash--;
if (slash >= argv0) {
- argv0_path = strndup(argv0, slash - argv0);
+ argv0_path = xstrndup(argv0, slash - argv0);
return slash + 1;
}
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
new file mode 100644
index 000000000000..450384b3bbe5
--- /dev/null
+++ b/tools/perf/util/header.c
@@ -0,0 +1,242 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "util.h"
+#include "header.h"
+
+/*
+ *
+ */
+
+struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr)
+{
+ struct perf_header_attr *self = malloc(sizeof(*self));
+
+ if (!self)
+ die("nomem");
+
+ self->attr = *attr;
+ self->ids = 0;
+ self->size = 1;
+ self->id = malloc(sizeof(u64));
+
+ if (!self->id)
+ die("nomem");
+
+ return self;
+}
+
+void perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
+{
+ int pos = self->ids;
+
+ self->ids++;
+ if (self->ids > self->size) {
+ self->size *= 2;
+ self->id = realloc(self->id, self->size * sizeof(u64));
+ if (!self->id)
+ die("nomem");
+ }
+ self->id[pos] = id;
+}
+
+/*
+ *
+ */
+
+struct perf_header *perf_header__new(void)
+{
+ struct perf_header *self = malloc(sizeof(*self));
+
+ if (!self)
+ die("nomem");
+
+ self->frozen = 0;
+
+ self->attrs = 0;
+ self->size = 1;
+ self->attr = malloc(sizeof(void *));
+
+ if (!self->attr)
+ die("nomem");
+
+ self->data_offset = 0;
+ self->data_size = 0;
+
+ return self;
+}
+
+void perf_header__add_attr(struct perf_header *self,
+ struct perf_header_attr *attr)
+{
+ int pos = self->attrs;
+
+ if (self->frozen)
+ die("frozen");
+
+ self->attrs++;
+ if (self->attrs > self->size) {
+ self->size *= 2;
+ self->attr = realloc(self->attr, self->size * sizeof(void *));
+ if (!self->attr)
+ die("nomem");
+ }
+ self->attr[pos] = attr;
+}
+
+static const char *__perf_magic = "PERFFILE";
+
+#define PERF_MAGIC (*(u64 *)__perf_magic)
+
+struct perf_file_section {
+ u64 offset;
+ u64 size;
+};
+
+struct perf_file_attr {
+ struct perf_counter_attr attr;
+ struct perf_file_section ids;
+};
+
+struct perf_file_header {
+ u64 magic;
+ u64 size;
+ u64 attr_size;
+ struct perf_file_section attrs;
+ struct perf_file_section data;
+};
+
+static void do_write(int fd, void *buf, size_t size)
+{
+ while (size) {
+ int ret = write(fd, buf, size);
+
+ if (ret < 0)
+ die("failed to write");
+
+ size -= ret;
+ buf += ret;
+ }
+}
+
+void perf_header__write(struct perf_header *self, int fd)
+{
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ struct perf_header_attr *attr;
+ int i;
+
+ lseek(fd, sizeof(f_header), SEEK_SET);
+
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ attr->id_offset = lseek(fd, 0, SEEK_CUR);
+ do_write(fd, attr->id, attr->ids * sizeof(u64));
+ }
+
+
+ self->attr_offset = lseek(fd, 0, SEEK_CUR);
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ f_attr = (struct perf_file_attr){
+ .attr = attr->attr,
+ .ids = {
+ .offset = attr->id_offset,
+ .size = attr->ids * sizeof(u64),
+ }
+ };
+ do_write(fd, &f_attr, sizeof(f_attr));
+ }
+
+
+ self->data_offset = lseek(fd, 0, SEEK_CUR);
+
+ f_header = (struct perf_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ .attr_size = sizeof(f_attr),
+ .attrs = {
+ .offset = self->attr_offset,
+ .size = self->attrs * sizeof(f_attr),
+ },
+ .data = {
+ .offset = self->data_offset,
+ .size = self->data_size,
+ },
+ };
+
+ lseek(fd, 0, SEEK_SET);
+ do_write(fd, &f_header, sizeof(f_header));
+ lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+
+ self->frozen = 1;
+}
+
+static void do_read(int fd, void *buf, size_t size)
+{
+ while (size) {
+ int ret = read(fd, buf, size);
+
+ if (ret < 0)
+ die("failed to read");
+
+ size -= ret;
+ buf += ret;
+ }
+}
+
+struct perf_header *perf_header__read(int fd)
+{
+ struct perf_header *self = perf_header__new();
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ u64 f_id;
+
+ int nr_attrs, nr_ids, i, j;
+
+ lseek(fd, 0, SEEK_SET);
+ do_read(fd, &f_header, sizeof(f_header));
+
+ if (f_header.magic != PERF_MAGIC ||
+ f_header.size != sizeof(f_header) ||
+ f_header.attr_size != sizeof(f_attr))
+ die("incompatible file format");
+
+ nr_attrs = f_header.attrs.size / sizeof(f_attr);
+ lseek(fd, f_header.attrs.offset, SEEK_SET);
+
+ for (i = 0; i < nr_attrs; i++) {
+ struct perf_header_attr *attr;
+ off_t tmp = lseek(fd, 0, SEEK_CUR);
+
+ do_read(fd, &f_attr, sizeof(f_attr));
+
+ attr = perf_header_attr__new(&f_attr.attr);
+
+ nr_ids = f_attr.ids.size / sizeof(u64);
+ lseek(fd, f_attr.ids.offset, SEEK_SET);
+
+ for (j = 0; j < nr_ids; j++) {
+ do_read(fd, &f_id, sizeof(f_id));
+
+ perf_header_attr__add_id(attr, f_id);
+ }
+ perf_header__add_attr(self, attr);
+ lseek(fd, tmp, SEEK_SET);
+ }
+
+ self->data_offset = f_header.data.offset;
+ self->data_size = f_header.data.size;
+
+ lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+
+ self->frozen = 1;
+
+ return self;
+}
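A hedged sketch of writing a header for one counter; "attr", "event_id" and the open
output descriptor "fd" are placeholders normally supplied by perf record:

        struct perf_counter_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_header *header = perf_header__new();
        struct perf_header_attr *h_attr = perf_header_attr__new(&attr);

        perf_header_attr__add_id(h_attr, event_id);
        perf_header__add_attr(header, h_attr);
        perf_header__write(header, fd);         /* leaves fd positioned at data_offset */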
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
new file mode 100644
index 000000000000..b5ef53ad4c7a
--- /dev/null
+++ b/tools/perf/util/header.h
@@ -0,0 +1,37 @@
+#ifndef _PERF_HEADER_H
+#define _PERF_HEADER_H
+
+#include "../../../include/linux/perf_counter.h"
+#include <sys/types.h>
+#include "types.h"
+
+struct perf_header_attr {
+ struct perf_counter_attr attr;
+ int ids, size;
+ u64 *id;
+ off_t id_offset;
+};
+
+struct perf_header {
+ int frozen;
+ int attrs, size;
+ struct perf_header_attr **attr;
+ off_t attr_offset;
+ u64 data_offset;
+ u64 data_size;
+};
+
+struct perf_header *perf_header__read(int fd);
+void perf_header__write(struct perf_header *self, int fd);
+
+void perf_header__add_attr(struct perf_header *self,
+ struct perf_header_attr *attr);
+
+struct perf_header_attr *
+perf_header_attr__new(struct perf_counter_attr *attr);
+void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
+
+
+struct perf_header *perf_header__new(void);
+
+#endif /* _PERF_HEADER_H */
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
index 6653f7dd1d78..fbb00978b2e2 100644
--- a/tools/perf/util/help.c
+++ b/tools/perf/util/help.c
@@ -26,7 +26,7 @@ static int term_columns(void)
return 80;
}
-void add_cmdname(struct cmdnames *cmds, const char *name, int len)
+void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
{
struct cmdname *ent = malloc(sizeof(*ent) + len + 1);
@@ -40,7 +40,8 @@ void add_cmdname(struct cmdnames *cmds, const char *name, int len)
static void clean_cmdnames(struct cmdnames *cmds)
{
- int i;
+ unsigned int i;
+
for (i = 0; i < cmds->cnt; ++i)
free(cmds->names[i]);
free(cmds->names);
@@ -57,7 +58,7 @@ static int cmdname_compare(const void *a_, const void *b_)
static void uniq(struct cmdnames *cmds)
{
- int i, j;
+ unsigned int i, j;
if (!cmds->cnt)
return;
@@ -71,7 +72,7 @@ static void uniq(struct cmdnames *cmds)
void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
{
- int ci, cj, ei;
+ size_t ci, cj, ei;
int cmp;
ci = cj = ei = 0;
@@ -106,8 +107,9 @@ static void pretty_print_string_list(struct cmdnames *cmds, int longest)
printf(" ");
for (j = 0; j < cols; j++) {
- int n = j * rows + i;
- int size = space;
+ unsigned int n = j * rows + i;
+ unsigned int size = space;
+
if (n >= cmds->cnt)
break;
if (j == cols-1 || n + rows >= cmds->cnt)
@@ -126,21 +128,6 @@ static int is_executable(const char *name)
!S_ISREG(st.st_mode))
return 0;
-#ifdef __MINGW32__
- /* cannot trust the executable bit, peek into the file instead */
- char buf[3] = { 0 };
- int n;
- int fd = open(name, O_RDONLY);
- st.st_mode &= ~S_IXUSR;
- if (fd >= 0) {
- n = read(fd, buf, 2);
- if (n == 2)
- /* DOS executables start with "MZ" */
- if (!strcmp(buf, "#!") || !strcmp(buf, "MZ"))
- st.st_mode |= S_IXUSR;
- close(fd);
- }
-#endif
return st.st_mode & S_IXUSR;
}
@@ -223,7 +210,7 @@ void load_command_list(const char *prefix,
void list_commands(const char *title, struct cmdnames *main_cmds,
struct cmdnames *other_cmds)
{
- int i, longest = 0;
+ unsigned int i, longest = 0;
for (i = 0; i < main_cmds->cnt; i++)
if (longest < main_cmds->names[i]->len)
@@ -254,7 +241,8 @@ void list_commands(const char *title, struct cmdnames *main_cmds,
int is_in_cmdlist(struct cmdnames *c, const char *s)
{
- int i;
+ unsigned int i;
+
for (i = 0; i < c->cnt; i++)
if (!strcmp(s, c->names[i]->name))
return 1;
@@ -286,7 +274,8 @@ static int levenshtein_compare(const void *p1, const void *p2)
static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
{
- int i;
+ unsigned int i;
+
ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
for (i = 0; i < old->cnt; i++)
@@ -298,7 +287,7 @@ static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
const char *help_unknown_cmd(const char *cmd)
{
- int i, n = 0, best_similarity = 0;
+ unsigned int i, n = 0, best_similarity = 0;
struct cmdnames main_cmds, other_cmds;
memset(&main_cmds, 0, sizeof(main_cmds));
@@ -360,7 +349,7 @@ const char *help_unknown_cmd(const char *cmd)
exit(1);
}
-int cmd_version(int argc, const char **argv, const char *prefix)
+int cmd_version(int argc __used, const char **argv __used, const char *prefix __used)
{
printf("perf version %s\n", perf_version_string);
return 0;
diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h
index 56bc15406ffc..7128783637b4 100644
--- a/tools/perf/util/help.h
+++ b/tools/perf/util/help.h
@@ -2,8 +2,8 @@
#define HELP_H
struct cmdnames {
- int alloc;
- int cnt;
+ size_t alloc;
+ size_t cnt;
struct cmdname {
size_t len; /* also used for similarity index in help.c */
char name[FLEX_ARRAY];
@@ -19,7 +19,7 @@ static inline void mput_char(char c, unsigned int num)
void load_command_list(const char *prefix,
struct cmdnames *main_cmds,
struct cmdnames *other_cmds);
-void add_cmdname(struct cmdnames *cmds, const char *name, int len);
+void add_cmdname(struct cmdnames *cmds, const char *name, size_t len);
/* Here we require that excludes is a sorted list. */
void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes);
int is_in_cmdlist(struct cmdnames *c, const char *s);
diff --git a/tools/perf/util/include/asm/system.h b/tools/perf/util/include/asm/system.h
new file mode 100644
index 000000000000..710cecca972d
--- /dev/null
+++ b/tools/perf/util/include/asm/system.h
@@ -0,0 +1 @@
+/* Empty */
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
new file mode 100644
index 000000000000..99c1b3d1edd9
--- /dev/null
+++ b/tools/perf/util/include/linux/kernel.h
@@ -0,0 +1,21 @@
+#ifndef PERF_LINUX_KERNEL_H_
+#define PERF_LINUX_KERNEL_H_
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+#ifndef container_of
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *)0)->member) * __mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+#endif
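For illustration, container_of() recovers the outer structure from a pointer to one of
its embedded members; "struct sample" here is hypothetical:

        struct sample {
                int              value;
                struct list_head node;
        };

        static int value_of(struct list_head *pos)
        {
                struct sample *s = container_of(pos, struct sample, node);

                return s->value;
        }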
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h
new file mode 100644
index 000000000000..dbe4b814382a
--- /dev/null
+++ b/tools/perf/util/include/linux/list.h
@@ -0,0 +1,18 @@
+#include "../../../../include/linux/list.h"
+
+#ifndef PERF_LIST_H
+#define PERF_LIST_H
+/**
+ * list_del_range - deletes range of entries from list.
+ * @begin: first element in the range to delete from the list.
+ * @end: last element in the range to delete from the list.
+ * Note: list_empty on the range of entries does not return true after this,
+ * the entries are in an undefined state.

+ */
+static inline void list_del_range(struct list_head *begin,
+ struct list_head *end)
+{
+ begin->prev->next = end->next;
+ end->next->prev = begin->prev;
+}
+#endif
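list_del_range() unlinks the whole inclusive range in O(1) and, unlike list_del(), does
not poison the removed nodes; a hypothetical example with entries a..c on one list:

        /* a, b and c must be consecutive entries of the same list; afterwards they
         * are no longer reachable from that list, their own pointers are untouched */
        list_del_range(&a->node, &c->node);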
diff --git a/tools/perf/util/include/linux/module.h b/tools/perf/util/include/linux/module.h
new file mode 100644
index 000000000000..b43e2dc21e04
--- /dev/null
+++ b/tools/perf/util/include/linux/module.h
@@ -0,0 +1,6 @@
+#ifndef PERF_LINUX_MODULE_H
+#define PERF_LINUX_MODULE_H
+
+#define EXPORT_SYMBOL(name)
+
+#endif
diff --git a/tools/perf/util/include/linux/poison.h b/tools/perf/util/include/linux/poison.h
new file mode 100644
index 000000000000..fef6dbc9ce13
--- /dev/null
+++ b/tools/perf/util/include/linux/poison.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/poison.h"
diff --git a/tools/perf/util/include/linux/prefetch.h b/tools/perf/util/include/linux/prefetch.h
new file mode 100644
index 000000000000..7841e485d8c3
--- /dev/null
+++ b/tools/perf/util/include/linux/prefetch.h
@@ -0,0 +1,6 @@
+#ifndef PERF_LINUX_PREFETCH_H
+#define PERF_LINUX_PREFETCH_H
+
+static inline void prefetch(void *a __attribute__((unused))) { }
+
+#endif
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
new file mode 100644
index 000000000000..7a243a143037
--- /dev/null
+++ b/tools/perf/util/include/linux/rbtree.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/rbtree.h"
diff --git a/tools/perf/util/list.h b/tools/perf/util/list.h
deleted file mode 100644
index e2548e8072cf..000000000000
--- a/tools/perf/util/list.h
+++ /dev/null
@@ -1,603 +0,0 @@
-#ifndef _LINUX_LIST_H
-#define _LINUX_LIST_H
-/*
- Copyright (C) Cast of dozens, comes from the Linux kernel
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-*/
-
-#include <stddef.h>
-
-/*
- * These are non-NULL pointers that will result in page faults
- * under normal circumstances, used to verify that nobody uses
- * non-initialized list entries.
- */
-#define LIST_POISON1 ((void *)0x00100100)
-#define LIST_POISON2 ((void *)0x00200200)
-
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
-
-/*
- * Simple doubly linked list implementation.
- *
- * Some of the internal functions ("__xxx") are useful when
- * manipulating whole lists rather than single entries, as
- * sometimes we already know the next/prev entries and we can
- * generate better code by using them directly rather than
- * using the generic single-entry routines.
- */
-
-struct list_head {
- struct list_head *next, *prev;
-};
-
-#define LIST_HEAD_INIT(name) { &(name), &(name) }
-
-#define LIST_HEAD(name) \
- struct list_head name = LIST_HEAD_INIT(name)
-
-static inline void INIT_LIST_HEAD(struct list_head *list)
-{
- list->next = list;
- list->prev = list;
-}
-
-/*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __list_add(struct list_head *new,
- struct list_head *prev,
- struct list_head *next)
-{
- next->prev = new;
- new->next = next;
- new->prev = prev;
- prev->next = new;
-}
-
-/**
- * list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-static inline void list_add(struct list_head *new, struct list_head *head)
-{
- __list_add(new, head, head->next);
-}
-
-/**
- * list_add_tail - add a new entry
- * @new: new entry to be added
- * @head: list head to add it before
- *
- * Insert a new entry before the specified head.
- * This is useful for implementing queues.
- */
-static inline void list_add_tail(struct list_head *new, struct list_head *head)
-{
- __list_add(new, head->prev, head);
-}
-
-/*
- * Delete a list entry by making the prev/next entries
- * point to each other.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __list_del(struct list_head * prev, struct list_head * next)
-{
- next->prev = prev;
- prev->next = next;
-}
-
-/**
- * list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: list_empty on entry does not return true after this, the entry is
- * in an undefined state.
- */
-static inline void list_del(struct list_head *entry)
-{
- __list_del(entry->prev, entry->next);
- entry->next = LIST_POISON1;
- entry->prev = LIST_POISON2;
-}
-
-/**
- * list_del_range - deletes range of entries from list.
- * @beging: first element in the range to delete from the list.
- * @beging: first element in the range to delete from the list.
- * Note: list_empty on the range of entries does not return true after this,
- * the entries is in an undefined state.
- */
-static inline void list_del_range(struct list_head *begin,
- struct list_head *end)
-{
- begin->prev->next = end->next;
- end->next->prev = begin->prev;
-}
-
-/**
- * list_replace - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- * Note: if 'old' was empty, it will be overwritten.
- */
-static inline void list_replace(struct list_head *old,
- struct list_head *new)
-{
- new->next = old->next;
- new->next->prev = new;
- new->prev = old->prev;
- new->prev->next = new;
-}
-
-static inline void list_replace_init(struct list_head *old,
- struct list_head *new)
-{
- list_replace(old, new);
- INIT_LIST_HEAD(old);
-}
-
-/**
- * list_del_init - deletes entry from list and reinitialize it.
- * @entry: the element to delete from the list.
- */
-static inline void list_del_init(struct list_head *entry)
-{
- __list_del(entry->prev, entry->next);
- INIT_LIST_HEAD(entry);
-}
-
-/**
- * list_move - delete from one list and add as another's head
- * @list: the entry to move
- * @head: the head that will precede our entry
- */
-static inline void list_move(struct list_head *list, struct list_head *head)
-{
- __list_del(list->prev, list->next);
- list_add(list, head);
-}
-
-/**
- * list_move_tail - delete from one list and add as another's tail
- * @list: the entry to move
- * @head: the head that will follow our entry
- */
-static inline void list_move_tail(struct list_head *list,
- struct list_head *head)
-{
- __list_del(list->prev, list->next);
- list_add_tail(list, head);
-}
-
-/**
- * list_is_last - tests whether @list is the last entry in list @head
- * @list: the entry to test
- * @head: the head of the list
- */
-static inline int list_is_last(const struct list_head *list,
- const struct list_head *head)
-{
- return list->next == head;
-}
-
-/**
- * list_empty - tests whether a list is empty
- * @head: the list to test.
- */
-static inline int list_empty(const struct list_head *head)
-{
- return head->next == head;
-}
-
-/**
- * list_empty_careful - tests whether a list is empty and not being modified
- * @head: the list to test
- *
- * Description:
- * tests whether a list is empty _and_ checks that no other CPU might be
- * in the process of modifying either member (next or prev)
- *
- * NOTE: using list_empty_careful() without synchronization
- * can only be safe if the only activity that can happen
- * to the list entry is list_del_init(). Eg. it cannot be used
- * if another CPU could re-list_add() it.
- */
-static inline int list_empty_careful(const struct list_head *head)
-{
- struct list_head *next = head->next;
- return (next == head) && (next == head->prev);
-}
-
-static inline void __list_splice(struct list_head *list,
- struct list_head *head)
-{
- struct list_head *first = list->next;
- struct list_head *last = list->prev;
- struct list_head *at = head->next;
-
- first->prev = head;
- head->next = first;
-
- last->next = at;
- at->prev = last;
-}
-
-/**
- * list_splice - join two lists
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- */
-static inline void list_splice(struct list_head *list, struct list_head *head)
-{
- if (!list_empty(list))
- __list_splice(list, head);
-}
-
-/**
- * list_splice_init - join two lists and reinitialise the emptied list.
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- *
- * The list at @list is reinitialised
- */
-static inline void list_splice_init(struct list_head *list,
- struct list_head *head)
-{
- if (!list_empty(list)) {
- __list_splice(list, head);
- INIT_LIST_HEAD(list);
- }
-}
-
-/**
- * list_entry - get the struct for this entry
- * @ptr: the &struct list_head pointer.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- */
-#define list_entry(ptr, type, member) \
- container_of(ptr, type, member)
-
-/**
- * list_first_entry - get the first element from a list
- * @ptr: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- *
- * Note, that list is expected to be not empty.
- */
-#define list_first_entry(ptr, type, member) \
- list_entry((ptr)->next, type, member)
-
-/**
- * list_for_each - iterate over a list
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); \
- pos = pos->next)
-
-/**
- * __list_for_each - iterate over a list
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- *
- * This variant differs from list_for_each() in that it's the
- * simplest possible list iteration code, no prefetching is done.
- * Use this for code that knows the list to be very short (empty
- * or 1 entry) most of the time.
- */
-#define __list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
-
-/**
- * list_for_each_prev - iterate over a list backwards
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; pos != (head); \
- pos = pos->prev)
-
-/**
- * list_for_each_safe - iterate over a list safe against removal of list entry
- * @pos: the &struct list_head to use as a loop cursor.
- * @n: another &struct list_head to use as temporary storage
- * @head: the head for your list.
- */
-#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
-
-/**
- * list_for_each_entry - iterate over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define list_for_each_entry(pos, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * list_for_each_entry_reverse - iterate backwards over list of given type.
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define list_for_each_entry_reverse(pos, head, member) \
- for (pos = list_entry((head)->prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(pos->member.prev, typeof(*pos), member))
-
-/**
- * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue
- * @pos: the type * to use as a start point
- * @head: the head of the list
- * @member: the name of the list_struct within the struct.
- *
- * Prepares a pos entry for use as a start point in list_for_each_entry_continue.
- */
-#define list_prepare_entry(pos, head, member) \
- ((pos) ? : list_entry(head, typeof(*pos), member))
-
-/**
- * list_for_each_entry_continue - continue iteration over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Continue to iterate over list of given type, continuing after
- * the current position.
- */
-#define list_for_each_entry_continue(pos, head, member) \
- for (pos = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * list_for_each_entry_from - iterate over list of given type from the current point
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type, continuing from current position.
- */
-#define list_for_each_entry_from(pos, head, member) \
- for (; &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define list_for_each_entry_safe(pos, n, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member), \
- n = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
-
-/**
- * list_for_each_entry_safe_continue
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type, continuing after current point,
- * safe against removal of list entry.
- */
-#define list_for_each_entry_safe_continue(pos, n, head, member) \
- for (pos = list_entry(pos->member.next, typeof(*pos), member), \
- n = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
-
-/**
- * list_for_each_entry_safe_from
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type from current point, safe against
- * removal of list entry.
- */
-#define list_for_each_entry_safe_from(pos, n, head, member) \
- for (n = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
-
-/**
- * list_for_each_entry_safe_reverse
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate backwards over list of given type, safe against removal
- * of list entry.
- */
-#define list_for_each_entry_safe_reverse(pos, n, head, member) \
- for (pos = list_entry((head)->prev, typeof(*pos), member), \
- n = list_entry(pos->member.prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.prev, typeof(*n), member))
-
-/*
- * Double linked lists with a single pointer list head.
- * Mostly useful for hash tables where the two pointer list head is
- * too wasteful.
- * You lose the ability to access the tail in O(1).
- */
-
-struct hlist_head {
- struct hlist_node *first;
-};
-
-struct hlist_node {
- struct hlist_node *next, **pprev;
-};
-
-#define HLIST_HEAD_INIT { .first = NULL }
-#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
-#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
-static inline void INIT_HLIST_NODE(struct hlist_node *h)
-{
- h->next = NULL;
- h->pprev = NULL;
-}
-
-static inline int hlist_unhashed(const struct hlist_node *h)
-{
- return !h->pprev;
-}
-
-static inline int hlist_empty(const struct hlist_head *h)
-{
- return !h->first;
-}
-
-static inline void __hlist_del(struct hlist_node *n)
-{
- struct hlist_node *next = n->next;
- struct hlist_node **pprev = n->pprev;
- *pprev = next;
- if (next)
- next->pprev = pprev;
-}
-
-static inline void hlist_del(struct hlist_node *n)
-{
- __hlist_del(n);
- n->next = LIST_POISON1;
- n->pprev = LIST_POISON2;
-}
-
-static inline void hlist_del_init(struct hlist_node *n)
-{
- if (!hlist_unhashed(n)) {
- __hlist_del(n);
- INIT_HLIST_NODE(n);
- }
-}
-
-static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
-{
- struct hlist_node *first = h->first;
- n->next = first;
- if (first)
- first->pprev = &n->next;
- h->first = n;
- n->pprev = &h->first;
-}
-
-/* next must be != NULL */
-static inline void hlist_add_before(struct hlist_node *n,
- struct hlist_node *next)
-{
- n->pprev = next->pprev;
- n->next = next;
- next->pprev = &n->next;
- *(n->pprev) = n;
-}
-
-static inline void hlist_add_after(struct hlist_node *n,
- struct hlist_node *next)
-{
- next->next = n->next;
- n->next = next;
- next->pprev = &n->next;
-
- if(next->next)
- next->next->pprev = &next->next;
-}
-
-#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
-
-#define hlist_for_each(pos, head) \
- for (pos = (head)->first; pos; \
- pos = pos->next)
-
-#define hlist_for_each_safe(pos, n, head) \
- for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
- pos = n)
-
-/**
- * hlist_for_each_entry - iterate over list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the hlist_node within the struct.
- */
-#define hlist_for_each_entry(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
- * @member: the name of the hlist_node within the struct.
- */
-#define hlist_for_each_entry_continue(tpos, pos, member) \
- for (pos = (pos)->next; \
- pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * hlist_for_each_entry_from - iterate over a hlist continuing from current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
- * @member: the name of the hlist_node within the struct.
- */
-#define hlist_for_each_entry_from(tpos, pos, member) \
- for (; pos && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
- * @n: another &struct hlist_node to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the hlist_node within the struct.
- */
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- for (pos = (head)->first; \
- pos && ({ n = pos->next; 1; }) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = n)
-
-#endif
diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c
new file mode 100644
index 000000000000..ddabe925d65d
--- /dev/null
+++ b/tools/perf/util/module.c
@@ -0,0 +1,509 @@
+#include "util.h"
+#include "../perf.h"
+#include "string.h"
+#include "module.h"
+
+#include <libelf.h>
+#include <gelf.h>
+#include <elf.h>
+#include <dirent.h>
+#include <sys/utsname.h>
+
+static unsigned int crc32(const char *p, unsigned int len)
+{
+ int i;
+ unsigned int crc = 0;
+
+ while (len--) {
+ crc ^= *p++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+ }
+ return crc;
+}
+
+/* module section methods */
+
+struct sec_dso *sec_dso__new_dso(const char *name)
+{
+ struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
+
+ if (self != NULL) {
+ strcpy(self->name, name);
+ self->secs = RB_ROOT;
+ self->find_section = sec_dso__find_section;
+ }
+
+ return self;
+}
+
+static void sec_dso__delete_section(struct section *self)
+{
+ free(((void *)self));
+}
+
+void sec_dso__delete_sections(struct sec_dso *self)
+{
+ struct section *pos;
+ struct rb_node *next = rb_first(&self->secs);
+
+ while (next) {
+ pos = rb_entry(next, struct section, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, &self->secs);
+ sec_dso__delete_section(pos);
+ }
+}
+
+void sec_dso__delete_self(struct sec_dso *self)
+{
+ sec_dso__delete_sections(self);
+ free(self);
+}
+
+static void sec_dso__insert_section(struct sec_dso *self, struct section *sec)
+{
+ struct rb_node **p = &self->secs.rb_node;
+ struct rb_node *parent = NULL;
+ const u64 hash = sec->hash;
+ struct section *s;
+
+ while (*p != NULL) {
+ parent = *p;
+ s = rb_entry(parent, struct section, rb_node);
+ if (hash < s->hash)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&sec->rb_node, parent, p);
+ rb_insert_color(&sec->rb_node, &self->secs);
+}
+
+struct section *sec_dso__find_section(struct sec_dso *self, const char *name)
+{
+ struct rb_node *n;
+ u64 hash;
+ int len;
+
+ if (self == NULL)
+ return NULL;
+
+ len = strlen(name);
+ hash = crc32(name, len);
+
+ n = self->secs.rb_node;
+
+ while (n) {
+ struct section *s = rb_entry(n, struct section, rb_node);
+
+ if (hash < s->hash)
+ n = n->rb_left;
+ else if (hash > s->hash)
+ n = n->rb_right;
+ else {
+ if (!strcmp(name, s->name))
+ return s;
+ else
+ n = rb_next(&s->rb_node);
+ }
+ }
+
+ return NULL;
+}
+
+static size_t sec_dso__fprintf_section(struct section *self, FILE *fp)
+{
+ return fprintf(fp, "name:%s vma:%llx path:%s\n",
+ self->name, self->vma, self->path);
+}
+
+size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp)
+{
+ size_t ret = fprintf(fp, "dso: %s\n", self->name);
+
+ struct rb_node *nd;
+ for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) {
+ struct section *pos = rb_entry(nd, struct section, rb_node);
+ ret += sec_dso__fprintf_section(pos, fp);
+ }
+
+ return ret;
+}
+
+static struct section *section__new(const char *name, const char *path)
+{
+ struct section *self = calloc(1, sizeof(*self));
+
+ if (!self)
+ goto out_failure;
+
+ self->name = calloc(1, strlen(name) + 1);
+ if (!self->name)
+ goto out_failure;
+
+ self->path = calloc(1, strlen(path) + 1);
+ if (!self->path)
+ goto out_failure;
+
+ strcpy(self->name, name);
+ strcpy(self->path, path);
+ self->hash = crc32(self->name, strlen(name));
+
+ return self;
+
+out_failure:
+ if (self) {
+ if (self->name)
+ free(self->name);
+ if (self->path)
+ free(self->path);
+ free(self);
+ }
+
+ return NULL;
+}
+
+/* module methods */
+
+struct mod_dso *mod_dso__new_dso(const char *name)
+{
+ struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
+
+ if (self != NULL) {
+ strcpy(self->name, name);
+ self->mods = RB_ROOT;
+ self->find_module = mod_dso__find_module;
+ }
+
+ return self;
+}
+
+static void mod_dso__delete_module(struct module *self)
+{
+ free(((void *)self));
+}
+
+void mod_dso__delete_modules(struct mod_dso *self)
+{
+ struct module *pos;
+ struct rb_node *next = rb_first(&self->mods);
+
+ while (next) {
+ pos = rb_entry(next, struct module, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, &self->mods);
+ mod_dso__delete_module(pos);
+ }
+}
+
+void mod_dso__delete_self(struct mod_dso *self)
+{
+ mod_dso__delete_modules(self);
+ free(self);
+}
+
+static void mod_dso__insert_module(struct mod_dso *self, struct module *mod)
+{
+ struct rb_node **p = &self->mods.rb_node;
+ struct rb_node *parent = NULL;
+ const u64 hash = mod->hash;
+ struct module *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct module, rb_node);
+ if (hash < m->hash)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&mod->rb_node, parent, p);
+ rb_insert_color(&mod->rb_node, &self->mods);
+}
+
+struct module *mod_dso__find_module(struct mod_dso *self, const char *name)
+{
+ struct rb_node *n;
+ u64 hash;
+ int len;
+
+ if (self == NULL)
+ return NULL;
+
+ len = strlen(name);
+ hash = crc32(name, len);
+
+ n = self->mods.rb_node;
+
+ while (n) {
+ struct module *m = rb_entry(n, struct module, rb_node);
+
+ if (hash < m->hash)
+ n = n->rb_left;
+ else if (hash > m->hash)
+ n = n->rb_right;
+ else {
+ if (!strcmp(name, m->name))
+ return m;
+ else
+ n = rb_next(&m->rb_node);
+ }
+ }
+
+ return NULL;
+}
+
+static size_t mod_dso__fprintf_module(struct module *self, FILE *fp)
+{
+ return fprintf(fp, "name:%s path:%s\n", self->name, self->path);
+}
+
+size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp)
+{
+ struct rb_node *nd;
+ size_t ret;
+
+ ret = fprintf(fp, "dso: %s\n", self->name);
+
+ for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) {
+ struct module *pos = rb_entry(nd, struct module, rb_node);
+
+ ret += mod_dso__fprintf_module(pos, fp);
+ }
+
+ return ret;
+}
+
+static struct module *module__new(const char *name, const char *path)
+{
+ struct module *self = calloc(1, sizeof(*self));
+
+ if (!self)
+ goto out_failure;
+
+ self->name = calloc(1, strlen(name) + 1);
+ if (!self->name)
+ goto out_failure;
+
+ self->path = calloc(1, strlen(path) + 1);
+ if (!self->path)
+ goto out_failure;
+
+ strcpy(self->name, name);
+ strcpy(self->path, path);
+ self->hash = crc32(self->name, strlen(name));
+
+ return self;
+
+out_failure:
+ if (self) {
+ if (self->name)
+ free(self->name);
+ if (self->path)
+ free(self->path);
+ free(self);
+ }
+
+ return NULL;
+}
+
+static int mod_dso__load_sections(struct module *mod)
+{
+ int count = 0, path_len;
+ struct dirent *entry;
+ char *line = NULL;
+ char *dir_path;
+ DIR *dir;
+ size_t n;
+
+ path_len = strlen("/sys/module/");
+ path_len += strlen(mod->name);
+ path_len += strlen("/sections/");
+
+ dir_path = calloc(1, path_len + 1);
+ if (dir_path == NULL)
+ goto out_failure;
+
+ strcat(dir_path, "/sys/module/");
+ strcat(dir_path, mod->name);
+ strcat(dir_path, "/sections/");
+
+ dir = opendir(dir_path);
+ if (dir == NULL)
+ goto out_free;
+
+ while ((entry = readdir(dir))) {
+ struct section *section;
+ char *path, *vma;
+ int line_len;
+ FILE *file;
+
+ if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name))
+ continue;
+
+ path = calloc(1, path_len + strlen(entry->d_name) + 1);
+ if (path == NULL)
+ break;
+ strcat(path, dir_path);
+ strcat(path, entry->d_name);
+
+ file = fopen(path, "r");
+ if (file == NULL) {
+ free(path);
+ break;
+ }
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0) {
+ free(path);
+ fclose(file);
+ break;
+ }
+
+ if (!line) {
+ free(path);
+ fclose(file);
+ break;
+ }
+
+ line[--line_len] = '\0'; /* \n */
+
+ vma = strstr(line, "0x");
+ if (!vma) {
+ free(path);
+ fclose(file);
+ break;
+ }
+ vma += 2;
+
+ section = section__new(entry->d_name, path);
+ if (!section) {
+ fprintf(stderr, "load_sections: allocation error\n");
+ free(path);
+ fclose(file);
+ break;
+ }
+
+ hex2u64(vma, &section->vma);
+ sec_dso__insert_section(mod->sections, section);
+
+ free(path);
+ fclose(file);
+ count++;
+ }
+
+ closedir(dir);
+ free(line);
+ free(dir_path);
+
+ return count;
+
+out_free:
+ free(dir_path);
+
+out_failure:
+ return count;
+}
+
+static int mod_dso__load_module_paths(struct mod_dso *self)
+{
+ struct utsname uts;
+ int count = 0, len;
+ char *line = NULL;
+ FILE *file;
+ char *path;
+ size_t n;
+
+ if (uname(&uts) < 0)
+ goto out_failure;
+
+ len = strlen("/lib/modules/");
+ len += strlen(uts.release);
+ len += strlen("/modules.dep");
+
+ path = calloc(1, len);
+ if (path == NULL)
+ goto out_failure;
+
+ strcat(path, "/lib/modules/");
+ strcat(path, uts.release);
+ strcat(path, "/modules.dep");
+
+ file = fopen(path, "r");
+ free(path);
+ if (file == NULL)
+ goto out_failure;
+
+ while (!feof(file)) {
+ char *path, *name, *tmp;
+ struct module *module;
+ int line_len, len;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0)
+ break;
+
+ if (!line)
+ goto out_failure;
+
+ line[--line_len] = '\0'; /* \n */
+
+ path = strtok(line, ":");
+ if (!path)
+ goto out_failure;
+
+ name = strdup(path);
+ name = strtok(name, "/");
+
+ tmp = name;
+
+ while (tmp) {
+ tmp = strtok(NULL, "/");
+ if (tmp)
+ name = tmp;
+ }
+ name = strsep(&name, ".");
+
+ /* Quirk: replace '-' with '_' in sound modules */
+ for (len = strlen(name); len; len--) {
+ if (*(name+len) == '-')
+ *(name+len) = '_';
+ }
+
+ module = module__new(name, path);
+ if (!module) {
+ fprintf(stderr, "load_module_paths: allocation error\n");
+ goto out_failure;
+ }
+ mod_dso__insert_module(self, module);
+
+ module->sections = sec_dso__new_dso("sections");
+ if (!module->sections) {
+ fprintf(stderr, "load_module_paths: allocation error\n");
+ goto out_failure;
+ }
+
+ module->active = mod_dso__load_sections(module);
+
+ if (module->active > 0)
+ count++;
+ }
+
+ free(line);
+ fclose(file);
+
+ return count;
+
+out_failure:
+ return -1;
+}
+
+int mod_dso__load_modules(struct mod_dso *dso)
+{
+ int err;
+
+ err = mod_dso__load_module_paths(dso);
+
+ return err;
+}
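A hedged sketch of the intended call sequence (stdout is used only for illustration):

        struct mod_dso *mods = mod_dso__new_dso("modules");

        if (mods) {
                if (mod_dso__load_modules(mods) > 0)
                        mod_dso__fprintf(mods, stdout); /* one line per module: name and path */
                mod_dso__delete_self(mods);
        }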
diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h
new file mode 100644
index 000000000000..8a592ef641ca
--- /dev/null
+++ b/tools/perf/util/module.h
@@ -0,0 +1,53 @@
+#ifndef _PERF_MODULE_
+#define _PERF_MODULE_ 1
+
+#include <linux/types.h>
+#include "../types.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+struct section {
+ struct rb_node rb_node;
+ u64 hash;
+ u64 vma;
+ char *name;
+ char *path;
+};
+
+struct sec_dso {
+ struct list_head node;
+ struct rb_root secs;
+ struct section *(*find_section)(struct sec_dso *, const char *name);
+ char name[0];
+};
+
+struct module {
+ struct rb_node rb_node;
+ u64 hash;
+ char *name;
+ char *path;
+ struct sec_dso *sections;
+ int active;
+};
+
+struct mod_dso {
+ struct list_head node;
+ struct rb_root mods;
+ struct module *(*find_module)(struct mod_dso *, const char *name);
+ char name[0];
+};
+
+struct sec_dso *sec_dso__new_dso(const char *name);
+void sec_dso__delete_sections(struct sec_dso *self);
+void sec_dso__delete_self(struct sec_dso *self);
+size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp);
+struct section *sec_dso__find_section(struct sec_dso *self, const char *name);
+
+struct mod_dso *mod_dso__new_dso(const char *name);
+void mod_dso__delete_modules(struct mod_dso *self);
+void mod_dso__delete_self(struct mod_dso *self);
+size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp);
+struct module *mod_dso__find_module(struct mod_dso *self, const char *name);
+int mod_dso__load_modules(struct mod_dso *dso);
+
+#endif /* _PERF_MODULE_ */
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c
index a28bccae5458..1915de20dcac 100644
--- a/tools/perf/util/pager.c
+++ b/tools/perf/util/pager.c
@@ -9,7 +9,6 @@
static int spawned_pager;
-#ifndef __MINGW32__
static void pager_preexec(void)
{
/*
@@ -24,7 +23,6 @@ static void pager_preexec(void)
setenv("LESS", "FRSX", 0);
}
-#endif
static const char *pager_argv[] = { "sh", "-c", NULL, NULL };
static struct child_process pager_process;
@@ -70,9 +68,8 @@ void setup_pager(void)
pager_argv[2] = pager;
pager_process.argv = pager_argv;
pager_process.in = -1;
-#ifndef __MINGW32__
pager_process.preexec_cb = pager_preexec;
-#endif
+
if (start_command(&pager_process))
return;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 35d04da38d6a..5184959e0615 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -16,32 +16,28 @@ struct event_symbol {
u8 type;
u64 config;
char *symbol;
+ char *alias;
};
-#define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y
-#define CR(x, y) .type = PERF_TYPE_##x, .config = y
+#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
+#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
static struct event_symbol event_symbols[] = {
- { C(HARDWARE, HW_CPU_CYCLES), "cpu-cycles", },
- { C(HARDWARE, HW_CPU_CYCLES), "cycles", },
- { C(HARDWARE, HW_INSTRUCTIONS), "instructions", },
- { C(HARDWARE, HW_CACHE_REFERENCES), "cache-references", },
- { C(HARDWARE, HW_CACHE_MISSES), "cache-misses", },
- { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branch-instructions", },
- { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branches", },
- { C(HARDWARE, HW_BRANCH_MISSES), "branch-misses", },
- { C(HARDWARE, HW_BUS_CYCLES), "bus-cycles", },
-
- { C(SOFTWARE, SW_CPU_CLOCK), "cpu-clock", },
- { C(SOFTWARE, SW_TASK_CLOCK), "task-clock", },
- { C(SOFTWARE, SW_PAGE_FAULTS), "page-faults", },
- { C(SOFTWARE, SW_PAGE_FAULTS), "faults", },
- { C(SOFTWARE, SW_PAGE_FAULTS_MIN), "minor-faults", },
- { C(SOFTWARE, SW_PAGE_FAULTS_MAJ), "major-faults", },
- { C(SOFTWARE, SW_CONTEXT_SWITCHES), "context-switches", },
- { C(SOFTWARE, SW_CONTEXT_SWITCHES), "cs", },
- { C(SOFTWARE, SW_CPU_MIGRATIONS), "cpu-migrations", },
- { C(SOFTWARE, SW_CPU_MIGRATIONS), "migrations", },
+ { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
+ { CHW(INSTRUCTIONS), "instructions", "" },
+ { CHW(CACHE_REFERENCES), "cache-references", "" },
+ { CHW(CACHE_MISSES), "cache-misses", "" },
+ { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
+ { CHW(BRANCH_MISSES), "branch-misses", "" },
+ { CHW(BUS_CYCLES), "bus-cycles", "" },
+
+ { CSW(CPU_CLOCK), "cpu-clock", "" },
+ { CSW(TASK_CLOCK), "task-clock", "" },
+ { CSW(PAGE_FAULTS), "page-faults", "faults" },
+ { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
+ { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
+ { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
+ { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
};
#define __PERF_COUNTER_FIELD(config, name) \
@@ -74,26 +70,70 @@ static char *sw_event_names[] = {
#define MAX_ALIASES 8
-static char *hw_cache [][MAX_ALIASES] = {
- { "L1-data" , "l1-d", "l1d" },
- { "L1-instruction" , "l1-i", "l1i" },
- { "L2" , "l2" },
- { "Data-TLB" , "dtlb", "d-tlb" },
- { "Instruction-TLB" , "itlb", "i-tlb" },
- { "Branch" , "bpu" , "btb", "bpc" },
+static char *hw_cache[][MAX_ALIASES] = {
+ { "L1-d$", "l1-d", "l1d", "L1-data", },
+ { "L1-i$", "l1-i", "l1i", "L1-instruction", },
+ { "LLC", "L2" },
+ { "dTLB", "d-tlb", "Data-TLB", },
+ { "iTLB", "i-tlb", "Instruction-TLB", },
+ { "branch", "branches", "bpu", "btb", "bpc", },
};
-static char *hw_cache_op [][MAX_ALIASES] = {
- { "Load" , "read" },
- { "Store" , "write" },
- { "Prefetch" , "speculative-read", "speculative-load" },
+static char *hw_cache_op[][MAX_ALIASES] = {
+ { "load", "loads", "read", },
+ { "store", "stores", "write", },
+ { "prefetch", "prefetches", "speculative-read", "speculative-load", },
};
-static char *hw_cache_result [][MAX_ALIASES] = {
- { "Reference" , "ops", "access" },
- { "Miss" },
+static char *hw_cache_result[][MAX_ALIASES] = {
+ { "refs", "Reference", "ops", "access", },
+ { "misses", "miss", },
};
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ (1 << C(OP_READ))
+#define CACHE_WRITE (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x) (1 << x)
+
+/*
+ * cache operation stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long hw_cache_stat[C(MAX)] = {
+ [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)] = (CACHE_READ),
+ [C(BPU)] = (CACHE_READ),
+};
+
+static int is_cache_op_valid(u8 cache_type, u8 cache_op)
+{
+ if (hw_cache_stat[cache_type] & COP(cache_op))
+ return 1; /* valid */
+ else
+ return 0; /* invalid */
+}
+
+static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
+{
+ static char name[50];
+
+ if (cache_result) {
+ sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][0],
+ hw_cache_result[cache_result][0]);
+ } else {
+ sprintf(name, "%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][1]);
+ }
+
+ return name;
+}
+
char *event_name(int counter)
{
u64 config = attrs[counter].config;
@@ -113,7 +153,6 @@ char *event_name(int counter)
case PERF_TYPE_HW_CACHE: {
u8 cache_type, cache_op, cache_result;
- static char name[100];
cache_type = (config >> 0) & 0xff;
if (cache_type > PERF_COUNT_HW_CACHE_MAX)
@@ -127,12 +166,10 @@ char *event_name(int counter)
if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
return "unknown-ext-hardware-cache-result";
- sprintf(name, "%s-Cache-%s-%ses",
- hw_cache[cache_type][0],
- hw_cache_op[cache_op][0],
- hw_cache_result[cache_result][0]);
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return "invalid-cache";
- return name;
+ return event_cache_name(cache_type, cache_op, cache_result);
}
case PERF_TYPE_SOFTWARE:
@@ -147,43 +184,74 @@ char *event_name(int counter)
return "unknown";
}
-static int parse_aliases(const char *str, char *names[][MAX_ALIASES], int size)
+static int parse_aliases(const char **str, char *names[][MAX_ALIASES], int size)
{
int i, j;
+ int n, longest = -1;
for (i = 0; i < size; i++) {
- for (j = 0; j < MAX_ALIASES; j++) {
- if (!names[i][j])
- break;
- if (strcasestr(str, names[i][j]))
- return i;
+ for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
+ n = strlen(names[i][j]);
+ if (n > longest && !strncasecmp(*str, names[i][j], n))
+ longest = n;
+ }
+ if (longest > 0) {
+ *str += longest;
+ return i;
}
}
return -1;
}
-static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr)
+static int
+parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
{
- int cache_type = -1, cache_op = 0, cache_result = 0;
+ const char *s = *str;
+ int cache_type = -1, cache_op = -1, cache_result = -1;
- cache_type = parse_aliases(str, hw_cache, PERF_COUNT_HW_CACHE_MAX);
+ cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
/*
* No fallback - if we cannot get a clear cache type
* then bail out:
*/
if (cache_type == -1)
- return -EINVAL;
+ return 0;
+
+ while ((cache_op == -1 || cache_result == -1) && *s == '-') {
+ ++s;
+
+ if (cache_op == -1) {
+ cache_op = parse_aliases(&s, hw_cache_op,
+ PERF_COUNT_HW_CACHE_OP_MAX);
+ if (cache_op >= 0) {
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return 0;
+ continue;
+ }
+ }
+
+ if (cache_result == -1) {
+ cache_result = parse_aliases(&s, hw_cache_result,
+ PERF_COUNT_HW_CACHE_RESULT_MAX);
+ if (cache_result >= 0)
+ continue;
+ }
+
+ /*
+ * Can't parse this as a cache op or result, so back up
+ * to the '-'.
+ */
+ --s;
+ break;
+ }
- cache_op = parse_aliases(str, hw_cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
/*
* Fall back to reads:
*/
if (cache_op == -1)
cache_op = PERF_COUNT_HW_CACHE_OP_READ;
- cache_result = parse_aliases(str, hw_cache_result,
- PERF_COUNT_HW_CACHE_RESULT_MAX);
/*
* Fall back to accesses:
*/
@@ -193,82 +261,154 @@ static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *a
attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
attr->type = PERF_TYPE_HW_CACHE;
+ *str = s;
+ return 1;
+}
+
+static int check_events(const char *str, unsigned int i)
+{
+ int n;
+
+ n = strlen(event_symbols[i].symbol);
+ if (!strncmp(str, event_symbols[i].symbol, n))
+ return n;
+
+ n = strlen(event_symbols[i].alias);
+ if (n)
+ if (!strncmp(str, event_symbols[i].alias, n))
+ return n;
return 0;
}
-/*
- * Each event can have multiple symbolic names.
- * Symbolic names are (almost) exactly matched.
- */
-static int parse_event_symbols(const char *str, struct perf_counter_attr *attr)
+static int
+parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
{
- u64 config, id;
- int type;
+ const char *str = *strp;
unsigned int i;
- const char *sep, *pstr;
+ int n;
- if (str[0] == 'r' && hex2u64(str + 1, &config) > 0) {
- attr->type = PERF_TYPE_RAW;
- attr->config = config;
+ for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
+ n = check_events(str, i);
+ if (n > 0) {
+ attr->type = event_symbols[i].type;
+ attr->config = event_symbols[i].config;
+ *strp = str + n;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int parse_raw_event(const char **strp, struct perf_counter_attr *attr)
+{
+ const char *str = *strp;
+ u64 config;
+ int n;
+ if (*str != 'r')
return 0;
+ n = hex2u64(str + 1, &config);
+ if (n > 0) {
+ *strp = str + n + 1;
+ attr->type = PERF_TYPE_RAW;
+ attr->config = config;
+ return 1;
}
+ return 0;
+}
- pstr = str;
- sep = strchr(pstr, ':');
- if (sep) {
- type = atoi(pstr);
- pstr = sep + 1;
- id = atoi(pstr);
- sep = strchr(pstr, ':');
- if (sep) {
- pstr = sep + 1;
- if (strchr(pstr, 'k'))
- attr->exclude_user = 1;
- if (strchr(pstr, 'u'))
- attr->exclude_kernel = 1;
+static int
+parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
+{
+ const char *str = *strp;
+ char *endp;
+ unsigned long type;
+ u64 config;
+
+ type = strtoul(str, &endp, 0);
+ if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
+ str = endp + 1;
+ config = strtoul(str, &endp, 0);
+ if (endp > str) {
+ attr->type = type;
+ attr->config = config;
+ *strp = endp;
+ return 1;
}
- attr->type = type;
- attr->config = id;
+ }
+ return 0;
+}
+
+static int
+parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
+{
+ const char *str = *strp;
+ int eu = 1, ek = 1, eh = 1;
+ if (*str++ != ':')
return 0;
+ while (*str) {
+ if (*str == 'u')
+ eu = 0;
+ else if (*str == 'k')
+ ek = 0;
+ else if (*str == 'h')
+ eh = 0;
+ else
+ break;
+ ++str;
}
+ if (str >= *strp + 2) {
+ *strp = str;
+ attr->exclude_user = eu;
+ attr->exclude_kernel = ek;
+ attr->exclude_hv = eh;
+ return 1;
+ }
+ return 0;
+}
- for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
- if (!strncmp(str, event_symbols[i].symbol,
- strlen(event_symbols[i].symbol))) {
-
- attr->type = event_symbols[i].type;
- attr->config = event_symbols[i].config;
+/*
+ * Each event can have multiple symbolic names.
+ * Symbolic names are (almost) exactly matched.
+ */
+static int parse_event_symbols(const char **str, struct perf_counter_attr *attr)
+{
+ if (!(parse_raw_event(str, attr) ||
+ parse_numeric_event(str, attr) ||
+ parse_symbolic_event(str, attr) ||
+ parse_generic_hw_event(str, attr)))
+ return 0;
- return 0;
- }
- }
+ parse_event_modifier(str, attr);
- return parse_generic_hw_symbols(str, attr);
+ return 1;
}
-int parse_events(const struct option *opt, const char *str, int unset)
+int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
struct perf_counter_attr attr;
- int ret;
- memset(&attr, 0, sizeof(attr));
-again:
- if (nr_counters == MAX_COUNTERS)
- return -1;
+ for (;;) {
+ if (nr_counters == MAX_COUNTERS)
+ return -1;
+
+ memset(&attr, 0, sizeof(attr));
+ if (!parse_event_symbols(&str, &attr))
+ return -1;
- ret = parse_event_symbols(str, &attr);
- if (ret < 0)
- return ret;
+ if (!(*str == 0 || *str == ',' || isspace(*str)))
+ return -1;
- attrs[nr_counters] = attr;
- nr_counters++;
+ attrs[nr_counters] = attr;
+ nr_counters++;
- str = strstr(str, ",");
- if (str) {
- str++;
- goto again;
+ if (*str == 0)
+ break;
+ if (*str == ',')
+ ++str;
+ while (isspace(*str))
+ ++str;
}
return 0;
@@ -288,7 +428,8 @@ static const char * const event_type_descriptors[] = {
void print_events(void)
{
struct event_symbol *syms = event_symbols;
- unsigned int i, type, prev_type = -1;
+ unsigned int i, type, op, prev_type = -1;
+ char name[40];
fprintf(stderr, "\n");
fprintf(stderr, "List of pre-defined events (to be used in -e):\n");
@@ -301,14 +442,33 @@ void print_events(void)
if (type != prev_type)
fprintf(stderr, "\n");
- fprintf(stderr, " %-30s [%s]\n", syms->symbol,
+ if (strlen(syms->alias))
+ sprintf(name, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strcpy(name, syms->symbol);
+ fprintf(stderr, " %-40s [%s]\n", name,
event_type_descriptors[type]);
prev_type = type;
}
fprintf(stderr, "\n");
- fprintf(stderr, " %-30s [raw hardware event descriptor]\n",
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type */
+ if (!is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ fprintf(stderr, " %-40s [%s]\n",
+ event_cache_name(type, op, i),
+ event_type_descriptors[4]);
+ }
+ }
+ }
+
+ fprintf(stderr, "\n");
+ fprintf(stderr, " %-40s [raw hardware event descriptor]\n",
"rNNN");
fprintf(stderr, "\n");
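
Illustrative sketch only (not part of the patch): the rework above splits event parsing into
raw, numeric, symbolic and generic-cache helpers plus a modifier parser, where the letters in a
":u/:k/:h" suffix name the privilege levels that are kept and everything unlisted is excluded.
The event names below are examples; the header and globals are the ones assumed to exist in the
perf tree.

	#include "util/parse-events.h"	/* assumed to declare parse_events(), attrs[], nr_counters */

	static void parse_examples(void)
	{
		parse_events(NULL, "cycles", 0);			/* symbolic name         */
		parse_events(NULL, "r1a2b", 0);				/* raw event, hex config */
		parse_events(NULL, "1:3", 0);				/* <type>:<config> pair  */
		parse_events(NULL, "L1-dcache-load-misses", 0);		/* generic cache event   */
		parse_events(NULL, "instructions:u", 0);		/* user level only: sets
									   exclude_kernel/hv     */
		parse_events(NULL, "faults:k,cycles", 0);		/* comma-separated list   */
		/* attrs[0..nr_counters-1] now hold the filled perf_counter_attrs */
	}
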
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index b3affb1658d2..1bf67190c820 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -20,7 +20,8 @@ static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt,
if (p->opt) {
*arg = p->opt;
p->opt = NULL;
- } else if (p->argc == 1 && (opt->flags & PARSE_OPT_LASTARG_DEFAULT)) {
+ } else if ((opt->flags & PARSE_OPT_LASTARG_DEFAULT) && (p->argc == 1 ||
+ **(p->argv + 1) == '-')) {
*arg = (const char *)opt->defval;
} else if (p->argc > 1) {
p->argc--;
@@ -485,7 +486,7 @@ int parse_options_usage(const char * const *usagestr,
}
-int parse_opt_verbosity_cb(const struct option *opt, const char *arg,
+int parse_opt_verbosity_cb(const struct option *opt, const char *arg __used,
int unset)
{
int *target = opt->value;
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index a1039a6ce0eb..8aa3464c7090 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -90,21 +90,22 @@ struct option {
intptr_t defval;
};
-#define OPT_END() { OPTION_END }
-#define OPT_ARGUMENT(l, h) { OPTION_ARGUMENT, 0, (l), NULL, NULL, (h) }
-#define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
-#define OPT_BIT(s, l, v, h, b) { OPTION_BIT, (s), (l), (v), NULL, (h), 0, NULL, (b) }
-#define OPT_BOOLEAN(s, l, v, h) { OPTION_BOOLEAN, (s), (l), (v), NULL, (h) }
-#define OPT_SET_INT(s, l, v, h, i) { OPTION_SET_INT, (s), (l), (v), NULL, (h), 0, NULL, (i) }
-#define OPT_SET_PTR(s, l, v, h, p) { OPTION_SET_PTR, (s), (l), (v), NULL, (h), 0, NULL, (p) }
-#define OPT_INTEGER(s, l, v, h) { OPTION_INTEGER, (s), (l), (v), NULL, (h) }
-#define OPT_LONG(s, l, v, h) { OPTION_LONG, (s), (l), (v), NULL, (h) }
-#define OPT_STRING(s, l, v, a, h) { OPTION_STRING, (s), (l), (v), (a), (h) }
+#define OPT_END() { .type = OPTION_END }
+#define OPT_ARGUMENT(l, h) { .type = OPTION_ARGUMENT, .long_name = (l), .help = (h) }
+#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
+#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (b) }
+#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
+#define OPT_SET_INT(s, l, v, h, i) { .type = OPTION_SET_INT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (i) }
+#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
+#define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
+#define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
+#define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h) }
#define OPT_DATE(s, l, v, h) \
- { OPTION_CALLBACK, (s), (l), (v), "time",(h), 0, \
- parse_opt_approxidate_cb }
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
#define OPT_CALLBACK(s, l, v, a, h, f) \
- { OPTION_CALLBACK, (s), (l), (v), (a), (h), 0, (f) }
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f) }
+#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT }
/* parse_options() will filter out the processed options and leave the
 * non-option arguments in argv[].
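
With the macros converted to designated initializers (and OPT_CALLBACK_DEFAULT added), an option
table in a builtin might look like the following sketch; the option names, variables and the
"cycles" default are invented for illustration.

	static int		verbose;
	static const char	*vmlinux_name;

	static const struct option example_options[] = {
		OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
		OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
		/* with PARSE_OPT_LASTARG_DEFAULT, the callback now also gets the
		   default when -e is the last argument or is followed by another
		   option (see the get_arg() change above) */
		OPT_CALLBACK_DEFAULT('e', "event", NULL, "event", "event selector",
				     parse_events, "cycles"),
		OPT_END()
	};
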
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
index f18c5212bc92..c6e5dc0dc82f 100644
--- a/tools/perf/util/quote.c
+++ b/tools/perf/util/quote.c
@@ -162,12 +162,16 @@ static inline int sq_must_quote(char c)
return sq_lookup[(unsigned char)c] + quote_path_fully > 0;
}
-/* returns the longest prefix not needing a quote up to maxlen if positive.
- This stops at the first \0 because it's marked as a character needing an
- escape */
-static size_t next_quote_pos(const char *s, ssize_t maxlen)
+/*
+ * Returns the longest prefix not needing a quote up to maxlen if
+ * positive.
+ * This stops at the first \0 because it's marked as a character
+ * needing an escape.
+ */
+static ssize_t next_quote_pos(const char *s, ssize_t maxlen)
{
- size_t len;
+ ssize_t len;
+
if (maxlen < 0) {
for (len = 0; !sq_must_quote(s[len]); len++);
} else {
@@ -192,22 +196,22 @@ static size_t next_quote_pos(const char *s, ssize_t maxlen)
static size_t quote_c_style_counted(const char *name, ssize_t maxlen,
struct strbuf *sb, FILE *fp, int no_dq)
{
-#undef EMIT
-#define EMIT(c) \
- do { \
- if (sb) strbuf_addch(sb, (c)); \
- if (fp) fputc((c), fp); \
- count++; \
+#define EMIT(c) \
+ do { \
+ if (sb) strbuf_addch(sb, (c)); \
+ if (fp) fputc((c), fp); \
+ count++; \
} while (0)
-#define EMITBUF(s, l) \
- do { \
- int __ret; \
- if (sb) strbuf_add(sb, (s), (l)); \
- if (fp) __ret = fwrite((s), (l), 1, fp); \
- count += (l); \
+
+#define EMITBUF(s, l) \
+ do { \
+ int __ret; \
+ if (sb) strbuf_add(sb, (s), (l)); \
+ if (fp) __ret = fwrite((s), (l), 1, fp); \
+ count += (l); \
} while (0)
- size_t len, count = 0;
+ ssize_t len, count = 0;
const char *p = name;
for (;;) {
@@ -273,8 +277,8 @@ void write_name_quoted(const char *name, FILE *fp, int terminator)
fputc(terminator, fp);
}
-extern void write_name_quotedpfx(const char *pfx, size_t pfxlen,
- const char *name, FILE *fp, int terminator)
+void write_name_quotedpfx(const char *pfx, ssize_t pfxlen,
+ const char *name, FILE *fp, int terminator)
{
int needquote = 0;
@@ -306,7 +310,7 @@ char *quote_path_relative(const char *in, int len,
len = strlen(in);
/* "../" prefix itself does not need quoting, but "in" might. */
- needquote = next_quote_pos(in, len) < len;
+ needquote = (next_quote_pos(in, len) < len);
strbuf_setlen(out, 0);
strbuf_grow(out, len);
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
index 5dfad89816db..a5454a1d1c13 100644
--- a/tools/perf/util/quote.h
+++ b/tools/perf/util/quote.h
@@ -53,7 +53,7 @@ extern size_t quote_c_style(const char *name, struct strbuf *, FILE *, int no_dq
extern void quote_two_c_style(struct strbuf *, const char *, const char *, int);
extern void write_name_quoted(const char *name, FILE *, int terminator);
-extern void write_name_quotedpfx(const char *pfx, size_t pfxlen,
+extern void write_name_quotedpfx(const char *pfx, ssize_t pfxlen,
const char *name, FILE *, int terminator);
/* quote path as relative to the given prefix */
diff --git a/tools/perf/util/rbtree.c b/tools/perf/util/rbtree.c
deleted file mode 100644
index b15ba9c7cb3f..000000000000
--- a/tools/perf/util/rbtree.c
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- Red Black Trees
- (C) 1999 Andrea Arcangeli <andrea@suse.de>
- (C) 2002 David Woodhouse <dwmw2@infradead.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- linux/lib/rbtree.c
-*/
-
-#include "rbtree.h"
-
-static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
-{
- struct rb_node *right = node->rb_right;
- struct rb_node *parent = rb_parent(node);
-
- if ((node->rb_right = right->rb_left))
- rb_set_parent(right->rb_left, node);
- right->rb_left = node;
-
- rb_set_parent(right, parent);
-
- if (parent)
- {
- if (node == parent->rb_left)
- parent->rb_left = right;
- else
- parent->rb_right = right;
- }
- else
- root->rb_node = right;
- rb_set_parent(node, right);
-}
-
-static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
-{
- struct rb_node *left = node->rb_left;
- struct rb_node *parent = rb_parent(node);
-
- if ((node->rb_left = left->rb_right))
- rb_set_parent(left->rb_right, node);
- left->rb_right = node;
-
- rb_set_parent(left, parent);
-
- if (parent)
- {
- if (node == parent->rb_right)
- parent->rb_right = left;
- else
- parent->rb_left = left;
- }
- else
- root->rb_node = left;
- rb_set_parent(node, left);
-}
-
-void rb_insert_color(struct rb_node *node, struct rb_root *root)
-{
- struct rb_node *parent, *gparent;
-
- while ((parent = rb_parent(node)) && rb_is_red(parent))
- {
- gparent = rb_parent(parent);
-
- if (parent == gparent->rb_left)
- {
- {
- register struct rb_node *uncle = gparent->rb_right;
- if (uncle && rb_is_red(uncle))
- {
- rb_set_black(uncle);
- rb_set_black(parent);
- rb_set_red(gparent);
- node = gparent;
- continue;
- }
- }
-
- if (parent->rb_right == node)
- {
- register struct rb_node *tmp;
- __rb_rotate_left(parent, root);
- tmp = parent;
- parent = node;
- node = tmp;
- }
-
- rb_set_black(parent);
- rb_set_red(gparent);
- __rb_rotate_right(gparent, root);
- } else {
- {
- register struct rb_node *uncle = gparent->rb_left;
- if (uncle && rb_is_red(uncle))
- {
- rb_set_black(uncle);
- rb_set_black(parent);
- rb_set_red(gparent);
- node = gparent;
- continue;
- }
- }
-
- if (parent->rb_left == node)
- {
- register struct rb_node *tmp;
- __rb_rotate_right(parent, root);
- tmp = parent;
- parent = node;
- node = tmp;
- }
-
- rb_set_black(parent);
- rb_set_red(gparent);
- __rb_rotate_left(gparent, root);
- }
- }
-
- rb_set_black(root->rb_node);
-}
-
-static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
- struct rb_root *root)
-{
- struct rb_node *other;
-
- while ((!node || rb_is_black(node)) && node != root->rb_node)
- {
- if (parent->rb_left == node)
- {
- other = parent->rb_right;
- if (rb_is_red(other))
- {
- rb_set_black(other);
- rb_set_red(parent);
- __rb_rotate_left(parent, root);
- other = parent->rb_right;
- }
- if ((!other->rb_left || rb_is_black(other->rb_left)) &&
- (!other->rb_right || rb_is_black(other->rb_right)))
- {
- rb_set_red(other);
- node = parent;
- parent = rb_parent(node);
- }
- else
- {
- if (!other->rb_right || rb_is_black(other->rb_right))
- {
- rb_set_black(other->rb_left);
- rb_set_red(other);
- __rb_rotate_right(other, root);
- other = parent->rb_right;
- }
- rb_set_color(other, rb_color(parent));
- rb_set_black(parent);
- rb_set_black(other->rb_right);
- __rb_rotate_left(parent, root);
- node = root->rb_node;
- break;
- }
- }
- else
- {
- other = parent->rb_left;
- if (rb_is_red(other))
- {
- rb_set_black(other);
- rb_set_red(parent);
- __rb_rotate_right(parent, root);
- other = parent->rb_left;
- }
- if ((!other->rb_left || rb_is_black(other->rb_left)) &&
- (!other->rb_right || rb_is_black(other->rb_right)))
- {
- rb_set_red(other);
- node = parent;
- parent = rb_parent(node);
- }
- else
- {
- if (!other->rb_left || rb_is_black(other->rb_left))
- {
- rb_set_black(other->rb_right);
- rb_set_red(other);
- __rb_rotate_left(other, root);
- other = parent->rb_left;
- }
- rb_set_color(other, rb_color(parent));
- rb_set_black(parent);
- rb_set_black(other->rb_left);
- __rb_rotate_right(parent, root);
- node = root->rb_node;
- break;
- }
- }
- }
- if (node)
- rb_set_black(node);
-}
-
-void rb_erase(struct rb_node *node, struct rb_root *root)
-{
- struct rb_node *child, *parent;
- int color;
-
- if (!node->rb_left)
- child = node->rb_right;
- else if (!node->rb_right)
- child = node->rb_left;
- else
- {
- struct rb_node *old = node, *left;
-
- node = node->rb_right;
- while ((left = node->rb_left) != NULL)
- node = left;
- child = node->rb_right;
- parent = rb_parent(node);
- color = rb_color(node);
-
- if (child)
- rb_set_parent(child, parent);
- if (parent == old) {
- parent->rb_right = child;
- parent = node;
- } else
- parent->rb_left = child;
-
- node->rb_parent_color = old->rb_parent_color;
- node->rb_right = old->rb_right;
- node->rb_left = old->rb_left;
-
- if (rb_parent(old))
- {
- if (rb_parent(old)->rb_left == old)
- rb_parent(old)->rb_left = node;
- else
- rb_parent(old)->rb_right = node;
- } else
- root->rb_node = node;
-
- rb_set_parent(old->rb_left, node);
- if (old->rb_right)
- rb_set_parent(old->rb_right, node);
- goto color;
- }
-
- parent = rb_parent(node);
- color = rb_color(node);
-
- if (child)
- rb_set_parent(child, parent);
- if (parent)
- {
- if (parent->rb_left == node)
- parent->rb_left = child;
- else
- parent->rb_right = child;
- }
- else
- root->rb_node = child;
-
- color:
- if (color == RB_BLACK)
- __rb_erase_color(child, parent, root);
-}
-
-/*
- * This function returns the first node (in sort order) of the tree.
- */
-struct rb_node *rb_first(const struct rb_root *root)
-{
- struct rb_node *n;
-
- n = root->rb_node;
- if (!n)
- return NULL;
- while (n->rb_left)
- n = n->rb_left;
- return n;
-}
-
-struct rb_node *rb_last(const struct rb_root *root)
-{
- struct rb_node *n;
-
- n = root->rb_node;
- if (!n)
- return NULL;
- while (n->rb_right)
- n = n->rb_right;
- return n;
-}
-
-struct rb_node *rb_next(const struct rb_node *node)
-{
- struct rb_node *parent;
-
- if (rb_parent(node) == node)
- return NULL;
-
- /* If we have a right-hand child, go down and then left as far
- as we can. */
- if (node->rb_right) {
- node = node->rb_right;
- while (node->rb_left)
- node=node->rb_left;
- return (struct rb_node *)node;
- }
-
- /* No right-hand children. Everything down and left is
- smaller than us, so any 'next' node must be in the general
- direction of our parent. Go up the tree; any time the
- ancestor is a right-hand child of its parent, keep going
- up. First time it's a left-hand child of its parent, said
- parent is our 'next' node. */
- while ((parent = rb_parent(node)) && node == parent->rb_right)
- node = parent;
-
- return parent;
-}
-
-struct rb_node *rb_prev(const struct rb_node *node)
-{
- struct rb_node *parent;
-
- if (rb_parent(node) == node)
- return NULL;
-
- /* If we have a left-hand child, go down and then right as far
- as we can. */
- if (node->rb_left) {
- node = node->rb_left;
- while (node->rb_right)
- node=node->rb_right;
- return (struct rb_node *)node;
- }
-
- /* No left-hand children. Go up till we find an ancestor which
- is a right-hand child of its parent */
- while ((parent = rb_parent(node)) && node == parent->rb_left)
- node = parent;
-
- return parent;
-}
-
-void rb_replace_node(struct rb_node *victim, struct rb_node *new,
- struct rb_root *root)
-{
- struct rb_node *parent = rb_parent(victim);
-
- /* Set the surrounding nodes to point to the replacement */
- if (parent) {
- if (victim == parent->rb_left)
- parent->rb_left = new;
- else
- parent->rb_right = new;
- } else {
- root->rb_node = new;
- }
- if (victim->rb_left)
- rb_set_parent(victim->rb_left, new);
- if (victim->rb_right)
- rb_set_parent(victim->rb_right, new);
-
- /* Copy the pointers/colour from the victim to the replacement */
- *new = *victim;
-}
diff --git a/tools/perf/util/rbtree.h b/tools/perf/util/rbtree.h
deleted file mode 100644
index 6bdc488a47fb..000000000000
--- a/tools/perf/util/rbtree.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- Red Black Trees
- (C) 1999 Andrea Arcangeli <andrea@suse.de>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- linux/include/linux/rbtree.h
-
- To use rbtrees you'll have to implement your own insert and search cores.
- This will avoid us to use callbacks and to drop drammatically performances.
- I know it's not the cleaner way, but in C (not in C++) to get
- performances and genericity...
-
- Some example of insert and search follows here. The search is a plain
- normal search over an ordered tree. The insert instead must be implemented
- int two steps: as first thing the code must insert the element in
- order as a red leaf in the tree, then the support library function
- rb_insert_color() must be called. Such function will do the
- not trivial work to rebalance the rbtree if necessary.
-
------------------------------------------------------------------------
-static inline struct page * rb_search_page_cache(struct inode * inode,
- unsigned long offset)
-{
- struct rb_node * n = inode->i_rb_page_cache.rb_node;
- struct page * page;
-
- while (n)
- {
- page = rb_entry(n, struct page, rb_page_cache);
-
- if (offset < page->offset)
- n = n->rb_left;
- else if (offset > page->offset)
- n = n->rb_right;
- else
- return page;
- }
- return NULL;
-}
-
-static inline struct page * __rb_insert_page_cache(struct inode * inode,
- unsigned long offset,
- struct rb_node * node)
-{
- struct rb_node ** p = &inode->i_rb_page_cache.rb_node;
- struct rb_node * parent = NULL;
- struct page * page;
-
- while (*p)
- {
- parent = *p;
- page = rb_entry(parent, struct page, rb_page_cache);
-
- if (offset < page->offset)
- p = &(*p)->rb_left;
- else if (offset > page->offset)
- p = &(*p)->rb_right;
- else
- return page;
- }
-
- rb_link_node(node, parent, p);
-
- return NULL;
-}
-
-static inline struct page * rb_insert_page_cache(struct inode * inode,
- unsigned long offset,
- struct rb_node * node)
-{
- struct page * ret;
- if ((ret = __rb_insert_page_cache(inode, offset, node)))
- goto out;
- rb_insert_color(node, &inode->i_rb_page_cache);
- out:
- return ret;
-}
------------------------------------------------------------------------
-*/
-
-#ifndef _LINUX_RBTREE_H
-#define _LINUX_RBTREE_H
-
-#include <stddef.h>
-
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
-
-struct rb_node
-{
- unsigned long rb_parent_color;
-#define RB_RED 0
-#define RB_BLACK 1
- struct rb_node *rb_right;
- struct rb_node *rb_left;
-} __attribute__((aligned(sizeof(long))));
- /* The alignment might seem pointless, but allegedly CRIS needs it */
-
-struct rb_root
-{
- struct rb_node *rb_node;
-};
-
-
-#define rb_parent(r) ((struct rb_node *)((r)->rb_parent_color & ~3))
-#define rb_color(r) ((r)->rb_parent_color & 1)
-#define rb_is_red(r) (!rb_color(r))
-#define rb_is_black(r) rb_color(r)
-#define rb_set_red(r) do { (r)->rb_parent_color &= ~1; } while (0)
-#define rb_set_black(r) do { (r)->rb_parent_color |= 1; } while (0)
-
-static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
-{
- rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p;
-}
-static inline void rb_set_color(struct rb_node *rb, int color)
-{
- rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
-}
-
-#define RB_ROOT (struct rb_root) { NULL, }
-#define rb_entry(ptr, type, member) container_of(ptr, type, member)
-
-#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
-#define RB_EMPTY_NODE(node) (rb_parent(node) == node)
-#define RB_CLEAR_NODE(node) (rb_set_parent(node, node))
-
-extern void rb_insert_color(struct rb_node *, struct rb_root *);
-extern void rb_erase(struct rb_node *, struct rb_root *);
-
-/* Find logical next and previous nodes in a tree */
-extern struct rb_node *rb_next(const struct rb_node *);
-extern struct rb_node *rb_prev(const struct rb_node *);
-extern struct rb_node *rb_first(const struct rb_root *);
-extern struct rb_node *rb_last(const struct rb_root *);
-
-/* Fast replacement of a single node without remove/rebalance/add/rebalance */
-extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
- struct rb_root *root);
-
-static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
- struct rb_node ** rb_link)
-{
- node->rb_parent_color = (unsigned long )parent;
- node->rb_left = node->rb_right = NULL;
-
- *rb_link = node;
-}
-
-#endif /* _LINUX_RBTREE_H */
diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c
index b2f5e854f40a..a3935343091a 100644
--- a/tools/perf/util/run-command.c
+++ b/tools/perf/util/run-command.c
@@ -65,7 +65,6 @@ int start_command(struct child_process *cmd)
cmd->err = fderr[0];
}
-#ifndef __MINGW32__
fflush(NULL);
cmd->pid = fork();
if (!cmd->pid) {
@@ -118,71 +117,6 @@ int start_command(struct child_process *cmd)
}
exit(127);
}
-#else
- int s0 = -1, s1 = -1, s2 = -1; /* backups of stdin, stdout, stderr */
- const char **sargv = cmd->argv;
- char **env = environ;
-
- if (cmd->no_stdin) {
- s0 = dup(0);
- dup_devnull(0);
- } else if (need_in) {
- s0 = dup(0);
- dup2(fdin[0], 0);
- } else if (cmd->in) {
- s0 = dup(0);
- dup2(cmd->in, 0);
- }
-
- if (cmd->no_stderr) {
- s2 = dup(2);
- dup_devnull(2);
- } else if (need_err) {
- s2 = dup(2);
- dup2(fderr[1], 2);
- }
-
- if (cmd->no_stdout) {
- s1 = dup(1);
- dup_devnull(1);
- } else if (cmd->stdout_to_stderr) {
- s1 = dup(1);
- dup2(2, 1);
- } else if (need_out) {
- s1 = dup(1);
- dup2(fdout[1], 1);
- } else if (cmd->out > 1) {
- s1 = dup(1);
- dup2(cmd->out, 1);
- }
-
- if (cmd->dir)
- die("chdir in start_command() not implemented");
- if (cmd->env) {
- env = copy_environ();
- for (; *cmd->env; cmd->env++)
- env = env_setenv(env, *cmd->env);
- }
-
- if (cmd->perf_cmd) {
- cmd->argv = prepare_perf_cmd(cmd->argv);
- }
-
- cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, env);
-
- if (cmd->env)
- free_environ(env);
- if (cmd->perf_cmd)
- free(cmd->argv);
-
- cmd->argv = sargv;
- if (s0 >= 0)
- dup2(s0, 0), close(s0);
- if (s1 >= 0)
- dup2(s1, 1), close(s1);
- if (s2 >= 0)
- dup2(s2, 2), close(s2);
-#endif
if (cmd->pid < 0) {
int err = errno;
@@ -288,14 +222,6 @@ int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const
return run_command(&cmd);
}
-#ifdef __MINGW32__
-static __stdcall unsigned run_thread(void *data)
-{
- struct async *async = data;
- return async->proc(async->fd_for_proc, async->data);
-}
-#endif
-
int start_async(struct async *async)
{
int pipe_out[2];
@@ -304,7 +230,6 @@ int start_async(struct async *async)
return error("cannot create pipe: %s", strerror(errno));
async->out = pipe_out[0];
-#ifndef __MINGW32__
/* Flush stdio before fork() to avoid cloning buffers */
fflush(NULL);
@@ -319,33 +244,17 @@ int start_async(struct async *async)
exit(!!async->proc(pipe_out[1], async->data));
}
close(pipe_out[1]);
-#else
- async->fd_for_proc = pipe_out[1];
- async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL);
- if (!async->tid) {
- error("cannot create thread: %s", strerror(errno));
- close_pair(pipe_out);
- return -1;
- }
-#endif
+
return 0;
}
int finish_async(struct async *async)
{
-#ifndef __MINGW32__
int ret = 0;
if (wait_or_whine(async->pid))
ret = error("waitpid (async) failed");
-#else
- DWORD ret = 0;
- if (WaitForSingleObject(async->tid, INFINITE) != WAIT_OBJECT_0)
- ret = error("waiting for thread failed: %lu", GetLastError());
- else if (!GetExitCodeThread(async->tid, &ret))
- ret = error("cannot get thread exit code: %lu", GetLastError());
- CloseHandle(async->tid);
-#endif
+
return ret;
}
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h
index 328289f23669..cc1837deba88 100644
--- a/tools/perf/util/run-command.h
+++ b/tools/perf/util/run-command.h
@@ -79,12 +79,7 @@ struct async {
int (*proc)(int fd, void *data);
void *data;
int out; /* caller reads from here and closes it */
-#ifndef __MINGW32__
pid_t pid;
-#else
- HANDLE tid;
- int fd_for_proc;
-#endif
};
int start_async(struct async *async);
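
With the Windows thread path gone, struct async is always backed by fork(). A minimal usage
sketch follows; the producer function and buffer handling are made up, and the usual perf
util.h includes (unistd.h etc.) are assumed.

	static int produce(int fd, void *data __used)
	{
		return write(fd, "hello\n", 6) != 6;	/* runs in the child */
	}

	static void async_example(void)
	{
		char buf[32];
		struct async as = { .proc = produce, .data = NULL };

		if (start_async(&as))			/* forks; child runs produce()      */
			return;
		read(as.out, buf, sizeof(buf));		/* the caller reads from as.out ... */
		close(as.out);				/* ... and closes it                */
		finish_async(&as);			/* waits for the child              */
	}
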
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index eaba09306802..5249d5a1b0c2 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -16,7 +16,7 @@ int prefixcmp(const char *str, const char *prefix)
*/
char strbuf_slopbuf[1];
-void strbuf_init(struct strbuf *sb, size_t hint)
+void strbuf_init(struct strbuf *sb, ssize_t hint)
{
sb->alloc = sb->len = 0;
sb->buf = strbuf_slopbuf;
@@ -92,7 +92,8 @@ void strbuf_ltrim(struct strbuf *sb)
void strbuf_tolower(struct strbuf *sb)
{
- int i;
+ unsigned int i;
+
for (i = 0; i < sb->len; i++)
sb->buf[i] = tolower(sb->buf[i]);
}
@@ -259,12 +260,12 @@ size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f)
res = fread(sb->buf + sb->len, 1, size, f);
if (res > 0)
strbuf_setlen(sb, sb->len + res);
- else if (res < 0 && oldalloc == 0)
+ else if (oldalloc == 0)
strbuf_release(sb);
return res;
}
-ssize_t strbuf_read(struct strbuf *sb, int fd, size_t hint)
+ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
{
size_t oldlen = sb->len;
size_t oldalloc = sb->alloc;
@@ -293,7 +294,7 @@ ssize_t strbuf_read(struct strbuf *sb, int fd, size_t hint)
#define STRBUF_MAXLINK (2*PATH_MAX)
-int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)
+int strbuf_readlink(struct strbuf *sb, const char *path, ssize_t hint)
{
size_t oldalloc = sb->alloc;
@@ -301,7 +302,7 @@ int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)
hint = 32;
while (hint < STRBUF_MAXLINK) {
- int len;
+ ssize_t len;
strbuf_grow(sb, hint);
len = readlink(path, sb->buf, hint);
@@ -343,7 +344,7 @@ int strbuf_getline(struct strbuf *sb, FILE *fp, int term)
return 0;
}
-int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint)
+int strbuf_read_file(struct strbuf *sb, const char *path, ssize_t hint)
{
int fd, len;
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index 9ee908a3ec5d..d2aa86c014c1 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -50,7 +50,7 @@ struct strbuf {
#define STRBUF_INIT { 0, 0, strbuf_slopbuf }
/*----- strbuf life cycle -----*/
-extern void strbuf_init(struct strbuf *, size_t);
+extern void strbuf_init(struct strbuf *buf, ssize_t hint);
extern void strbuf_release(struct strbuf *);
extern char *strbuf_detach(struct strbuf *, size_t *);
extern void strbuf_attach(struct strbuf *, void *, size_t, size_t);
@@ -61,7 +61,7 @@ static inline void strbuf_swap(struct strbuf *a, struct strbuf *b) {
}
/*----- strbuf size related -----*/
-static inline size_t strbuf_avail(const struct strbuf *sb) {
+static inline ssize_t strbuf_avail(const struct strbuf *sb) {
return sb->alloc ? sb->alloc - sb->len - 1 : 0;
}
@@ -122,9 +122,9 @@ extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...);
extern size_t strbuf_fread(struct strbuf *, size_t, FILE *);
/* XXX: if read fails, any partial read is undone */
-extern ssize_t strbuf_read(struct strbuf *, int fd, size_t hint);
-extern int strbuf_read_file(struct strbuf *sb, const char *path, size_t hint);
-extern int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint);
+extern ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);
+extern int strbuf_read_file(struct strbuf *sb, const char *path, ssize_t hint);
+extern int strbuf_readlink(struct strbuf *sb, const char *path, ssize_t hint);
extern int strbuf_getline(struct strbuf *, FILE *, int);
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index 37b03255b425..3dca2f654cd0 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -1,7 +1,7 @@
#ifndef _PERF_STRING_H_
#define _PERF_STRING_H_
-#include "../types.h"
+#include "types.h"
int hex2u64(const char *ptr, u64 *val);
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
new file mode 100644
index 000000000000..025a78edfffe
--- /dev/null
+++ b/tools/perf/util/strlist.c
@@ -0,0 +1,184 @@
+/*
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include "strlist.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static struct str_node *str_node__new(const char *s, bool dupstr)
+{
+ struct str_node *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ if (dupstr) {
+ s = strdup(s);
+ if (s == NULL)
+ goto out_delete;
+ }
+ self->s = s;
+ }
+
+ return self;
+
+out_delete:
+ free(self);
+ return NULL;
+}
+
+static void str_node__delete(struct str_node *self, bool dupstr)
+{
+ if (dupstr)
+ free((void *)self->s);
+ free(self);
+}
+
+int strlist__add(struct strlist *self, const char *new_entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+ struct str_node *sn;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, new_entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ sn = str_node__new(new_entry, self->dupstr);
+ if (sn == NULL)
+ return -ENOMEM;
+
+ rb_link_node(&sn->rb_node, parent, p);
+ rb_insert_color(&sn->rb_node, &self->entries);
+
+ return 0;
+}
+
+int strlist__load(struct strlist *self, const char *filename)
+{
+ char entry[1024];
+ int err;
+ FILE *fp = fopen(filename, "r");
+
+ if (fp == NULL)
+ return errno;
+
+ while (fgets(entry, sizeof(entry), fp) != NULL) {
+ const size_t len = strlen(entry);
+
+ if (len == 0)
+ continue;
+ entry[len - 1] = '\0';
+
+ err = strlist__add(self, entry);
+ if (err != 0)
+ goto out;
+ }
+
+ err = 0;
+out:
+ fclose(fp);
+ return err;
+}
+
+void strlist__remove(struct strlist *self, struct str_node *sn)
+{
+ rb_erase(&sn->rb_node, &self->entries);
+ str_node__delete(sn, self->dupstr);
+}
+
+bool strlist__has_entry(struct strlist *self, const char *entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ struct str_node *sn;
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return true;
+ }
+
+ return false;
+}
+
+static int strlist__parse_list_entry(struct strlist *self, const char *s)
+{
+ if (strncmp(s, "file://", 7) == 0)
+ return strlist__load(self, s + 7);
+
+ return strlist__add(self, s);
+}
+
+int strlist__parse_list(struct strlist *self, const char *s)
+{
+ char *sep;
+ int err;
+
+ while ((sep = strchr(s, ',')) != NULL) {
+ *sep = '\0';
+ err = strlist__parse_list_entry(self, s);
+ *sep = ',';
+ if (err != 0)
+ return err;
+ s = sep + 1;
+ }
+
+ return *s ? strlist__parse_list_entry(self, s) : 0;
+}
+
+struct strlist *strlist__new(bool dupstr, const char *slist)
+{
+ struct strlist *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ self->entries = RB_ROOT;
+ self->dupstr = dupstr;
+ if (slist && strlist__parse_list(self, slist) != 0)
+ goto out_error;
+ }
+
+ return self;
+out_error:
+ free(self);
+ return NULL;
+}
+
+void strlist__delete(struct strlist *self)
+{
+ if (self != NULL) {
+ struct str_node *pos;
+ struct rb_node *next = rb_first(&self->entries);
+
+ while (next) {
+ pos = rb_entry(next, struct str_node, rb_node);
+ next = rb_next(&pos->rb_node);
+ strlist__remove(self, pos);
+ }
+ self->entries = RB_ROOT;
+ free(self);
+ }
+}
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
new file mode 100644
index 000000000000..2fdcfee87586
--- /dev/null
+++ b/tools/perf/util/strlist.h
@@ -0,0 +1,32 @@
+#ifndef STRLIST_H_
+#define STRLIST_H_
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+struct str_node {
+ struct rb_node rb_node;
+ const char *s;
+};
+
+struct strlist {
+ struct rb_root entries;
+ bool dupstr;
+};
+
+struct strlist *strlist__new(bool dupstr, const char *slist);
+void strlist__delete(struct strlist *self);
+
+void strlist__remove(struct strlist *self, struct str_node *sn);
+int strlist__load(struct strlist *self, const char *filename);
+int strlist__add(struct strlist *self, const char *str);
+
+bool strlist__has_entry(struct strlist *self, const char *entry);
+
+static inline bool strlist__empty(const struct strlist *self)
+{
+ return rb_first(&self->entries) == NULL;
+}
+
+int strlist__parse_list(struct strlist *self, const char *s);
+#endif /* STRLIST_H_ */
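
A brief usage sketch for the new strlist API (the symbol names are examples). Note that the
comma-separated list handed to strlist__new() may also contain file://<path> entries, which
strlist__load() reads one entry per line.

	#include <stdio.h>
	#include "strlist.h"

	static void strlist_example(void)
	{
		struct strlist *syms = strlist__new(true, "sys_write,sys_read");

		if (syms == NULL)
			return;
		strlist__add(syms, "schedule");
		if (strlist__has_entry(syms, "sys_read"))
			printf("sys_read is in the list\n");
		strlist__delete(syms);
	}
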
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 86e14375e74e..4683b67b5ee4 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -35,7 +35,7 @@ static struct symbol *symbol__new(u64 start, u64 len,
self = ((void *)self) + priv_size;
}
self->start = start;
- self->end = start + len - 1;
+ self->end = len ? start + len - 1 : start;
memcpy(self->name, name, namelen);
return self;
@@ -48,8 +48,12 @@ static void symbol__delete(struct symbol *self, unsigned int priv_size)
static size_t symbol__fprintf(struct symbol *self, FILE *fp)
{
- return fprintf(fp, " %llx-%llx %s\n",
+ if (!self->module)
+ return fprintf(fp, " %llx-%llx %s\n",
self->start, self->end, self->name);
+ else
+ return fprintf(fp, " %llx-%llx %s \t[%s]\n",
+ self->start, self->end, self->name, self->module->name);
}
struct dso *dso__new(const char *name, unsigned int sym_priv_size)
@@ -146,6 +150,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verb
char *line = NULL;
size_t n;
FILE *file = fopen("/proc/kallsyms", "r");
+ int count = 0;
if (file == NULL)
goto out_failure;
@@ -188,8 +193,10 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verb
if (filter && filter(self, sym))
symbol__delete(sym, self->sym_priv_size);
- else
+ else {
dso__insert_symbol(self, sym);
+ count++;
+ }
}
/*
@@ -212,7 +219,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verb
free(line);
fclose(file);
- return 0;
+ return count;
out_delete_line:
free(line);
@@ -307,6 +314,26 @@ static inline int elf_sym__is_function(const GElf_Sym *sym)
sym->st_size != 0;
}
+static inline int elf_sym__is_label(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_NOTYPE &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF &&
+ sym->st_shndx != SHN_ABS;
+}
+
+static inline const char *elf_sec__name(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return secstrs->d_buf + shdr->sh_name;
+}
+
+static inline int elf_sec__is_text(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
+}
+
static inline const char *elf_sym__name(const GElf_Sym *sym,
const Elf_Data *symstrs)
{
@@ -448,9 +475,9 @@ static int dso__synthesize_plt_symbols(struct dso *self, Elf *elf,
}
static int dso__load_sym(struct dso *self, int fd, const char *name,
- symbol_filter_t filter, int verbose)
+ symbol_filter_t filter, int verbose, struct module *mod)
{
- Elf_Data *symstrs;
+ Elf_Data *symstrs, *secstrs;
uint32_t nr_syms;
int err = -1;
uint32_t index;
@@ -458,7 +485,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
GElf_Shdr shdr;
Elf_Data *syms;
GElf_Sym sym;
- Elf_Scn *sec, *sec_dynsym;
+ Elf_Scn *sec, *sec_dynsym, *sec_strndx;
Elf *elf;
size_t dynsym_idx;
int nr = 0;
@@ -517,15 +544,29 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
if (symstrs == NULL)
goto out_elf_end;
+ sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
+ if (sec_strndx == NULL)
+ goto out_elf_end;
+
+ secstrs = elf_getdata(sec_strndx, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
-
+ self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
+ elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu.prelink_undo",
+ NULL) != NULL);
elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
struct symbol *f;
u64 obj_start;
+ struct section *section = NULL;
+ int is_label = elf_sym__is_label(&sym);
+ const char *section_name;
- if (!elf_sym__is_function(&sym))
+ if (!is_label && !elf_sym__is_function(&sym))
continue;
sec = elf_getscn(elf, sym.st_shndx);
@@ -533,9 +574,31 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
goto out_elf_end;
gelf_getshdr(sec, &shdr);
+
+ if (is_label && !elf_sec__is_text(&shdr, secstrs))
+ continue;
+
+ section_name = elf_sec__name(&shdr, secstrs);
obj_start = sym.st_value;
- sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ if (self->adjust_symbols) {
+ if (verbose >= 2)
+ printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n",
+ (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset);
+
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ }
+
+ if (mod) {
+ section = mod->sections->find_section(mod->sections, section_name);
+ if (section)
+ sym.st_value += section->vma;
+ else {
+ fprintf(stderr, "dso__load_sym() module %s lookup of %s failed\n",
+ mod->name, section_name);
+ goto out_elf_end;
+ }
+ }
f = symbol__new(sym.st_value, sym.st_size,
elf_sym__name(&sym, symstrs),
@@ -546,6 +609,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
if (filter && filter(self, f))
symbol__delete(f, self->sym_priv_size);
else {
+ f->module = mod;
dso__insert_symbol(self, f);
nr++;
}
@@ -569,6 +633,8 @@ int dso__load(struct dso *self, symbol_filter_t filter, int verbose)
if (!name)
return -1;
+ self->adjust_symbols = 0;
+
if (strncmp(self->name, "/tmp/perf-", 10) == 0)
return dso__load_perf_map(self, filter, verbose);
@@ -593,7 +659,7 @@ more:
fd = open(name, O_RDONLY);
} while (fd < 0);
- ret = dso__load_sym(self, fd, name, filter, verbose);
+ ret = dso__load_sym(self, fd, name, filter, verbose, NULL);
close(fd);
/*
@@ -607,6 +673,86 @@ out:
return ret;
}
+static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name,
+ symbol_filter_t filter, int verbose)
+{
+ struct module *mod = mod_dso__find_module(mods, name);
+ int err = 0, fd;
+
+ if (mod == NULL || !mod->active)
+ return err;
+
+ fd = open(mod->path, O_RDONLY);
+
+ if (fd < 0)
+ return err;
+
+ err = dso__load_sym(self, fd, name, filter, verbose, mod);
+ close(fd);
+
+ return err;
+}
+
+int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose)
+{
+ struct mod_dso *mods = mod_dso__new_dso("modules");
+ struct module *pos;
+ struct rb_node *next;
+ int err;
+
+ err = mod_dso__load_modules(mods);
+
+ if (err <= 0)
+ return err;
+
+ /*
+ * Iterate over modules, and load active symbols.
+ */
+ next = rb_first(&mods->mods);
+ while (next) {
+ pos = rb_entry(next, struct module, rb_node);
+ err = dso__load_module(self, mods, pos->name, filter, verbose);
+
+ if (err < 0)
+ break;
+
+ next = rb_next(&pos->rb_node);
+ }
+
+ if (err < 0) {
+ mod_dso__delete_modules(mods);
+ mod_dso__delete_self(mods);
+ }
+
+ return err;
+}
+
+static inline void dso__fill_symbol_holes(struct dso *self)
+{
+ struct symbol *prev = NULL;
+ struct rb_node *nd;
+
+ for (nd = rb_last(&self->syms); nd; nd = rb_prev(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+
+ if (prev) {
+ u64 hole = 0;
+ int alias = pos->start == prev->start;
+
+ if (!alias)
+ hole = prev->start - pos->end - 1;
+
+ if (hole || alias) {
+ if (alias)
+ pos->end = prev->end;
+ else if (hole)
+ pos->end = prev->start - 1;
+ }
+ }
+ prev = pos;
+ }
+}
+
static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
symbol_filter_t filter, int verbose)
{
@@ -615,21 +761,28 @@ static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
if (fd < 0)
return -1;
- err = dso__load_sym(self, fd, vmlinux, filter, verbose);
+ err = dso__load_sym(self, fd, vmlinux, filter, verbose, NULL);
+
+ if (err > 0)
+ dso__fill_symbol_holes(self);
+
close(fd);
return err;
}
int dso__load_kernel(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int verbose)
+ symbol_filter_t filter, int verbose, int modules)
{
int err = -1;
- if (vmlinux)
+ if (vmlinux) {
err = dso__load_vmlinux(self, vmlinux, filter, verbose);
+ if (err > 0 && modules)
+ err = dso__load_modules(self, filter, verbose);
+ }
- if (err)
+ if (err <= 0)
err = dso__load_kallsyms(self, filter, verbose);
return err;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index ea332e56e458..7918cffb23cd 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -2,9 +2,10 @@
#define _PERF_SYMBOL_ 1
#include <linux/types.h>
-#include "../types.h"
-#include "list.h"
-#include "rbtree.h"
+#include "types.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include "module.h"
struct symbol {
struct rb_node rb_node;
@@ -13,6 +14,7 @@ struct symbol {
u64 obj_start;
u64 hist_sum;
u64 *hist;
+ struct module *module;
void *priv;
char name[0];
};
@@ -20,8 +22,9 @@ struct symbol {
struct dso {
struct list_head node;
struct rb_root syms;
- unsigned int sym_priv_size;
struct symbol *(*find_symbol)(struct dso *, u64 ip);
+ unsigned int sym_priv_size;
+ unsigned char adjust_symbols;
char name[0];
};
@@ -40,7 +43,8 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym)
struct symbol *dso__find_symbol(struct dso *self, u64 ip);
int dso__load_kernel(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int verbose);
+ symbol_filter_t filter, int verbose, int modules);
+int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose);
int dso__load(struct dso *self, symbol_filter_t filter, int verbose);
size_t dso__fprintf(struct dso *self, FILE *fp);
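
A hypothetical call site for the extended interface (path and flag values are illustrative):
with modules set, symbols from loaded modules are added after a successful vmlinux load, and
/proc/kallsyms remains the fallback when that returns nothing.

	struct dso *kernel_dso = dso__new("[kernel]", 0);

	if (kernel_dso == NULL ||
	    dso__load_kernel(kernel_dso, "/boot/vmlinux", NULL, 0, 1) <= 0)
		fprintf(stderr, "couldn't load kernel symbols\n");
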
diff --git a/tools/perf/types.h b/tools/perf/util/types.h
index 5e75f9005940..5e75f9005940 100644
--- a/tools/perf/types.h
+++ b/tools/perf/util/types.h
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index b8cfed776d81..b4be6071c105 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -67,7 +67,6 @@
#include <assert.h>
#include <regex.h>
#include <utime.h>
-#ifndef __MINGW32__
#include <sys/wait.h>
#include <sys/poll.h>
#include <sys/socket.h>
@@ -81,20 +80,6 @@
#include <netdb.h>
#include <pwd.h>
#include <inttypes.h>
-#if defined(__CYGWIN__)
-#undef _XOPEN_SOURCE
-#include <grp.h>
-#define _XOPEN_SOURCE 600
-#include "compat/cygwin.h"
-#else
-#undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. */
-#include <grp.h>
-#define _ALL_SOURCE 1
-#endif
-#else /* __MINGW32__ */
-/* pull in Windows compatibility stuff */
-#include "compat/mingw.h"
-#endif /* __MINGW32__ */
#ifndef NO_ICONV
#include <iconv.h>
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c
index 6350d65f6d9e..4574ac28396f 100644
--- a/tools/perf/util/wrapper.c
+++ b/tools/perf/util/wrapper.c
@@ -7,7 +7,7 @@
* There's no pack memory to release - but stay close to the Git
* version so wrap this away:
*/
-static inline void release_pack_memory(size_t size, int flag)
+static inline void release_pack_memory(size_t size __used, int flag __used)
{
}
@@ -59,7 +59,8 @@ void *xmemdupz(const void *data, size_t len)
char *xstrndup(const char *str, size_t len)
{
char *p = memchr(str, '\0', len);
- return xmemdupz(str, p ? p - str : len);
+
+ return xmemdupz(str, p ? (size_t)(p - str) : len);
}
void *xrealloc(void *ptr, size_t size)
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
new file mode 100644
index 000000000000..daece36c0a57
--- /dev/null
+++ b/virt/kvm/Kconfig
@@ -0,0 +1,14 @@
+# KVM common configuration items and defaults
+
+config HAVE_KVM
+ bool
+
+config HAVE_KVM_IRQCHIP
+ bool
+
+config HAVE_KVM_EVENTFD
+ bool
+ select EVENTFD
+
+config KVM_APIC_ARCHITECTURE
+ bool
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 5ae620d32fac..0352f81ecc0b 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -14,32 +14,28 @@
#include "coalesced_mmio.h"
-static int coalesced_mmio_in_range(struct kvm_io_device *this,
- gpa_t addr, int len, int is_write)
+static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
+{
+ return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
+}
+
+static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
+ gpa_t addr, int len)
{
- struct kvm_coalesced_mmio_dev *dev =
- (struct kvm_coalesced_mmio_dev*)this->private;
struct kvm_coalesced_mmio_zone *zone;
- int next;
+ struct kvm_coalesced_mmio_ring *ring;
+ unsigned avail;
int i;
- if (!is_write)
- return 0;
-
- /* kvm->lock is taken by the caller and must be not released before
- * dev.read/write
- */
-
/* Are we able to batch it ? */
/* last is the first free entry
* check if we don't meet the first used entry
* there is always one unused entry in the buffer
*/
-
- next = (dev->kvm->coalesced_mmio_ring->last + 1) %
- KVM_COALESCED_MMIO_MAX;
- if (next == dev->kvm->coalesced_mmio_ring->first) {
+ ring = dev->kvm->coalesced_mmio_ring;
+ avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+ if (avail < KVM_MAX_VCPUS) {
/* full */
return 0;
}
@@ -60,14 +56,15 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
return 0;
}
-static void coalesced_mmio_write(struct kvm_io_device *this,
- gpa_t addr, int len, const void *val)
+static int coalesced_mmio_write(struct kvm_io_device *this,
+ gpa_t addr, int len, const void *val)
{
- struct kvm_coalesced_mmio_dev *dev =
- (struct kvm_coalesced_mmio_dev*)this->private;
+ struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+ if (!coalesced_mmio_in_range(dev, addr, len))
+ return -EOPNOTSUPP;
- /* kvm->lock must be taken by caller before call to in_range()*/
+ spin_lock(&dev->lock);
/* copy data in first free entry of the ring */
@@ -76,13 +73,22 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
memcpy(ring->coalesced_mmio[ring->last].data, val, len);
smp_wmb();
ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+ spin_unlock(&dev->lock);
+ return 0;
}
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
- kfree(this);
+ struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
+
+ kfree(dev);
}
+static const struct kvm_io_device_ops coalesced_mmio_ops = {
+ .write = coalesced_mmio_write,
+ .destructor = coalesced_mmio_destructor,
+};
+
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
struct kvm_coalesced_mmio_dev *dev;
@@ -90,13 +96,11 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- dev->dev.write = coalesced_mmio_write;
- dev->dev.in_range = coalesced_mmio_in_range;
- dev->dev.destructor = coalesced_mmio_destructor;
- dev->dev.private = dev;
+ spin_lock_init(&dev->lock);
+ kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
dev->kvm = kvm;
kvm->coalesced_mmio_dev = dev;
- kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);
+ kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
return 0;
}
@@ -109,16 +113,16 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
if (dev == NULL)
return -EINVAL;
- mutex_lock(&kvm->lock);
+ down_write(&kvm->slots_lock);
if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
- mutex_unlock(&kvm->lock);
+ up_write(&kvm->slots_lock);
return -ENOBUFS;
}
dev->zone[dev->nb_zones] = *zone;
dev->nb_zones++;
- mutex_unlock(&kvm->lock);
+ up_write(&kvm->slots_lock);
return 0;
}
@@ -132,7 +136,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
if (dev == NULL)
return -EINVAL;
- mutex_lock(&kvm->lock);
+ down_write(&kvm->slots_lock);
i = dev->nb_zones;
while(i) {
@@ -150,7 +154,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
i--;
}
- mutex_unlock(&kvm->lock);
+ up_write(&kvm->slots_lock);
return 0;
}
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 5ac0ec628461..4b49f27fa31e 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -12,6 +12,7 @@
struct kvm_coalesced_mmio_dev {
struct kvm_io_device dev;
struct kvm *kvm;
+ spinlock_t lock;
int nb_zones;
struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
};
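
For reference, a hedged sketch of the userspace side that drives these handlers: the existing
KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO VM ioctls; the guest-physical
address and size below are invented.

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void coalesced_example(int vm_fd)
	{
		struct kvm_coalesced_mmio_zone zone = {
			.addr = 0xd0000000,	/* example MMIO base */
			.size = 0x1000,
		};

		if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
			perror("KVM_REGISTER_COALESCED_MMIO");
		/* ... run the guest; writes to the zone are batched in the ring ... */
		ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
	}
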
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
new file mode 100644
index 000000000000..4092b8dcd510
--- /dev/null
+++ b/virt/kvm/eventfd.c
@@ -0,0 +1,329 @@
+/*
+ * kvm eventfd support - use eventfd objects to signal various KVM events
+ *
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/workqueue.h>
+#include <linux/syscalls.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/eventfd.h>
+
+/*
+ * --------------------------------------------------------------------
+ * irqfd: Allows an fd to be used to inject an interrupt to the guest
+ *
+ * Credit goes to Avi Kivity for the original idea.
+ * --------------------------------------------------------------------
+ */
+
+struct _irqfd {
+ struct kvm *kvm;
+ struct eventfd_ctx *eventfd;
+ int gsi;
+ struct list_head list;
+ poll_table pt;
+ wait_queue_head_t *wqh;
+ wait_queue_t wait;
+ struct work_struct inject;
+ struct work_struct shutdown;
+};
+
+static struct workqueue_struct *irqfd_cleanup_wq;
+
+static void
+irqfd_inject(struct work_struct *work)
+{
+ struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
+ struct kvm *kvm = irqfd->kvm;
+
+ mutex_lock(&kvm->irq_lock);
+ kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
+ kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
+ mutex_unlock(&kvm->irq_lock);
+}
+
+/*
+ * Race-free decouple logic (ordering is critical)
+ */
+static void
+irqfd_shutdown(struct work_struct *work)
+{
+ struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
+
+ /*
+ * Synchronize with the wait-queue and unhook ourselves to prevent
+ * further events.
+ */
+ remove_wait_queue(irqfd->wqh, &irqfd->wait);
+
+ /*
+ * We know no new events will be scheduled at this point, so block
+ * until all previously outstanding events have completed
+ */
+ flush_work(&irqfd->inject);
+
+ /*
+ * It is now safe to release the object's resources
+ */
+ eventfd_ctx_put(irqfd->eventfd);
+ kfree(irqfd);
+}
+
+
+/* assumes kvm->irqfds.lock is held */
+static bool
+irqfd_is_active(struct _irqfd *irqfd)
+{
+ return list_empty(&irqfd->list) ? false : true;
+}
+
+/*
+ * Mark the irqfd as inactive and schedule it for removal
+ *
+ * assumes kvm->irqfds.lock is held
+ */
+static void
+irqfd_deactivate(struct _irqfd *irqfd)
+{
+ BUG_ON(!irqfd_is_active(irqfd));
+
+ list_del_init(&irqfd->list);
+
+ queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
+}
+
+/*
+ * Called with wqh->lock held and interrupts disabled
+ */
+static int
+irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+ struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
+ unsigned long flags = (unsigned long)key;
+
+ if (flags & POLLIN)
+ /* An event has been signaled, inject an interrupt */
+ schedule_work(&irqfd->inject);
+
+ if (flags & POLLHUP) {
+ /* The eventfd is closing, detach from KVM */
+ struct kvm *kvm = irqfd->kvm;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kvm->irqfds.lock, flags);
+
+ /*
+ * We must check if someone deactivated the irqfd before
+ * we could acquire the irqfds.lock since the item is
+ * deactivated from the KVM side before it is unhooked from
+ * the wait-queue. If it is already deactivated, we can
+ * simply return knowing the other side will cleanup for us.
+ * We cannot race against the irqfd going away since the
+ * other side is required to acquire wqh->lock, which we hold
+ */
+ if (irqfd_is_active(irqfd))
+ irqfd_deactivate(irqfd);
+
+ spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
+ }
+
+ return 0;
+}
+
+static void
+irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
+ poll_table *pt)
+{
+ struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
+
+ irqfd->wqh = wqh;
+ add_wait_queue(wqh, &irqfd->wait);
+}
+
+static int
+kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
+{
+ struct _irqfd *irqfd;
+ struct file *file = NULL;
+ struct eventfd_ctx *eventfd = NULL;
+ int ret;
+ unsigned int events;
+
+ irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
+ if (!irqfd)
+ return -ENOMEM;
+
+ irqfd->kvm = kvm;
+ irqfd->gsi = gsi;
+ INIT_LIST_HEAD(&irqfd->list);
+ INIT_WORK(&irqfd->inject, irqfd_inject);
+ INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
+
+ file = eventfd_fget(fd);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto fail;
+ }
+
+ eventfd = eventfd_ctx_fileget(file);
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto fail;
+ }
+
+ irqfd->eventfd = eventfd;
+
+ /*
+ * Install our own custom wake-up handling so we are notified via
+ * a callback whenever someone signals the underlying eventfd
+ */
+ init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
+ init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
+
+ events = file->f_op->poll(file, &irqfd->pt);
+
+ spin_lock_irq(&kvm->irqfds.lock);
+ list_add_tail(&irqfd->list, &kvm->irqfds.items);
+ spin_unlock_irq(&kvm->irqfds.lock);
+
+ /*
+ * Check if there was an event already pending on the eventfd
+ * before we registered, and trigger it as if we didn't miss it.
+ */
+ if (events & POLLIN)
+ schedule_work(&irqfd->inject);
+
+ /*
+ * do not drop the file until the irqfd is fully initialized, otherwise
+ * we might race against the POLLHUP
+ */
+ fput(file);
+
+ return 0;
+
+fail:
+ if (eventfd && !IS_ERR(eventfd))
+ eventfd_ctx_put(eventfd);
+
+ if (file && !IS_ERR(file))
+ fput(file);
+
+ kfree(irqfd);
+ return ret;
+}
+
+void
+kvm_irqfd_init(struct kvm *kvm)
+{
+ spin_lock_init(&kvm->irqfds.lock);
+ INIT_LIST_HEAD(&kvm->irqfds.items);
+}
+
+/*
+ * shutdown any irqfd's that match fd+gsi
+ */
+static int
+kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
+{
+ struct _irqfd *irqfd, *tmp;
+ struct eventfd_ctx *eventfd;
+
+ eventfd = eventfd_ctx_fdget(fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ spin_lock_irq(&kvm->irqfds.lock);
+
+ list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
+ if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
+ irqfd_deactivate(irqfd);
+ }
+
+ spin_unlock_irq(&kvm->irqfds.lock);
+ eventfd_ctx_put(eventfd);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed
+ * so that we guarantee there will not be any more interrupts on this
+ * gsi once this deassign function returns.
+ */
+ flush_workqueue(irqfd_cleanup_wq);
+
+ return 0;
+}
+
+int
+kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+{
+ if (flags & KVM_IRQFD_FLAG_DEASSIGN)
+ return kvm_irqfd_deassign(kvm, fd, gsi);
+
+ return kvm_irqfd_assign(kvm, fd, gsi);
+}
+
+/*
+ * This function is called as the kvm VM fd is being released. Shut down all
+ * irqfds that still remain open.
+ */
+void
+kvm_irqfd_release(struct kvm *kvm)
+{
+ struct _irqfd *irqfd, *tmp;
+
+ spin_lock_irq(&kvm->irqfds.lock);
+
+ list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
+ irqfd_deactivate(irqfd);
+
+ spin_unlock_irq(&kvm->irqfds.lock);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed
+ * since we do not take a kvm* reference.
+ */
+ flush_workqueue(irqfd_cleanup_wq);
+
+}
+
+/*
+ * create a host-wide workqueue for issuing deferred shutdown requests
+ * aggregated from all vm* instances. We need our own isolated single-thread
+ * queue to prevent deadlock against flushing the normal work-queue.
+ */
+static int __init irqfd_module_init(void)
+{
+ irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
+ if (!irqfd_cleanup_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit irqfd_module_exit(void)
+{
+ destroy_workqueue(irqfd_cleanup_wq);
+}
+
+module_init(irqfd_module_init);
+module_exit(irqfd_module_exit);
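
In passing, for readers of the irqfd code above: a minimal userspace sketch of how this
interface is meant to be driven. An eventfd is attached to a guest GSI with the new
KVM_IRQFD vm ioctl; signalling the eventfd then injects that interrupt, and passing
KVM_IRQFD_FLAG_DEASSIGN with the same fd/gsi detaches it again. The struct kvm_irqfd
field names are taken from the ioctl handler later in this diff; the vm_fd setup and
the trimmed error handling are assumptions, not part of the patch.

    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Sketch only: vm_fd is assumed to come from a prior KVM_CREATE_VM. */
    static int attach_irqfd(int vm_fd, unsigned int gsi)
    {
            struct kvm_irqfd req = { 0 };
            int efd = eventfd(0, 0);

            if (efd < 0)
                    return -1;

            req.fd  = efd;
            req.gsi = gsi;
            if (ioctl(vm_fd, KVM_IRQFD, &req) < 0)
                    return -1;

            return efd;
    }

    static void inject_irq(int efd)
    {
            uint64_t one = 1;

            /* eventfd signal -> POLLIN -> irqfd_wakeup() -> irqfd_inject() */
            write(efd, &one, sizeof(one));
    }

    /* Detach: repeat the ioctl with the same fd/gsi and
     * req.flags = KVM_IRQFD_FLAG_DEASSIGN. */
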
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 1eddae94bab3..92496ff3d82d 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -95,8 +95,6 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
pent->fields.remote_irr = 1;
}
- if (!pent->fields.trig_mode)
- ioapic->irr &= ~(1 << idx);
return injected;
}
@@ -105,6 +103,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
bool mask_before, mask_after;
+ union kvm_ioapic_redirect_entry *e;
switch (ioapic->ioregsel) {
case IOAPIC_REG_VERSION:
@@ -124,19 +123,21 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
ioapic_debug("change redir index %x val %x\n", index, val);
if (index >= IOAPIC_NUM_PINS)
return;
- mask_before = ioapic->redirtbl[index].fields.mask;
+ e = &ioapic->redirtbl[index];
+ mask_before = e->fields.mask;
if (ioapic->ioregsel & 1) {
- ioapic->redirtbl[index].bits &= 0xffffffff;
- ioapic->redirtbl[index].bits |= (u64) val << 32;
+ e->bits &= 0xffffffff;
+ e->bits |= (u64) val << 32;
} else {
- ioapic->redirtbl[index].bits &= ~0xffffffffULL;
- ioapic->redirtbl[index].bits |= (u32) val;
- ioapic->redirtbl[index].fields.remote_irr = 0;
+ e->bits &= ~0xffffffffULL;
+ e->bits |= (u32) val;
+ e->fields.remote_irr = 0;
}
- mask_after = ioapic->redirtbl[index].fields.mask;
+ mask_after = e->fields.mask;
if (mask_before != mask_after)
kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
- if (ioapic->irr & (1 << index))
+ if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
+ && ioapic->irr & (1 << index))
ioapic_service(ioapic, index);
break;
}
@@ -165,7 +166,9 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
/* Always delivery PIT interrupt to vcpu 0 */
if (irq == 0) {
irqe.dest_mode = 0; /* Physical mode. */
- irqe.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
+ /* need to read apic_id from apic register since
+ * it can be rewritten */
+ irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
}
#endif
return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
@@ -184,9 +187,10 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
if (!level)
ioapic->irr &= ~mask;
else {
+ int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
ioapic->irr |= mask;
- if ((!entry.fields.trig_mode && old_irr != ioapic->irr)
- || !entry.fields.remote_irr)
+ if ((edge && old_irr != ioapic->irr) ||
+ (!edge && !entry.fields.remote_irr))
ret = ioapic_service(ioapic, irq);
}
}
@@ -220,24 +224,29 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
__kvm_ioapic_update_eoi(ioapic, i, trigger_mode);
}
-static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr,
- int len, int is_write)
+static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
- struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
+ return container_of(dev, struct kvm_ioapic, dev);
+}
+static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
+{
return ((addr >= ioapic->base_address &&
(addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}
-static void ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
- void *val)
+static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
+ void *val)
{
- struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
+ struct kvm_ioapic *ioapic = to_ioapic(this);
u32 result;
+ if (!ioapic_in_range(ioapic, addr))
+ return -EOPNOTSUPP;
ioapic_debug("addr %lx\n", (unsigned long)addr);
ASSERT(!(addr & 0xf)); /* check alignment */
+ mutex_lock(&ioapic->kvm->irq_lock);
addr &= 0xff;
switch (addr) {
case IOAPIC_REG_SELECT:
@@ -264,22 +273,28 @@ static void ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
default:
printk(KERN_WARNING "ioapic: wrong length %d\n", len);
}
+ mutex_unlock(&ioapic->kvm->irq_lock);
+ return 0;
}
-static void ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
- const void *val)
+static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
+ const void *val)
{
- struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
+ struct kvm_ioapic *ioapic = to_ioapic(this);
u32 data;
+ if (!ioapic_in_range(ioapic, addr))
+ return -EOPNOTSUPP;
ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
(void*)addr, len, val);
ASSERT(!(addr & 0xf)); /* check alignment */
+
+ mutex_lock(&ioapic->kvm->irq_lock);
if (len == 4 || len == 8)
data = *(u32 *) val;
else {
printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
- return;
+ goto unlock;
}
addr &= 0xff;
@@ -300,6 +315,9 @@ static void ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
default:
break;
}
+unlock:
+ mutex_unlock(&ioapic->kvm->irq_lock);
+ return 0;
}
void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
@@ -314,6 +332,11 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->id = 0;
}
+static const struct kvm_io_device_ops ioapic_mmio_ops = {
+ .read = ioapic_mmio_read,
+ .write = ioapic_mmio_write,
+};
+
int kvm_ioapic_init(struct kvm *kvm)
{
struct kvm_ioapic *ioapic;
@@ -323,12 +346,9 @@ int kvm_ioapic_init(struct kvm *kvm)
return -ENOMEM;
kvm->arch.vioapic = ioapic;
kvm_ioapic_reset(ioapic);
- ioapic->dev.read = ioapic_mmio_read;
- ioapic->dev.write = ioapic_mmio_write;
- ioapic->dev.in_range = ioapic_in_range;
- ioapic->dev.private = ioapic;
+ kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
ioapic->kvm = kvm;
- kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
+ kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &ioapic->dev);
return 0;
}
diff --git a/virt/kvm/iodev.h b/virt/kvm/iodev.h
index 55e8846ac3a6..12fd3caffd2b 100644
--- a/virt/kvm/iodev.h
+++ b/virt/kvm/iodev.h
@@ -17,49 +17,54 @@
#define __KVM_IODEV_H__
#include <linux/kvm_types.h>
+#include <asm/errno.h>
-struct kvm_io_device {
- void (*read)(struct kvm_io_device *this,
+struct kvm_io_device;
+
+/**
+ * kvm_io_device_ops are called under kvm slots_lock.
+ * read and write handlers return 0 if the transaction has been handled,
+ * or non-zero to have it passed to the next device.
+ **/
+struct kvm_io_device_ops {
+ int (*read)(struct kvm_io_device *this,
+ gpa_t addr,
+ int len,
+ void *val);
+ int (*write)(struct kvm_io_device *this,
gpa_t addr,
int len,
- void *val);
- void (*write)(struct kvm_io_device *this,
- gpa_t addr,
- int len,
- const void *val);
- int (*in_range)(struct kvm_io_device *this, gpa_t addr, int len,
- int is_write);
+ const void *val);
void (*destructor)(struct kvm_io_device *this);
+};
- void *private;
+
+struct kvm_io_device {
+ const struct kvm_io_device_ops *ops;
};
-static inline void kvm_iodevice_read(struct kvm_io_device *dev,
- gpa_t addr,
- int len,
- void *val)
+static inline void kvm_iodevice_init(struct kvm_io_device *dev,
+ const struct kvm_io_device_ops *ops)
{
- dev->read(dev, addr, len, val);
+ dev->ops = ops;
}
-static inline void kvm_iodevice_write(struct kvm_io_device *dev,
- gpa_t addr,
- int len,
- const void *val)
+static inline int kvm_iodevice_read(struct kvm_io_device *dev,
+ gpa_t addr, int l, void *v)
{
- dev->write(dev, addr, len, val);
+ return dev->ops->read ? dev->ops->read(dev, addr, l, v) : -EOPNOTSUPP;
}
-static inline int kvm_iodevice_inrange(struct kvm_io_device *dev,
- gpa_t addr, int len, int is_write)
+static inline int kvm_iodevice_write(struct kvm_io_device *dev,
+ gpa_t addr, int l, const void *v)
{
- return dev->in_range(dev, addr, len, is_write);
+ return dev->ops->write ? dev->ops->write(dev, addr, l, v) : -EOPNOTSUPP;
}
static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
- if (dev->destructor)
- dev->destructor(dev);
+ if (dev->ops->destructor)
+ dev->ops->destructor(dev);
}
#endif /* __KVM_IODEV_H__ */
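
To make the new contract concrete: a minimal sketch of a device written against
kvm_io_device_ops, in the same style as the ioapic conversion earlier in this diff.
A handler returns 0 once it has claimed the transaction and -EOPNOTSUPP to let
kvm_io_bus_write()/kvm_io_bus_read() offer it to the next device. The mydev type,
its base address and its single 32-bit register are hypothetical; only
kvm_io_device_ops, kvm_iodevice_init() and kvm_io_bus_register_dev() come from the
patch.

    struct mydev {
            struct kvm_io_device dev;
            gpa_t base;
            u32 reg;
    };

    static int mydev_read(struct kvm_io_device *this, gpa_t addr, int len, void *val)
    {
            struct mydev *m = container_of(this, struct mydev, dev);

            if (addr != m->base || len != 4)
                    return -EOPNOTSUPP;     /* not ours: try the next device */
            *(u32 *)val = m->reg;
            return 0;                       /* handled */
    }

    static int mydev_write(struct kvm_io_device *this, gpa_t addr, int len,
                           const void *val)
    {
            struct mydev *m = container_of(this, struct mydev, dev);

            if (addr != m->base || len != 4)
                    return -EOPNOTSUPP;
            m->reg = *(const u32 *)val;
            return 0;
    }

    static const struct kvm_io_device_ops mydev_ops = {
            .read  = mydev_read,
            .write = mydev_write,
    };

    /* Hookup, following kvm_ioapic_init():
     *      kvm_iodevice_init(&m->dev, &mydev_ops);
     *      kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &m->dev);
     */
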
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index a8bd466d00cc..c380bf006276 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -20,6 +20,7 @@
*/
#include <linux/kvm_host.h>
+#include <trace/events/kvm.h>
#include <asm/msidef.h>
#ifdef CONFIG_IA64
@@ -62,14 +63,14 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
int i, r = -1;
struct kvm_vcpu *vcpu, *lowest = NULL;
+ WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
kvm_is_dm_lowest_prio(irq))
printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
- vcpu = kvm->vcpus[i];
-
- if (!vcpu || !kvm_apic_present(vcpu))
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_apic_present(vcpu))
continue;
if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
@@ -113,7 +114,7 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
}
-/* This should be called with the kvm->lock mutex held
+/* This should be called with the kvm->irq_lock mutex held
* Return value:
* < 0 Interrupt was ignored (masked or not delivered for other reasons)
* = 0 Interrupt was coalesced (previous irq is still pending)
@@ -125,6 +126,10 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
unsigned long *irq_state, sig_level;
int ret = -1;
+ trace_kvm_set_irq(irq, level, irq_source_id);
+
+ WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
if (irq < KVM_IOAPIC_NUM_PINS) {
irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
@@ -159,6 +164,8 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
struct hlist_node *n;
unsigned gsi = pin;
+ trace_kvm_ack_irq(irqchip, pin);
+
list_for_each_entry(e, &kvm->irq_routing, link)
if (e->irqchip.irqchip == irqchip &&
e->irqchip.pin == pin) {
@@ -174,19 +181,26 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian)
{
+ mutex_lock(&kvm->irq_lock);
hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
+ mutex_unlock(&kvm->irq_lock);
}
-void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+ struct kvm_irq_ack_notifier *kian)
{
+ mutex_lock(&kvm->irq_lock);
hlist_del_init(&kian->link);
+ mutex_unlock(&kvm->irq_lock);
}
-/* The caller must hold kvm->lock mutex */
int kvm_request_irq_source_id(struct kvm *kvm)
{
unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
- int irq_source_id = find_first_zero_bit(bitmap,
+ int irq_source_id;
+
+ mutex_lock(&kvm->irq_lock);
+ irq_source_id = find_first_zero_bit(bitmap,
sizeof(kvm->arch.irq_sources_bitmap));
if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
@@ -196,6 +210,7 @@ int kvm_request_irq_source_id(struct kvm *kvm)
ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
set_bit(irq_source_id, bitmap);
+ mutex_unlock(&kvm->irq_lock);
return irq_source_id;
}
@@ -206,6 +221,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
+ mutex_lock(&kvm->irq_lock);
if (irq_source_id < 0 ||
irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
@@ -214,19 +230,24 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
+ mutex_unlock(&kvm->irq_lock);
}
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
struct kvm_irq_mask_notifier *kimn)
{
+ mutex_lock(&kvm->irq_lock);
kimn->irq = irq;
hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
+ mutex_unlock(&kvm->irq_lock);
}
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
struct kvm_irq_mask_notifier *kimn)
{
+ mutex_lock(&kvm->irq_lock);
hlist_del(&kimn->link);
+ mutex_unlock(&kvm->irq_lock);
}
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
@@ -234,6 +255,8 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
struct kvm_irq_mask_notifier *kimn;
struct hlist_node *n;
+ WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
if (kimn->irq == irq)
kimn->func(kimn, mask);
@@ -249,7 +272,9 @@ static void __kvm_free_irq_routing(struct list_head *irq_routing)
void kvm_free_irq_routing(struct kvm *kvm)
{
+ mutex_lock(&kvm->irq_lock);
__kvm_free_irq_routing(&kvm->irq_routing);
+ mutex_unlock(&kvm->irq_lock);
}
static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
@@ -323,13 +348,13 @@ int kvm_set_irq_routing(struct kvm *kvm,
e = NULL;
}
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->irq_lock);
list_splice(&kvm->irq_routing, &tmp);
INIT_LIST_HEAD(&kvm->irq_routing);
list_splice(&irq_list, &kvm->irq_routing);
INIT_LIST_HEAD(&irq_list);
list_splice(&tmp, &irq_list);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->irq_lock);
r = 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 764554350ed8..e8abdd94e0a5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -59,9 +59,18 @@
#include "irq.h"
#endif
+#define CREATE_TRACE_POINTS
+#include <trace/events/kvm.h>
+
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+/*
+ * Ordering of locks:
+ *
+ * kvm->slots_lock --> kvm->lock --> kvm->irq_lock
+ */
+
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
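
The lock ordering documented above reads most naturally as a nesting discipline. A
minimal sketch (hypothetical helper, not part of the patch) of a path that
legitimately needs all three locks:

    static void example_nested_locking(struct kvm *kvm)
    {
            down_read(&kvm->slots_lock);            /* outermost */
            mutex_lock(&kvm->lock);
            mutex_lock(&kvm->irq_lock);             /* innermost */

            /* ... work that needs all three ... */

            mutex_unlock(&kvm->irq_lock);
            mutex_unlock(&kvm->lock);
            up_read(&kvm->slots_lock);
    }
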
@@ -79,6 +88,8 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
static bool kvm_rebooting;
+static bool largepages_enabled = true;
+
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
int assigned_dev_id)
@@ -120,17 +131,13 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
struct kvm_assigned_dev_kernel *assigned_dev;
struct kvm *kvm;
- int irq, i;
+ int i;
assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
interrupt_work);
kvm = assigned_dev->kvm;
- /* This is taken to safely inject irq inside the guest. When
- * the interrupt injection (or the ioapic code) uses a
- * finer-grained lock, update this
- */
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->irq_lock);
spin_lock_irq(&assigned_dev->assigned_dev_lock);
if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
struct kvm_guest_msix_entry *guest_entries =
@@ -143,23 +150,13 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
kvm_set_irq(assigned_dev->kvm,
assigned_dev->irq_source_id,
guest_entries[i].vector, 1);
- irq = assigned_dev->host_msix_entries[i].vector;
- if (irq != 0)
- enable_irq(irq);
- assigned_dev->host_irq_disabled = false;
}
- } else {
+ } else
kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
assigned_dev->guest_irq, 1);
- if (assigned_dev->irq_requested_type &
- KVM_DEV_IRQ_GUEST_MSI) {
- enable_irq(assigned_dev->host_irq);
- assigned_dev->host_irq_disabled = false;
- }
- }
spin_unlock_irq(&assigned_dev->assigned_dev_lock);
- mutex_unlock(&assigned_dev->kvm->lock);
+ mutex_unlock(&assigned_dev->kvm->irq_lock);
}
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
@@ -179,8 +176,10 @@ static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
schedule_work(&assigned_dev->interrupt_work);
- disable_irq_nosync(irq);
- assigned_dev->host_irq_disabled = true;
+ if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
+ disable_irq_nosync(irq);
+ assigned_dev->host_irq_disabled = true;
+ }
out:
spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
@@ -215,7 +214,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
static void deassign_guest_irq(struct kvm *kvm,
struct kvm_assigned_dev_kernel *assigned_dev)
{
- kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
+ kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
assigned_dev->ack_notifier.gsi = -1;
if (assigned_dev->irq_source_id != -1)
@@ -417,6 +416,7 @@ static int assigned_device_enable_guest_msi(struct kvm *kvm,
{
dev->guest_irq = irq->guest_irq;
dev->ack_notifier.gsi = -1;
+ dev->host_irq_disabled = false;
return 0;
}
#endif
@@ -427,6 +427,7 @@ static int assigned_device_enable_guest_msix(struct kvm *kvm,
{
dev->guest_irq = irq->guest_irq;
dev->ack_notifier.gsi = -1;
+ dev->host_irq_disabled = false;
return 0;
}
#endif
@@ -693,11 +694,6 @@ out:
}
#endif
-static inline int valid_vcpu(int n)
-{
- return likely(n >= 0 && n < KVM_MAX_VCPUS);
-}
-
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
if (pfn_valid(pfn)) {
@@ -742,14 +738,11 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
bool called = true;
struct kvm_vcpu *vcpu;
- if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
- cpumask_clear(cpus);
+ zalloc_cpumask_var(&cpus, GFP_ATOMIC);
me = get_cpu();
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
+ spin_lock(&kvm->requests_lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
if (test_and_set_bit(req, &vcpu->requests))
continue;
cpu = vcpu->cpu;
@@ -762,6 +755,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
smp_call_function_many(cpus, ack_flush, NULL, 1);
else
called = false;
+ spin_unlock(&kvm->requests_lock);
put_cpu();
free_cpumask_var(cpus);
return called;
@@ -982,8 +976,11 @@ static struct kvm *kvm_create_vm(void)
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
+ spin_lock_init(&kvm->requests_lock);
kvm_io_bus_init(&kvm->pio_bus);
+ kvm_irqfd_init(kvm);
mutex_init(&kvm->lock);
+ mutex_init(&kvm->irq_lock);
kvm_io_bus_init(&kvm->mmio_bus);
init_rwsem(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
@@ -1003,19 +1000,25 @@ out:
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
+ int i;
+
if (!dont || free->rmap != dont->rmap)
vfree(free->rmap);
if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
vfree(free->dirty_bitmap);
- if (!dont || free->lpage_info != dont->lpage_info)
- vfree(free->lpage_info);
+
+ for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+ if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
+ vfree(free->lpage_info[i]);
+ free->lpage_info[i] = NULL;
+ }
+ }
free->npages = 0;
free->dirty_bitmap = NULL;
free->rmap = NULL;
- free->lpage_info = NULL;
}
void kvm_free_physmem(struct kvm *kvm)
@@ -1068,6 +1071,8 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
{
struct kvm *kvm = filp->private_data;
+ kvm_irqfd_release(kvm);
+
kvm_put_kvm(kvm);
return 0;
}
@@ -1087,7 +1092,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
int r;
gfn_t base_gfn;
unsigned long npages, ugfn;
- unsigned long largepages, i;
+ int lpages;
+ unsigned long i, j;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
@@ -1161,31 +1167,48 @@ int __kvm_set_memory_region(struct kvm *kvm,
else
new.userspace_addr = 0;
}
- if (npages && !new.lpage_info) {
- largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
- largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
+ if (!npages)
+ goto skip_lpage;
+
+ for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+ int level = i + 2;
- new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
+ /* Avoid unused variable warning if no large pages */
+ (void)level;
- if (!new.lpage_info)
+ if (new.lpage_info[i])
+ continue;
+
+ lpages = 1 + (base_gfn + npages - 1) /
+ KVM_PAGES_PER_HPAGE(level);
+ lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
+
+ new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
+
+ if (!new.lpage_info[i])
goto out_free;
- memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
+ memset(new.lpage_info[i], 0,
+ lpages * sizeof(*new.lpage_info[i]));
- if (base_gfn % KVM_PAGES_PER_HPAGE)
- new.lpage_info[0].write_count = 1;
- if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
- new.lpage_info[largepages-1].write_count = 1;
+ if (base_gfn % KVM_PAGES_PER_HPAGE(level))
+ new.lpage_info[i][0].write_count = 1;
+ if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
+ new.lpage_info[i][lpages - 1].write_count = 1;
ugfn = new.userspace_addr >> PAGE_SHIFT;
/*
* If the gfn and userspace address are not aligned wrt each
- * other, disable large page support for this slot
+ * other, or if explicitly asked to, disable large page
+ * support for this slot
*/
- if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1))
- for (i = 0; i < largepages; ++i)
- new.lpage_info[i].write_count = 1;
+ if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
+ !largepages_enabled)
+ for (j = 0; j < lpages; ++j)
+ new.lpage_info[i][j].write_count = 1;
}
+skip_lpage:
+
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
@@ -1194,7 +1217,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (!new.dirty_bitmap)
goto out_free;
memset(new.dirty_bitmap, 0, dirty_bytes);
+ if (old.npages)
+ kvm_arch_flush_shadow(kvm);
}
+#else /* not defined CONFIG_S390 */
+ new.user_alloc = user_alloc;
+ if (user_alloc)
+ new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */
if (!npages)
@@ -1294,6 +1323,12 @@ out:
return r;
}
+void kvm_disable_largepages(void)
+{
+ largepages_enabled = false;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_largepages);
+
int is_error_page(struct page *page)
{
return page == bad_page;
@@ -1709,24 +1744,18 @@ static struct file_operations kvm_vcpu_fops = {
*/
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
- int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
- if (fd < 0)
- kvm_put_kvm(vcpu->kvm);
- return fd;
+ return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
}
/*
* Creates some virtual cpus. Good luck creating more than one.
*/
-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
int r;
- struct kvm_vcpu *vcpu;
-
- if (!valid_vcpu(n))
- return -EINVAL;
+ struct kvm_vcpu *vcpu, *v;
- vcpu = kvm_arch_vcpu_create(kvm, n);
+ vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
@@ -1737,23 +1766,38 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
return r;
mutex_lock(&kvm->lock);
- if (kvm->vcpus[n]) {
- r = -EEXIST;
+ if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+ r = -EINVAL;
goto vcpu_destroy;
}
- kvm->vcpus[n] = vcpu;
- mutex_unlock(&kvm->lock);
+
+ kvm_for_each_vcpu(r, v, kvm)
+ if (v->vcpu_id == id) {
+ r = -EEXIST;
+ goto vcpu_destroy;
+ }
+
+ BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
/* Now it's all set up, let userspace reach it */
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
- if (r < 0)
- goto unlink;
+ if (r < 0) {
+ kvm_put_kvm(kvm);
+ goto vcpu_destroy;
+ }
+
+ kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+ smp_wmb();
+ atomic_inc(&kvm->online_vcpus);
+
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+ if (kvm->bsp_vcpu_id == id)
+ kvm->bsp_vcpu = vcpu;
+#endif
+ mutex_unlock(&kvm->lock);
return r;
-unlink:
- mutex_lock(&kvm->lock);
- kvm->vcpus[n] = NULL;
vcpu_destroy:
mutex_unlock(&kvm->lock);
kvm_arch_vcpu_destroy(vcpu);
@@ -2194,6 +2238,7 @@ static long kvm_vm_ioctl(struct file *filp,
vfree(entries);
break;
}
+#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
case KVM_ASSIGN_SET_MSIX_NR: {
struct kvm_assigned_msix_nr entry_nr;
@@ -2216,7 +2261,26 @@ static long kvm_vm_ioctl(struct file *filp,
break;
}
#endif
-#endif /* KVM_CAP_IRQ_ROUTING */
+ case KVM_IRQFD: {
+ struct kvm_irqfd data;
+
+ r = -EFAULT;
+ if (copy_from_user(&data, argp, sizeof data))
+ goto out;
+ r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
+ break;
+ }
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+ case KVM_SET_BOOT_CPU_ID:
+ r = 0;
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus) != 0)
+ r = -EBUSY;
+ else
+ kvm->bsp_vcpu_id = arg;
+ mutex_unlock(&kvm->lock);
+ break;
+#endif
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}
@@ -2283,6 +2347,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
case KVM_CAP_USER_MEMORY:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+ case KVM_CAP_SET_BOOT_CPU_ID:
+#endif
return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
case KVM_CAP_IRQ_ROUTING:
@@ -2330,7 +2397,7 @@ static long kvm_dev_ioctl(struct file *filp,
case KVM_TRACE_ENABLE:
case KVM_TRACE_PAUSE:
case KVM_TRACE_DISABLE:
- r = kvm_trace_ioctl(ioctl, arg);
+ r = -EOPNOTSUPP;
break;
default:
return kvm_arch_dev_ioctl(filp, ioctl, arg);
@@ -2444,22 +2511,38 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
}
}
-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
- gpa_t addr, int len, int is_write)
+/* kvm_io_bus_write - called under kvm->slots_lock */
+int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
+ int len, const void *val)
{
int i;
+ for (i = 0; i < bus->dev_count; i++)
+ if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
+ return 0;
+ return -EOPNOTSUPP;
+}
- for (i = 0; i < bus->dev_count; i++) {
- struct kvm_io_device *pos = bus->devs[i];
-
- if (pos->in_range(pos, addr, len, is_write))
- return pos;
- }
+/* kvm_io_bus_read - called under kvm->slots_lock */
+int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
+{
+ int i;
+ for (i = 0; i < bus->dev_count; i++)
+ if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
+ return 0;
+ return -EOPNOTSUPP;
+}
- return NULL;
+void kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+ struct kvm_io_device *dev)
+{
+ down_write(&kvm->slots_lock);
+ __kvm_io_bus_register_dev(bus, dev);
+ up_write(&kvm->slots_lock);
}
-void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
+/* An unlocked version. Caller must have write lock on slots_lock. */
+void __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+ struct kvm_io_device *dev)
{
BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
@@ -2496,11 +2579,9 @@ static int vcpu_stat_get(void *_offset, u64 *val)
*val = 0;
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (vcpu)
- *val += *(u32 *)((void *)vcpu + offset);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ *val += *(u32 *)((void *)vcpu + offset);
+
spin_unlock(&kvm_lock);
return 0;
}
@@ -2682,7 +2763,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
- kvm_trace_cleanup();
+ tracepoint_synchronize_unregister();
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
sysdev_unregister(&kvm_sysdev);
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c
deleted file mode 100644
index f59874446440..000000000000
--- a/virt/kvm/kvm_trace.c
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * kvm trace
- *
- * It is designed to allow debugging traces of kvm to be generated
- * on UP / SMP machines. Each trace entry can be timestamped so that
- * it's possible to reconstruct a chronological record of trace events.
- * The implementation refers to blktrace kernel support.
- *
- * Copyright (c) 2008 Intel Corporation
- * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
- *
- * Authors: Feng(Eric) Liu, eric.e.liu@intel.com
- *
- * Date: Feb 2008
- */
-
-#include <linux/module.h>
-#include <linux/relay.h>
-#include <linux/debugfs.h>
-#include <linux/ktime.h>
-
-#include <linux/kvm_host.h>
-
-#define KVM_TRACE_STATE_RUNNING (1 << 0)
-#define KVM_TRACE_STATE_PAUSE (1 << 1)
-#define KVM_TRACE_STATE_CLEARUP (1 << 2)
-
-struct kvm_trace {
- int trace_state;
- struct rchan *rchan;
- struct dentry *lost_file;
- atomic_t lost_records;
-};
-static struct kvm_trace *kvm_trace;
-
-struct kvm_trace_probe {
- const char *name;
- const char *format;
- u32 timestamp_in;
- marker_probe_func *probe_func;
-};
-
-static inline int calc_rec_size(int timestamp, int extra)
-{
- int rec_size = KVM_TRC_HEAD_SIZE;
-
- rec_size += extra;
- return timestamp ? rec_size += KVM_TRC_CYCLE_SIZE : rec_size;
-}
-
-static void kvm_add_trace(void *probe_private, void *call_data,
- const char *format, va_list *args)
-{
- struct kvm_trace_probe *p = probe_private;
- struct kvm_trace *kt = kvm_trace;
- struct kvm_trace_rec rec;
- struct kvm_vcpu *vcpu;
- int i, size;
- u32 extra;
-
- if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING))
- return;
-
- rec.rec_val = TRACE_REC_EVENT_ID(va_arg(*args, u32));
- vcpu = va_arg(*args, struct kvm_vcpu *);
- rec.pid = current->tgid;
- rec.vcpu_id = vcpu->vcpu_id;
-
- extra = va_arg(*args, u32);
- WARN_ON(!(extra <= KVM_TRC_EXTRA_MAX));
- extra = min_t(u32, extra, KVM_TRC_EXTRA_MAX);
-
- rec.rec_val |= TRACE_REC_TCS(p->timestamp_in)
- | TRACE_REC_NUM_DATA_ARGS(extra);
-
- if (p->timestamp_in) {
- rec.u.timestamp.timestamp = ktime_to_ns(ktime_get());
-
- for (i = 0; i < extra; i++)
- rec.u.timestamp.extra_u32[i] = va_arg(*args, u32);
- } else {
- for (i = 0; i < extra; i++)
- rec.u.notimestamp.extra_u32[i] = va_arg(*args, u32);
- }
-
- size = calc_rec_size(p->timestamp_in, extra * sizeof(u32));
- relay_write(kt->rchan, &rec, size);
-}
-
-static struct kvm_trace_probe kvm_trace_probes[] = {
- { "kvm_trace_entryexit", "%u %p %u %u %u %u %u %u", 1, kvm_add_trace },
- { "kvm_trace_handler", "%u %p %u %u %u %u %u %u", 0, kvm_add_trace },
-};
-
-static int lost_records_get(void *data, u64 *val)
-{
- struct kvm_trace *kt = data;
-
- *val = atomic_read(&kt->lost_records);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(kvm_trace_lost_ops, lost_records_get, NULL, "%llu\n");
-
-/*
- * The relay channel is used in "no-overwrite" mode, it keeps trace of how
- * many times we encountered a full subbuffer, to tell user space app the
- * lost records there were.
- */
-static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
- void *prev_subbuf, size_t prev_padding)
-{
- struct kvm_trace *kt;
-
- if (!relay_buf_full(buf)) {
- if (!prev_subbuf) {
- /*
- * executed only once when the channel is opened
- * save metadata as first record
- */
- subbuf_start_reserve(buf, sizeof(u32));
- *(u32 *)subbuf = 0x12345678;
- }
-
- return 1;
- }
-
- kt = buf->chan->private_data;
- atomic_inc(&kt->lost_records);
-
- return 0;
-}
-
-static struct dentry *kvm_create_buf_file_callack(const char *filename,
- struct dentry *parent,
- int mode,
- struct rchan_buf *buf,
- int *is_global)
-{
- return debugfs_create_file(filename, mode, parent, buf,
- &relay_file_operations);
-}
-
-static int kvm_remove_buf_file_callback(struct dentry *dentry)
-{
- debugfs_remove(dentry);
- return 0;
-}
-
-static struct rchan_callbacks kvm_relay_callbacks = {
- .subbuf_start = kvm_subbuf_start_callback,
- .create_buf_file = kvm_create_buf_file_callack,
- .remove_buf_file = kvm_remove_buf_file_callback,
-};
-
-static int do_kvm_trace_enable(struct kvm_user_trace_setup *kuts)
-{
- struct kvm_trace *kt;
- int i, r = -ENOMEM;
-
- if (!kuts->buf_size || !kuts->buf_nr)
- return -EINVAL;
-
- kt = kzalloc(sizeof(*kt), GFP_KERNEL);
- if (!kt)
- goto err;
-
- r = -EIO;
- atomic_set(&kt->lost_records, 0);
- kt->lost_file = debugfs_create_file("lost_records", 0444, kvm_debugfs_dir,
- kt, &kvm_trace_lost_ops);
- if (!kt->lost_file)
- goto err;
-
- kt->rchan = relay_open("trace", kvm_debugfs_dir, kuts->buf_size,
- kuts->buf_nr, &kvm_relay_callbacks, kt);
- if (!kt->rchan)
- goto err;
-
- kvm_trace = kt;
-
- for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
- struct kvm_trace_probe *p = &kvm_trace_probes[i];
-
- r = marker_probe_register(p->name, p->format, p->probe_func, p);
- if (r)
- printk(KERN_INFO "Unable to register probe %s\n",
- p->name);
- }
-
- kvm_trace->trace_state = KVM_TRACE_STATE_RUNNING;
-
- return 0;
-err:
- if (kt) {
- if (kt->lost_file)
- debugfs_remove(kt->lost_file);
- if (kt->rchan)
- relay_close(kt->rchan);
- kfree(kt);
- }
- return r;
-}
-
-static int kvm_trace_enable(char __user *arg)
-{
- struct kvm_user_trace_setup kuts;
- int ret;
-
- ret = copy_from_user(&kuts, arg, sizeof(kuts));
- if (ret)
- return -EFAULT;
-
- ret = do_kvm_trace_enable(&kuts);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int kvm_trace_pause(void)
-{
- struct kvm_trace *kt = kvm_trace;
- int r = -EINVAL;
-
- if (kt == NULL)
- return r;
-
- if (kt->trace_state == KVM_TRACE_STATE_RUNNING) {
- kt->trace_state = KVM_TRACE_STATE_PAUSE;
- relay_flush(kt->rchan);
- r = 0;
- }
-
- return r;
-}
-
-void kvm_trace_cleanup(void)
-{
- struct kvm_trace *kt = kvm_trace;
- int i;
-
- if (kt == NULL)
- return;
-
- if (kt->trace_state == KVM_TRACE_STATE_RUNNING ||
- kt->trace_state == KVM_TRACE_STATE_PAUSE) {
-
- kt->trace_state = KVM_TRACE_STATE_CLEARUP;
-
- for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
- struct kvm_trace_probe *p = &kvm_trace_probes[i];
- marker_probe_unregister(p->name, p->probe_func, p);
- }
- marker_synchronize_unregister();
-
- relay_close(kt->rchan);
- debugfs_remove(kt->lost_file);
- kfree(kt);
- }
-}
-
-int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- long r = -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- switch (ioctl) {
- case KVM_TRACE_ENABLE:
- r = kvm_trace_enable(argp);
- break;
- case KVM_TRACE_PAUSE:
- r = kvm_trace_pause();
- break;
- case KVM_TRACE_DISABLE:
- r = 0;
- kvm_trace_cleanup();
- break;
- }
-
- return r;
-}