Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/acpi/Kconfig15
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/acconfig.h1
-rw-r--r--drivers/acpi/acpica/acevents.h17
-rw-r--r--drivers/acpi/acpica/acglobal.h13
-rw-r--r--drivers/acpi/acpica/amlcode.h15
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/evglock.c335
-rw-r--r--drivers/acpi/acpica/evmisc.c303
-rw-r--r--drivers/acpi/acpica/evregion.c121
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c13
-rw-r--r--drivers/acpi/acpica/excreate.c3
-rw-r--r--drivers/acpi/acpica/nsrepair.c13
-rw-r--r--drivers/acpi/acpica/utdecode.c5
-rw-r--r--drivers/acpi/acpica/utmutex.c12
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/custom_method.c100
-rw-r--r--drivers/acpi/debugfs.c92
-rw-r--r--drivers/acpi/ec.c19
-rw-r--r--drivers/acpi/internal.h3
-rw-r--r--drivers/acpi/osl.c33
-rw-r--r--drivers/acpi/processor_core.c12
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/sysfs.c8
-rw-r--r--drivers/amba/bus.c5
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/pata_marvell.c3
-rw-r--r--drivers/ata/sata_dwc_460ex.c2
-rw-r--r--drivers/base/power/clock_ops.c20
-rw-r--r--drivers/base/power/main.c28
-rw-r--r--drivers/block/brd.c42
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/block/loop.c17
-rw-r--r--drivers/block/nbd.c22
-rw-r--r--drivers/block/paride/pcd.c1
-rw-r--r--drivers/block/virtio_blk.c91
-rw-r--r--drivers/block/xen-blkback/blkback.c10
-rw-r--r--drivers/block/xen-blkback/xenbus.c3
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c12
-rw-r--r--drivers/bluetooth/hci_ldisc.c17
-rw-r--r--drivers/cdrom/viocd.c1
-rw-r--r--drivers/char/hpet.c25
-rw-r--r--drivers/char/virtio_console.c5
-rw-r--r--drivers/clocksource/sh_cmt.c12
-rw-r--r--drivers/clocksource/sh_tmu.c12
-rw-r--r--drivers/cpufreq/cpufreq_stats.c9
-rw-r--r--drivers/cpufreq/powernow-k8.c6
-rw-r--r--drivers/cpuidle/governors/menu.c4
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/TODO14
-rw-r--r--drivers/dma/at_hdmac.c376
-rw-r--r--drivers/dma/at_hdmac_regs.h30
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/dw_dmac.c272
-rw-r--r--drivers/dma/dw_dmac_regs.h2
-rw-r--r--drivers/dma/intel_mid_dma.c17
-rw-r--r--drivers/dma/ioat/dma_v2.c8
-rw-r--r--drivers/dma/iop-adma.c6
-rw-r--r--drivers/dma/mv_xor.c6
-rw-r--r--drivers/dma/pch_dma.c96
-rw-r--r--drivers/dma/ppc4xx/adma.c8
-rw-r--r--drivers/dma/shdma.c22
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/gpio/Kconfig33
-rw-r--r--drivers/gpio/Makefile8
-rw-r--r--drivers/gpio/gpio-exynos4.c386
-rw-r--r--drivers/gpio/gpio-nomadik.c1087
-rw-r--r--drivers/gpio/gpio-omap.c2009
-rw-r--r--drivers/gpio/gpio-plat-samsung.c206
-rw-r--r--drivers/gpio/gpio-s5pc100.c355
-rw-r--r--drivers/gpio/gpio-s5pv210.c288
-rw-r--r--drivers/gpio/gpio-u300.c700
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpio/langwell_gpio.c65
-rw-r--r--drivers/gpio/pca953x.c249
-rw-r--r--drivers/gpio/pch_gpio.c2
-rw-r--r--drivers/gpio/tps65910-gpio.c100
-rw-r--r--drivers/gpu/drm/drm_bufs.c17
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c19
-rw-r--r--drivers/gpu/drm/drm_gem.c1
-rw-r--r--drivers/gpu/drm/drm_ioc32.c9
-rw-r--r--drivers/gpu/drm/drm_pci.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c80
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c13
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c5
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c92
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c15
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c16
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c3
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c30
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c5
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c14
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c59
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c118
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h2
-rw-r--r--drivers/gpu/drm/radeon/Kconfig9
-rw-r--r--drivers/gpu/drm/radeon/atombios.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c19
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.c326
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.h3
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c134
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c561
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h8
-rw-r--r--drivers/gpu/drm/radeon/ni.c13
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h8
-rw-r--r--drivers/gpu/drm/radeon/r600.c32
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c28
-rw-r--r--drivers/gpu/drm/radeon/r600d.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c39
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c71
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c140
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/radeon/rv770.c3
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c5
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-magicmouse.c10
-rw-r--r--drivers/hid/hid-multitouch.c74
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c8
-rw-r--r--drivers/hwmon/asus_atk0110.c5
-rw-r--r--drivers/hwmon/coretemp.c43
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/ibmpex.c1
-rw-r--r--drivers/hwmon/max6642.c22
-rw-r--r--drivers/hwmon/pmbus_core.c1
-rw-r--r--drivers/hwmon/s3c-hwmon.c2
-rw-r--r--drivers/ide/ide-cd.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c46
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c25
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c6
-rw-r--r--drivers/input/evdev.c3
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/keyboard/omap-keypad.c1
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/mousedev.c4
-rw-r--r--drivers/input/serio/serport.c10
-rw-r--r--drivers/isdn/gigaset/interface.c4
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c8
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c1
-rw-r--r--drivers/leds/Kconfig20
-rw-r--r--drivers/leds/leds-lp5521.c4
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/md/bitmap.c104
-rw-r--r--drivers/md/bitmap.h10
-rw-r--r--drivers/md/dm-io.c27
-rw-r--r--drivers/md/dm-kcopyd.c168
-rw-r--r--drivers/md/dm-log.c3
-rw-r--r--drivers/md/dm-mpath.c2
-rw-r--r--drivers/md/dm-raid1.c10
-rw-r--r--drivers/md/dm-snap-persistent.c13
-rw-r--r--drivers/md/dm-snap.c10
-rw-r--r--drivers/md/dm-table.c23
-rw-r--r--drivers/md/md.c41
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid1.c24
-rw-r--r--drivers/md/raid1.h2
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c272
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c17
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c107
-rw-r--r--drivers/media/dvb/frontends/stb0899_algo.c2
-rw-r--r--drivers/media/dvb/frontends/tda8261.c1
-rw-r--r--drivers/media/media-devnode.c4
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/Kconfig12
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/fintek-cir.c684
-rw-r--r--drivers/media/rc/fintek-cir.h243
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c134
-rw-r--r--drivers/media/video/Kconfig6
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/video/gspca/coarse_expo_autogain.h116
-rw-r--r--drivers/media/video/gspca/kinect.c2
-rw-r--r--drivers/media/video/gspca/ov519.c8
-rw-r--r--drivers/media/video/gspca/sonixj.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h2
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c10
-rw-r--r--drivers/media/video/ivtv/ivtv-firmware.c11
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c129
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.h3
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c33
-rw-r--r--drivers/media/video/m5mols/Kconfig5
-rw-r--r--drivers/media/video/m5mols/Makefile3
-rw-r--r--drivers/media/video/m5mols/m5mols.h296
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c191
-rw-r--r--drivers/media/video/m5mols/m5mols_controls.c299
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c1004
-rw-r--r--drivers/media/video/m5mols/m5mols_reg.h399
-rw-r--r--drivers/media/video/omap3isp/isp.c2
-rw-r--r--drivers/media/video/soc_camera.c2
-rw-r--r--drivers/media/video/uvc/Makefile3
-rw-r--r--drivers/media/video/uvc/uvc_driver.c66
-rw-r--r--drivers/media/video/uvc/uvc_entity.c118
-rw-r--r--drivers/media/video/uvc/uvcvideo.h20
-rw-r--r--drivers/mfd/Kconfig9
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/db8500-prcmu.c3
-rw-r--r--drivers/mfd/tps65910-irq.c218
-rw-r--r--drivers/mfd/tps65910.c229
-rw-r--r--drivers/mfd/tps65911-comparator.c188
-rw-r--r--drivers/misc/apds990x.c2
-rw-r--r--drivers/misc/cb710/sgbuf2.c2
-rw-r--r--drivers/misc/cs5535-mfgpt.c2
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/kgdbts.c5
-rw-r--r--drivers/misc/lkdtm.c8
-rw-r--r--drivers/misc/sgi-xp/xpnet.c6
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c2
-rw-r--r--drivers/misc/ti-st/st_core.c6
-rw-r--r--drivers/mmc/card/block.c5
-rw-r--r--drivers/mmc/card/queue.c15
-rw-r--r--drivers/mmc/card/queue.h3
-rw-r--r--drivers/mmc/core/core.c2
-rw-r--r--drivers/mmc/core/sdio.c39
-rw-r--r--drivers/mmc/core/sdio_bus.c2
-rw-r--r--drivers/mmc/host/mmci.c37
-rw-r--r--drivers/mmc/host/of_mmc_spi.c5
-rw-r--r--drivers/mmc/host/omap_hsmmc.c9
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c5
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c4
-rw-r--r--drivers/mmc/host/vub300.c11
-rw-r--r--drivers/mtd/Kconfig18
-rw-r--r--drivers/mtd/Makefile3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c10
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c1
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/doc2000.c4
-rw-r--r--drivers/mtd/devices/doc2001.c4
-rw-r--r--drivers/mtd/devices/doc2001plus.c4
-rw-r--r--drivers/mtd/devices/lart.c9
-rw-r--r--drivers/mtd/devices/m25p80.c109
-rw-r--r--drivers/mtd/devices/ms02-nv.c4
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c45
-rw-r--r--drivers/mtd/devices/mtdram.c5
-rw-r--r--drivers/mtd/devices/phram.c4
-rw-r--r--drivers/mtd/devices/pmc551.c6
-rw-r--r--drivers/mtd/devices/slram.c4
-rw-r--r--drivers/mtd/devices/sst25l.c68
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c8
-rw-r--r--drivers/mtd/maps/Kconfig23
-rw-r--r--drivers/mtd/maps/amd76xrom.c4
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c4
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c6
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c21
-rw-r--r--drivers/mtd/maps/cdb89712.c12
-rw-r--r--drivers/mtd/maps/ceiva.c6
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c4
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/dbox2-flash.c4
-rw-r--r--drivers/mtd/maps/dc21285.c20
-rw-r--r--drivers/mtd/maps/dilnetpc.c9
-rw-r--r--drivers/mtd/maps/dmv182.c4
-rw-r--r--drivers/mtd/maps/edb7312.c26
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/fortunet.c7
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c27
-rw-r--r--drivers/mtd/maps/h720x-flash.c6
-rw-r--r--drivers/mtd/maps/ichxrom.c4
-rw-r--r--drivers/mtd/maps/impa7.c22
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c19
-rw-r--r--drivers/mtd/maps/ixp2000.c4
-rw-r--r--drivers/mtd/maps/ixp4xx.c16
-rw-r--r--drivers/mtd/maps/l440gx.c4
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c45
-rw-r--r--drivers/mtd/maps/mbx860.c6
-rw-r--r--drivers/mtd/maps/netsc520.c4
-rw-r--r--drivers/mtd/maps/nettel.c12
-rw-r--r--drivers/mtd/maps/octagon-5066.c4
-rw-r--r--drivers/mtd/maps/pci.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c4
-rw-r--r--drivers/mtd/maps/physmap.c34
-rw-r--r--drivers/mtd/maps/physmap_of.c30
-rw-r--r--drivers/mtd/maps/plat-ram.c24
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c6
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c18
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c24
-rw-r--r--drivers/mtd/maps/rpxlite.c4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c21
-rw-r--r--drivers/mtd/maps/sbc_gxx.c4
-rw-r--r--drivers/mtd/maps/sc520cdp.c8
-rw-r--r--drivers/mtd/maps/scb2_flash.c6
-rw-r--r--drivers/mtd/maps/scx200_docflash.c16
-rw-r--r--drivers/mtd/maps/solutionengine.c12
-rw-r--r--drivers/mtd/maps/sun_uflash.c4
-rw-r--r--drivers/mtd/maps/tqm8xxl.c20
-rw-r--r--drivers/mtd/maps/ts5500_flash.c4
-rw-r--r--drivers/mtd/maps/tsunami_flash.c4
-rw-r--r--drivers/mtd/maps/uclinux.c12
-rw-r--r--drivers/mtd/maps/vmax301.c4
-rw-r--r--drivers/mtd/maps/vmu-flash.c4
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c15
-rw-r--r--drivers/mtd/mtd_blkdevs.c24
-rw-r--r--drivers/mtd/mtdchar.c55
-rw-r--r--drivers/mtd/mtdconcat.c4
-rw-r--r--drivers/mtd/mtdcore.c167
-rw-r--r--drivers/mtd/mtdcore.h6
-rw-r--r--drivers/mtd/mtdpart.c9
-rw-r--r--drivers/mtd/mtdswap.c8
-rw-r--r--drivers/mtd/nand/Kconfig5
-rw-r--r--drivers/mtd/nand/alauda.c4
-rw-r--r--drivers/mtd/nand/ams-delta.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c13
-rw-r--r--drivers/mtd/nand/au1550nd.c3
-rw-r--r--drivers/mtd/nand/autcpu12.c16
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c4
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c7
-rw-r--r--drivers/mtd/nand/cafe_nand.c11
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/cs553x_nand.c19
-rw-r--r--drivers/mtd/nand/davinci_nand.c51
-rw-r--r--drivers/mtd/nand/denali.c247
-rw-r--r--drivers/mtd/nand/denali.h373
-rw-r--r--drivers/mtd/nand/diskonchip.c18
-rw-r--r--drivers/mtd/nand/edb7312.c9
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c12
-rw-r--r--drivers/mtd/nand/fsl_upm.c12
-rw-r--r--drivers/mtd/nand/fsmc_nand.c25
-rw-r--r--drivers/mtd/nand/gpio.c4
-rw-r--r--drivers/mtd/nand/h1910.c5
-rw-r--r--drivers/mtd/nand/jz4740_nand.c10
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c12
-rw-r--r--drivers/mtd/nand/mxc_nand.c64
-rw-r--r--drivers/mtd/nand/nand_base.c18
-rw-r--r--drivers/mtd/nand/nand_bbt.c27
-rw-r--r--drivers/mtd/nand/nandsim.c4
-rw-r--r--drivers/mtd/nand/ndfc.c65
-rw-r--r--drivers/mtd/nand/nomadik_nand.c7
-rw-r--r--drivers/mtd/nand/nuc900_nand.c4
-rw-r--r--drivers/mtd/nand/omap2.c32
-rw-r--r--drivers/mtd/nand/orion_nand.c14
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c12
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c15
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c13
-rw-r--r--drivers/mtd/nand/rtc_from4.c3
-rw-r--r--drivers/mtd/nand/s3c2410.c75
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sharpsl.c12
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c16
-rw-r--r--drivers/mtd/nand/spia.c2
-rw-r--r--drivers/mtd/nand/tmio_nand.c10
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c14
-rw-r--r--drivers/mtd/onenand/Kconfig1
-rw-r--r--drivers/mtd/onenand/generic.c16
-rw-r--r--drivers/mtd/onenand/omap2.c10
-rw-r--r--drivers/mtd/onenand/onenand_base.c54
-rw-r--r--drivers/mtd/onenand/onenand_sim.c3
-rw-r--r--drivers/mtd/onenand/samsung.c12
-rw-r--r--drivers/mtd/ubi/gluebi.c6
-rw-r--r--drivers/net/3c503.c3
-rw-r--r--drivers/net/3c509.c14
-rw-r--r--drivers/net/3c59x.c4
-rw-r--r--drivers/net/arm/am79c961a.c126
-rw-r--r--drivers/net/arm/ep93xx_eth.c82
-rw-r--r--drivers/net/bfin_mac.c20
-rw-r--r--drivers/net/bonding/bond_main.c46
-rw-r--r--drivers/net/caif/caif_serial.c6
-rw-r--r--drivers/net/can/flexcan.c5
-rw-r--r--drivers/net/can/slcan.c9
-rw-r--r--drivers/net/davinci_emac.c32
-rw-r--r--drivers/net/depca.c35
-rw-r--r--drivers/net/dl2k.c2
-rw-r--r--drivers/net/dm9000.c6
-rw-r--r--drivers/net/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/gianfar.c29
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/gianfar_ethtool.c64
-rw-r--r--drivers/net/hamradio/6pack.c8
-rw-r--r--drivers/net/hamradio/mkiss.c11
-rw-r--r--drivers/net/hp100.c16
-rw-r--r--drivers/net/hplance.c2
-rw-r--r--drivers/net/ibmlana.c4
-rw-r--r--drivers/net/igb/igb_main.c3
-rw-r--r--drivers/net/irda/irtty-sir.c16
-rw-r--r--drivers/net/irda/smsc-ircc2.c44
-rw-r--r--drivers/net/ks8842.c2
-rw-r--r--drivers/net/ne3210.c15
-rw-r--r--drivers/net/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/dp83640.c24
-rw-r--r--drivers/net/ppp_async.c10
-rw-r--r--drivers/net/ppp_synctty.c6
-rw-r--r--drivers/net/pxa168_eth.c2
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/r8169.c10
-rw-r--r--drivers/net/sfc/mtd.c6
-rw-r--r--drivers/net/slip.c11
-rw-r--r--drivers/net/smc-mca.c6
-rw-r--r--drivers/net/smc91x.c6
-rw-r--r--drivers/net/tg3.c2
-rw-r--r--drivers/net/tokenring/madgemc.c2
-rw-r--r--drivers/net/tulip/de4x5.c4
-rw-r--r--drivers/net/tun.c24
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c3
-rw-r--r--drivers/net/usb/kalmia.c384
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/wan/farsync.c4
-rw-r--r--drivers/net/wan/x25_asy.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c11
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c3
-rw-r--r--drivers/net/wireless/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c14
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c30
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h13
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c24
-rw-r--r--drivers/net/wireless/libertas/cmd.c6
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c21
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h4
-rw-r--r--drivers/net/wireless/mwl8k.c4
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c25
-rw-r--r--drivers/net/wireless/wl12xx/conf.h3
-rw-r--r--drivers/net/wireless/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/wl12xx/scan.c49
-rw-r--r--drivers/net/wireless/wl12xx/scan.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c53
-rw-r--r--drivers/of/fdt.c8
-rw-r--r--drivers/oprofile/buffer_sync.c21
-rw-r--r--drivers/oprofile/event_buffer.h2
-rw-r--r--drivers/oprofile/oprof.c2
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/dmar.c7
-rw-r--r--drivers/pci/intel-iommu.c242
-rw-r--r--drivers/pci/iova.c12
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-driver.c4
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c1
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile3
-rw-r--r--drivers/platform/x86/acer-wmi.c184
-rw-r--r--drivers/platform/x86/acerhdf.c4
-rw-r--r--drivers/platform/x86/asus-laptop.c34
-rw-r--r--drivers/platform/x86/asus-wmi.c22
-rw-r--r--drivers/platform/x86/asus_acpi.c77
-rw-r--r--drivers/platform/x86/compal-laptop.c36
-rw-r--r--drivers/platform/x86/dell-laptop.c12
-rw-r--r--drivers/platform/x86/dell-wmi-aio.c3
-rw-r--r--drivers/platform/x86/dell-wmi.c17
-rw-r--r--drivers/platform/x86/eeepc-laptop.c21
-rw-r--r--drivers/platform/x86/eeepc-wmi.c14
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c39
-rw-r--r--drivers/platform/x86/hdaps.c19
-rw-r--r--drivers/platform/x86/hp-wmi.c43
-rw-r--r--drivers/platform/x86/ibm_rtl.c23
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/intel_menlow.c5
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c72
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c607
-rw-r--r--drivers/platform/x86/intel_oaktrail.c396
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c14
-rw-r--r--drivers/platform/x86/msi-laptop.c23
-rw-r--r--drivers/platform/x86/msi-wmi.c45
-rw-r--r--drivers/platform/x86/sony-laptop.c106
-rw-r--r--drivers/platform/x86/tc1100-wmi.c7
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c490
-rw-r--r--drivers/platform/x86/topstar-laptop.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c59
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c11
-rw-r--r--drivers/platform/x86/wmi.c10
-rw-r--r--drivers/platform/x86/xo15-ebook.c5
-rw-r--r--drivers/power/Kconfig9
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/bq27x00_battery.c11
-rw-r--r--drivers/power/ds2760_battery.c6
-rw-r--r--drivers/power/gpio-charger.c15
-rw-r--r--drivers/power/isp1704_charger.c22
-rw-r--r--drivers/power/max8903_charger.c391
-rw-r--r--drivers/power/test_power.c276
-rw-r--r--drivers/power/z2_battery.c20
-rw-r--r--drivers/ptp/ptp_chardev.c11
-rw-r--r--drivers/regulator/Kconfig6
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c93
-rw-r--r--drivers/regulator/max8997.c13
-rw-r--r--drivers/regulator/max8998.c22
-rw-r--r--drivers/regulator/mc13892-regulator.c18
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c2
-rw-r--r--drivers/regulator/tps6105x-regulator.c1
-rw-r--r--drivers/regulator/tps65023-regulator.c3
-rw-r--r--drivers/regulator/tps6507x-regulator.c3
-rw-r--r--drivers/regulator/tps65910-regulator.c993
-rw-r--r--drivers/regulator/twl-regulator.c564
-rw-r--r--drivers/regulator/wm831x-dcdc.c2
-rw-r--r--drivers/regulator/wm8400-regulator.c12
-rw-r--r--drivers/rtc/Kconfig9
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-dev.c3
-rw-r--r--drivers/rtc/rtc-ds1307.c1
-rw-r--r--drivers/rtc/rtc-m41t93.c2
-rw-r--r--drivers/rtc/rtc-puv3.c359
-rw-r--r--drivers/rtc/rtc-vt8500.c45
-rw-r--r--drivers/s390/block/dasd_diag.c6
-rw-r--r--drivers/s390/char/sclp.c7
-rw-r--r--drivers/s390/cio/qdio_main.c6
-rw-r--r--drivers/s390/kvm/kvm_virtio.c3
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c57
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c45
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c11
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h9
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c2
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h1
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c11
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c26
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c27
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c13
-rw-r--r--drivers/scsi/fcoe/fcoe.c58
-rw-r--r--drivers/scsi/fcoe/fcoe.h10
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c133
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c40
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/libfc/fc_disc.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c16
-rw-r--r--drivers/scsi/libfc/fc_libfc.h1
-rw-r--r--drivers/scsi/libsas/sas_ata.c60
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_phy.c4
-rw-r--r--drivers/scsi/libsas/sas_port.c21
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c14
-rw-r--r--drivers/scsi/lpfc/lpfc.h43
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c312
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c2111
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h87
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c54
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h501
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c545
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c166
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c1659
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h33
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c93
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c83
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h4
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c102
-rw-r--r--drivers/scsi/osst.c6
-rw-r--r--drivers/scsi/qla4xxx/Makefile2
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c69
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h11
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h23
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c22
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c77
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c19
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c68
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_error.c87
-rw-r--r--drivers/scsi/scsi_proc.c5
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c1
-rw-r--r--drivers/scsi/scsi_trace.c4
-rw-r--r--drivers/scsi/sd.c82
-rw-r--r--drivers/scsi/ultrastor.c2
-rw-r--r--drivers/sh/clk/core.c2
-rw-r--r--drivers/spi/Kconfig9
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/amba-pl022.c1
-rw-r--r--drivers/spi/omap2_mcspi.c2
-rw-r--r--drivers/spi/spi_bfin5xx.c7
-rw-r--r--drivers/spi/spi_bfin_sport.c952
-rw-r--r--drivers/spi/tle62x0.c3
-rw-r--r--drivers/ssb/driver_pcicore.c10
-rw-r--r--drivers/staging/Kconfig18
-rw-r--r--drivers/staging/altera-stapl/altera-jtag.c2
-rw-r--r--drivers/staging/altera-stapl/altera.c2
-rw-r--r--drivers/staging/altera-stapl/altera.h49
-rw-r--r--drivers/staging/ath6kl/Kconfig1
-rw-r--r--drivers/staging/ath6kl/os/linux/cfg80211.c3
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.c2
-rw-r--r--drivers/staging/gma500/psb_drv.c15
-rw-r--r--drivers/staging/gma500/psb_fb.c10
-rw-r--r--drivers/staging/gma500/psb_intel_bios.c13
-rw-r--r--drivers/staging/iio/accel/adis16201.h2
-rw-r--r--drivers/staging/iio/accel/adis16203.h2
-rw-r--r--drivers/staging/iio/dac/max517.c2
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c10
-rw-r--r--drivers/staging/iio/industrialio-trigger.c1
-rw-r--r--drivers/staging/mei/init.c4
-rw-r--r--drivers/staging/olpc_dcon/Kconfig1
-rw-r--r--drivers/staging/rts_pstor/sd.c2
-rw-r--r--drivers/staging/usbip/stub_dev.c21
-rw-r--r--drivers/staging/usbip/stub_rx.c20
-rw-r--r--drivers/target/loopback/tcm_loop.c38
-rw-r--r--drivers/target/target_core_configfs.c28
-rw-r--r--drivers/target/target_core_device.c34
-rw-r--r--drivers/target/target_core_pr.c6
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_tmr.c15
-rw-r--r--drivers/target/target_core_transport.c74
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h2
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c84
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c8
-rw-r--r--drivers/target/tcm_fc/tfc_io.c2
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c4
-rw-r--r--drivers/thermal/thermal_sys.c10
-rw-r--r--drivers/tty/n_gsm.c6
-rw-r--r--drivers/tty/n_hdlc.c18
-rw-r--r--drivers/tty/n_r3964.c10
-rw-r--r--drivers/tty/n_tty.c61
-rw-r--r--drivers/tty/serial/8250_pci.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c5
-rw-r--r--drivers/tty/serial/pch_uart.c4
-rw-r--r--drivers/tty/tty_buffer.c27
-rw-r--r--drivers/tty/vt/selection.c3
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/core/driver.c6
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/core/inode.c1
-rw-r--r--drivers/usb/gadget/Kconfig5
-rw-r--r--drivers/usb/gadget/amd5536udc.c1
-rw-r--r--drivers/usb/gadget/at91_udc.c1
-rw-r--r--drivers/usb/gadget/dummy_hcd.c1
-rw-r--r--drivers/usb/gadget/inode.c4
-rw-r--r--drivers/usb/gadget/mv_udc_core.c8
-rw-r--r--drivers/usb/gadget/net2280.c1
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c5
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c22
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c7
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c1
-rw-r--r--drivers/usb/host/ehci-pci.c39
-rw-r--r--drivers/usb/host/ohci-pxa27x.c7
-rw-r--r--drivers/usb/host/pci-quirks.c63
-rw-r--r--drivers/usb/host/pci-quirks.h2
-rw-r--r--drivers/usb/host/xhci-dbg.c8
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-pci.c40
-rw-r--r--drivers/usb/host/xhci-ring.c91
-rw-r--r--drivers/usb/host/xhci.c258
-rw-r--r--drivers/usb/host/xhci.h28
-rw-r--r--drivers/usb/musb/musb_core.c1
-rw-r--r--drivers/usb/otg/twl6030-usb.c10
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/option.c34
-rw-r--r--drivers/usb/storage/transport.c29
-rw-r--r--drivers/usb/storage/unusual_devs.h19
-rw-r--r--drivers/usb/storage/usb.c13
-rw-r--r--drivers/usb/storage/usb.h2
-rw-r--r--drivers/vhost/net.c12
-rw-r--r--drivers/vhost/test.c6
-rw-r--r--drivers/vhost/vhost.c138
-rw-r--r--drivers/vhost/vhost.h21
-rw-r--r--drivers/video/arcfb.c5
-rw-r--r--drivers/video/aty/atyfb_base.c10
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp8870_bl.c1012
-rw-r--r--drivers/video/bf537-lq035.c1
-rw-r--r--drivers/video/broadsheetfb.c4
-rw-r--r--drivers/video/efifb.c34
-rw-r--r--drivers/video/hecubafb.c5
-rw-r--r--drivers/video/imxfb.c4
-rw-r--r--drivers/video/metronomefb.c4
-rw-r--r--drivers/video/modedb.c1
-rw-r--r--drivers/video/pxa168fb.c17
-rw-r--r--drivers/video/s3c-fb.c22
-rw-r--r--drivers/video/savage/savagefb_driver.c16
-rw-r--r--drivers/video/sh_mobile_hdmi.c18
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/vga16fb.c2
-rw-r--r--drivers/video/xen-fbfront.c3
-rw-r--r--drivers/virtio/virtio_balloon.c21
-rw-r--r--drivers/virtio/virtio_ring.c53
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/xen/events.c20
-rw-r--r--drivers/xen/swiotlb-xen.c12
766 files changed, 29095 insertions, 7575 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 6b17f5864340..09f3232bcdcd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_SFI) += sfi/
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
obj-$(CONFIG_ARM_AMBA) += amba/
+# Many drivers will want to use DMA so this has to be made available
+# really early.
+obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_XEN) += xen/
@@ -92,7 +95,6 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bc2218db5ba9..de0e3df76776 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -369,6 +369,21 @@ config ACPI_HED
which is used to report some hardware errors notified via
SCI, mainly the corrected errors.
+config ACPI_CUSTOM_METHOD
+ tristate "Allow ACPI methods to be inserted/replaced at run time"
+ depends on DEBUG_FS
+ default n
+ help
+ This debug facility allows ACPI AML methods to be inserted and/or
+ replaced without rebooting the system. For details refer to:
+ Documentation/acpi/method-customizing.txt.
+
+ NOTE: This option is security sensitive, because it allows arbitrary
+ kernel memory to be written to by root (uid=0) users, allowing them
+ to bypass certain security measures (e.g. if root is not allowed to
+ load additional kernel modules after boot, this feature may be used
+ to override that restriction).
+
source "drivers/acpi/apei/Kconfig"
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b66fbb2fc85f..ecb26b4f29a0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
+obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a1224712fd0c..301bd2d388ad 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index ab87396c2c07..bc533dde16c4 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -187,7 +187,6 @@
/* Operation regions */
-#define ACPI_NUM_PREDEFINED_REGIONS 9
#define ACPI_USER_REGION_BEGIN 0x80
/* Maximum space_ids for Operation Regions */
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 41d247daf461..bea3b4899183 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -58,12 +58,6 @@ u32 acpi_ev_fixed_event_detect(void);
*/
u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
-acpi_status acpi_ev_acquire_global_lock(u16 timeout);
-
-acpi_status acpi_ev_release_global_lock(void);
-
-acpi_status acpi_ev_init_global_lock_handler(void);
-
u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
acpi_status
@@ -71,6 +65,17 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 notify_value);
/*
+ * evglock - Global Lock support
+ */
+acpi_status acpi_ev_init_global_lock_handler(void);
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout);
+
+acpi_status acpi_ev_release_global_lock(void);
+
+acpi_status acpi_ev_remove_global_lock_handler(void);
+
+/*
* evgpe - Low-level GPE support
*/
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index d69750b83b36..73863d86f022 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -214,24 +214,23 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
/*
* Global lock mutex is an actual AML mutex object
- * Global lock semaphore works in conjunction with the HW global lock
+ * Global lock semaphore works in conjunction with the actual global lock
+ * Global lock spinlock is used for "pending" handshake
*/
ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
+ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
ACPI_EXTERN u8 acpi_gbl_global_lock_present;
+ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
/*
* Spinlocks are used for interfaces that can be possibly called at
* interrupt level
*/
-ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
-ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
-#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
-#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
-#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
+ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
+ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index f4f0998d3967..1077f17859ed 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -394,21 +394,6 @@
#define AML_CLASS_METHOD_CALL 0x09
#define AML_CLASS_UNKNOWN 0x0A
-/* Predefined Operation Region space_iDs */
-
-typedef enum {
- REGION_MEMORY = 0,
- REGION_IO,
- REGION_PCI_CONFIG,
- REGION_EC,
- REGION_SMBUS,
- REGION_CMOS,
- REGION_PCI_BAR,
- REGION_IPMI,
- REGION_DATA_TABLE, /* Internal use only */
- REGION_FIXED_HW = 0x7F
-} AML_REGION_TYPES;
-
/* Comparison operation codes for match_op operator */
typedef enum {
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 23a3b1ab20c1..324acec1179a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -450,7 +450,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ex_create_region(op->named.data,
op->named.length,
- REGION_DATA_TABLE,
+ ACPI_ADR_SPACE_DATA_TABLE,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 4be4e921dfe1..976318138c56 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -562,7 +562,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
((op->common.value.arg)->common.value.
integer);
} else {
- region_space = REGION_DATA_TABLE;
+ region_space = ACPI_ADR_SPACE_DATA_TABLE;
}
/*
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
new file mode 100644
index 000000000000..56a562a1e5d7
--- /dev/null
+++ b/drivers/acpi/acpica/evglock.c
@@ -0,0 +1,335 @@
+/******************************************************************************
+ *
+ * Module Name: evglock - Global Lock support
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evglock")
+
+/* Local prototypes */
+static u32 acpi_ev_global_lock_handler(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_init_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for the global lock release event
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_init_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+
+ /* Attempt installation of the global lock handler */
+
+ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler,
+ NULL);
+
+ /*
+ * If the global lock does not exist on this platform, the attempt to
+ * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
+ * Map to AE_OK, but mark global lock as not present. Any attempt to
+ * actually use the global lock will be flagged with an error.
+ */
+ acpi_gbl_global_lock_present = FALSE;
+ if (status == AE_NO_HARDWARE_RESPONSE) {
+ ACPI_ERROR((AE_INFO,
+ "No response from Global Lock hardware, disabling lock"));
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ status = acpi_os_create_lock(&acpi_gbl_global_lock_pending_lock);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_gbl_global_lock_pending = FALSE;
+ acpi_gbl_global_lock_present = TRUE;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_remove_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove the handler for the Global Lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_remove_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
+
+ acpi_gbl_global_lock_present = FALSE;
+ status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_global_lock_handler
+ *
+ * PARAMETERS: Context - From thread interface, not used
+ *
+ * RETURN: ACPI_INTERRUPT_HANDLED
+ *
+ * DESCRIPTION: Invoked directly from the SCI handler when a global lock
+ * release interrupt occurs. If there is actually a pending
+ * request for the lock, signal the waiting thread.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_global_lock_handler(void *context)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ /*
+ * If a request for the global lock is not actually pending,
+ * we are done. This handles "spurious" global lock interrupts
+ * which are possible (and have been seen) with bad BIOSs.
+ */
+ if (!acpi_gbl_global_lock_pending) {
+ goto cleanup_and_exit;
+ }
+
+ /*
+ * Send a unit to the global lock semaphore. The actual acquisition
+ * of the global lock will be performed by the waiting thread.
+ */
+ status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
+ }
+
+ acpi_gbl_global_lock_pending = FALSE;
+
+ cleanup_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+ return (ACPI_INTERRUPT_HANDLED);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_acquire_global_lock
+ *
+ * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Attempt to gain ownership of the Global Lock.
+ *
+ * MUTEX: Interpreter must be locked
+ *
+ * Note: The original implementation allowed multiple threads to "acquire" the
+ * Global Lock, and the OS would hold the lock until the last thread had
+ * released it. However, this could potentially starve the BIOS out of the
+ * lock, especially in the case where there is a tight handshake between the
+ * Embedded Controller driver and the BIOS. Therefore, this implementation
+ * allows only one thread to acquire the HW Global Lock at a time, and makes
+ * the global lock appear as a standard mutex on the OS side.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout)
+{
+ acpi_cpu_flags flags;
+ acpi_status status;
+ u8 acquired = FALSE;
+
+ ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
+
+ /*
+ * Only one thread can acquire the GL at a time, the global_lock_mutex
+ * enforces this. This interface releases the interpreter if we must wait.
+ */
+ status =
+ acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex->mutex.
+ os_mutex, timeout);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Update the global lock handle and check for wraparound. The handle is
+ * only used for the external global lock interfaces, but it is updated
+ * here to properly handle the case where a single thread may acquire the
+ * lock via both the AML and the acpi_acquire_global_lock interfaces. The
+ * handle is therefore updated on the first acquire from a given thread
+ * regardless of where the acquisition request originated.
+ */
+ acpi_gbl_global_lock_handle++;
+ if (acpi_gbl_global_lock_handle == 0) {
+ acpi_gbl_global_lock_handle = 1;
+ }
+
+ /*
+ * Make sure that a global lock actually exists. If not, just
+ * treat the lock as a standard mutex.
+ */
+ if (!acpi_gbl_global_lock_present) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ do {
+
+ /* Attempt to acquire the actual hardware lock */
+
+ ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+ if (acquired) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Acquired hardware Global Lock\n"));
+ break;
+ }
+
+ /*
+ * Did not get the lock. The pending bit was set above, and
+ * we must now wait until we receive the global lock
+ * released interrupt.
+ */
+ acpi_gbl_global_lock_pending = TRUE;
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Waiting for hardware Global Lock\n"));
+
+ /*
+ * Wait for handshake with the global lock interrupt handler.
+ * This interface releases the interpreter if we must wait.
+ */
+ status =
+ acpi_ex_system_wait_semaphore
+ (acpi_gbl_global_lock_semaphore, ACPI_WAIT_FOREVER);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ } while (ACPI_SUCCESS(status));
+
+ acpi_gbl_global_lock_pending = FALSE;
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_release_global_lock
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Releases ownership of the Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_release_global_lock(void)
+{
+ u8 pending = FALSE;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ev_release_global_lock);
+
+ /* Lock must be already acquired */
+
+ if (!acpi_gbl_global_lock_acquired) {
+ ACPI_WARNING((AE_INFO,
+ "Cannot release the ACPI Global Lock, it has not been acquired"));
+ return_ACPI_STATUS(AE_NOT_ACQUIRED);
+ }
+
+ if (acpi_gbl_global_lock_present) {
+
+ /* Allow any thread to release the lock */
+
+ ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
+
+ /*
+ * If the pending bit was set, we must write GBL_RLS to the control
+ * register
+ */
+ if (pending) {
+ status =
+ acpi_write_bit_register
+ (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
+ ACPI_ENABLE_EVENT);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Released hardware Global Lock\n"));
+ }
+
+ acpi_gbl_global_lock_acquired = FALSE;
+
+ /* Release the local GL mutex */
+
+ acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 7dc80946f7bd..d0b331844427 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -45,7 +45,6 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")
@@ -53,10 +52,6 @@ ACPI_MODULE_NAME("evmisc")
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
-static u32 acpi_ev_global_lock_handler(void *context);
-
-static acpi_status acpi_ev_remove_global_lock_handler(void);
-
/*******************************************************************************
*
* FUNCTION: acpi_ev_is_notify_object
@@ -275,304 +270,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
acpi_ut_delete_generic_state(notify_info);
}
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_global_lock_handler
- *
- * PARAMETERS: Context - From thread interface, not used
- *
- * RETURN: ACPI_INTERRUPT_HANDLED
- *
- * DESCRIPTION: Invoked directly from the SCI handler when a global lock
- * release interrupt occurs. If there's a thread waiting for
- * the global lock, signal it.
- *
- * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
- * this is not possible for some reason, a separate thread will have to be
- * scheduled to do this.
- *
- ******************************************************************************/
-static u8 acpi_ev_global_lock_pending;
-
-static u32 acpi_ev_global_lock_handler(void *context)
-{
- acpi_status status;
- acpi_cpu_flags flags;
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- if (!acpi_ev_global_lock_pending) {
- goto out;
- }
-
- /* Send a unit to the semaphore */
-
- status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
- }
-
- acpi_ev_global_lock_pending = FALSE;
-
- out:
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- return (ACPI_INTERRUPT_HANDLED);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_init_global_lock_handler
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Install a handler for the global lock release event
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_init_global_lock_handler(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
-
- /* Attempt installation of the global lock handler */
-
- status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
- acpi_ev_global_lock_handler,
- NULL);
-
- /*
- * If the global lock does not exist on this platform, the attempt to
- * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
- * Map to AE_OK, but mark global lock as not present. Any attempt to
- * actually use the global lock will be flagged with an error.
- */
- if (status == AE_NO_HARDWARE_RESPONSE) {
- ACPI_ERROR((AE_INFO,
- "No response from Global Lock hardware, disabling lock"));
-
- acpi_gbl_global_lock_present = FALSE;
- return_ACPI_STATUS(AE_OK);
- }
-
- acpi_gbl_global_lock_present = TRUE;
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_remove_global_lock_handler
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove the handler for the Global Lock
- *
- ******************************************************************************/
-
-static acpi_status acpi_ev_remove_global_lock_handler(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
-
- acpi_gbl_global_lock_present = FALSE;
- status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
- acpi_ev_global_lock_handler);
-
- return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_ev_acquire_global_lock
- *
- * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Attempt to gain ownership of the Global Lock.
- *
- * MUTEX: Interpreter must be locked
- *
- * Note: The original implementation allowed multiple threads to "acquire" the
- * Global Lock, and the OS would hold the lock until the last thread had
- * released it. However, this could potentially starve the BIOS out of the
- * lock, especially in the case where there is a tight handshake between the
- * Embedded Controller driver and the BIOS. Therefore, this implementation
- * allows only one thread to acquire the HW Global Lock at a time, and makes
- * the global lock appear as a standard mutex on the OS side.
- *
- *****************************************************************************/
-static acpi_thread_id acpi_ev_global_lock_thread_id;
-static int acpi_ev_global_lock_acquired;
-
-acpi_status acpi_ev_acquire_global_lock(u16 timeout)
-{
- acpi_cpu_flags flags;
- acpi_status status = AE_OK;
- u8 acquired = FALSE;
-
- ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
-
- /*
- * Only one thread can acquire the GL at a time, the global_lock_mutex
- * enforces this. This interface releases the interpreter if we must wait.
- */
- status = acpi_ex_system_wait_mutex(
- acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
- if (status == AE_TIME) {
- if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
- acpi_ev_global_lock_acquired++;
- return AE_OK;
- }
- }
-
- if (ACPI_FAILURE(status)) {
- status = acpi_ex_system_wait_mutex(
- acpi_gbl_global_lock_mutex->mutex.os_mutex,
- timeout);
- }
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
- acpi_ev_global_lock_acquired++;
-
- /*
- * Update the global lock handle and check for wraparound. The handle is
- * only used for the external global lock interfaces, but it is updated
- * here to properly handle the case where a single thread may acquire the
- * lock via both the AML and the acpi_acquire_global_lock interfaces. The
- * handle is therefore updated on the first acquire from a given thread
- * regardless of where the acquisition request originated.
- */
- acpi_gbl_global_lock_handle++;
- if (acpi_gbl_global_lock_handle == 0) {
- acpi_gbl_global_lock_handle = 1;
- }
-
- /*
- * Make sure that a global lock actually exists. If not, just treat the
- * lock as a standard mutex.
- */
- if (!acpi_gbl_global_lock_present) {
- acpi_gbl_global_lock_acquired = TRUE;
- return_ACPI_STATUS(AE_OK);
- }
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- do {
-
- /* Attempt to acquire the actual hardware lock */
-
- ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
- if (acquired) {
- acpi_gbl_global_lock_acquired = TRUE;
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Acquired hardware Global Lock\n"));
- break;
- }
-
- acpi_ev_global_lock_pending = TRUE;
-
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- /*
- * Did not get the lock. The pending bit was set above, and we
- * must wait until we get the global lock released interrupt.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Waiting for hardware Global Lock\n"));
-
- /*
- * Wait for handshake with the global lock interrupt handler.
- * This interface releases the interpreter if we must wait.
- */
- status = acpi_ex_system_wait_semaphore(
- acpi_gbl_global_lock_semaphore,
- ACPI_WAIT_FOREVER);
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- } while (ACPI_SUCCESS(status));
-
- acpi_ev_global_lock_pending = FALSE;
-
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_release_global_lock
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Releases ownership of the Global Lock.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_release_global_lock(void)
-{
- u8 pending = FALSE;
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE(ev_release_global_lock);
-
- /* Lock must be already acquired */
-
- if (!acpi_gbl_global_lock_acquired) {
- ACPI_WARNING((AE_INFO,
- "Cannot release the ACPI Global Lock, it has not been acquired"));
- return_ACPI_STATUS(AE_NOT_ACQUIRED);
- }
-
- acpi_ev_global_lock_acquired--;
- if (acpi_ev_global_lock_acquired > 0) {
- return AE_OK;
- }
-
- if (acpi_gbl_global_lock_present) {
-
- /* Allow any thread to release the lock */
-
- ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
-
- /*
- * If the pending bit was set, we must write GBL_RLS to the control
- * register
- */
- if (pending) {
- status =
- acpi_write_bit_register
- (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
- ACPI_ENABLE_EVENT);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Released hardware Global Lock\n"));
- }
-
- acpi_gbl_global_lock_acquired = FALSE;
-
- /* Release the local GL mutex */
- acpi_ev_global_lock_thread_id = 0;
- acpi_ev_global_lock_acquired = 0;
- acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
- return_ACPI_STATUS(status);
-}
-
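The acquire/release pair removed above is a re-entrant lock: the owner records its thread id plus a nesting depth, so a repeated acquire from the same thread only bumps the counter instead of blocking on the OS mutex, while every other thread waits. A minimal user-space sketch of the same pattern, using pthreads as a stand-in for the ACPI OSL primitives; the names gl_acquire/gl_release are illustrative and not part of ACPICA:

#include <pthread.h>

static pthread_mutex_t gl_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t gl_owner;
static int gl_depth;

static void gl_acquire(void)
{
	/* As in the code above, this check can only succeed for the
	 * thread that currently owns the lock. */
	if (gl_depth && pthread_equal(gl_owner, pthread_self())) {
		gl_depth++;			/* same thread: nest deeper */
		return;
	}
	pthread_mutex_lock(&gl_mutex);		/* other threads block here */
	gl_owner = pthread_self();
	gl_depth = 1;
}

static void gl_release(void)
{
	if (--gl_depth > 0)
		return;				/* still nested, keep ownership */
	pthread_mutex_unlock(&gl_mutex);
}

int main(void)
{
	gl_acquire();
	gl_acquire();	/* nested acquire by the same thread: just counts */
	gl_release();
	gl_release();	/* last release drops the mutex */
	return 0;
}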
/******************************************************************************
*
* FUNCTION: acpi_ev_terminate
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index bea7223d7a71..f0edf5c43c03 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -55,6 +55,8 @@ static u8
acpi_ev_has_default_handler(struct acpi_namespace_node *node,
acpi_adr_space_type space_id);
+static void acpi_ev_orphan_ec_reg_method(void);
+
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -561,7 +563,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Now stop region accesses by executing the _REG method */
- status = acpi_ev_execute_reg_method(region_obj, 0);
+ status =
+ acpi_ev_execute_reg_method(region_obj,
+ ACPI_REG_DISCONNECT);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"from region _REG, [%s]",
@@ -1062,6 +1066,12 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
NULL, &space_id, NULL);
+ /* Special case for EC: handle "orphan" _REG methods with no region */
+
+ if (space_id == ACPI_ADR_SPACE_EC) {
+ acpi_ev_orphan_ec_reg_method();
+ }
+
return_ACPI_STATUS(status);
}
@@ -1120,6 +1130,113 @@ acpi_ev_reg_run(acpi_handle obj_handle,
return (AE_OK);
}
- status = acpi_ev_execute_reg_method(obj_desc, 1);
+ status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
return (status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_orphan_ec_reg_method
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC
+ * device. This is a _REG method that has no corresponding region
+ * within the EC device scope. The orphan _REG method appears to
+ * have been enabled by the description of the ECDT in the ACPI
+ * specification: "The availability of the region space can be
+ * detected by providing a _REG method object underneath the
+ * Embedded Controller device."
+ *
+ * To quickly access the EC device, we use the EC_ID that appears
+ * within the ECDT. Otherwise, we would need to perform a time-
+ * consuming namespace walk, executing _HID methods to find the
+ * EC device.
+ *
+ ******************************************************************************/
+
+static void acpi_ev_orphan_ec_reg_method(void)
+{
+ struct acpi_table_ecdt *table;
+ acpi_status status;
+ struct acpi_object_list args;
+ union acpi_object objects[2];
+ struct acpi_namespace_node *ec_device_node;
+ struct acpi_namespace_node *reg_method;
+ struct acpi_namespace_node *next_node;
+
+ ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method);
+
+ /* Get the ECDT (if present in system) */
+
+ status = acpi_get_table(ACPI_SIG_ECDT, 0,
+ ACPI_CAST_INDIRECT_PTR(struct acpi_table_header,
+ &table));
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* We need a valid EC_ID string */
+
+ if (!(*table->id)) {
+ return_VOID;
+ }
+
+ /* Namespace is currently locked, must release */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ /* Get a handle to the EC device referenced in the ECDT */
+
+ status = acpi_get_handle(NULL,
+ ACPI_CAST_PTR(char, table->id),
+ ACPI_CAST_PTR(acpi_handle, &ec_device_node));
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Get a handle to a _REG method immediately under the EC device */
+
+ status = acpi_get_handle(ec_device_node,
+ METHOD_NAME__REG, ACPI_CAST_PTR(acpi_handle,
+ &reg_method));
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /*
+ * Execute the _REG method only if there is no Operation Region in
+ * this scope with the Embedded Controller space ID. Otherwise, it
+ * will already have been executed. Note, this allows for Regions
+ * with other space IDs to be present; but the code below will then
+ * execute the _REG method with the EC space ID argument.
+ */
+ next_node = acpi_ns_get_next_node(ec_device_node, NULL);
+ while (next_node) {
+ if ((next_node->type == ACPI_TYPE_REGION) &&
+ (next_node->object) &&
+ (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) {
+ goto exit; /* Do not execute _REG */
+ }
+ next_node = acpi_ns_get_next_node(ec_device_node, next_node);
+ }
+
+ /* Evaluate the _REG(EC,Connect) method */
+
+ args.count = 2;
+ args.pointer = objects;
+ objects[0].type = ACPI_TYPE_INTEGER;
+ objects[0].integer.value = ACPI_ADR_SPACE_EC;
+ objects[1].type = ACPI_TYPE_INTEGER;
+ objects[1].integer.value = ACPI_REG_CONNECT;
+
+ status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
+
+ exit:
+ /* We ignore all errors from above, don't care */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ return_VOID;
+}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 9659cee6093e..55a5d35ef34a 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -637,7 +637,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
status =
acpi_ev_execute_reg_method
- (region_obj, 1);
+ (region_obj, ACPI_REG_CONNECT);
if (acpi_ns_locked) {
status =
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index c85c8c45599d..00cd95692a91 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -130,20 +130,21 @@ acpi_install_address_space_handler(acpi_handle device,
case ACPI_ADR_SPACE_PCI_CONFIG:
case ACPI_ADR_SPACE_DATA_TABLE:
- if (acpi_gbl_reg_methods_executed) {
+ if (!acpi_gbl_reg_methods_executed) {
- /* Run all _REG methods for this address space */
-
- status = acpi_ev_execute_reg_methods(node, space_id);
+ /* We will defer execution of the _REG methods for this space */
+ goto unlock_and_exit;
}
break;
default:
-
- status = acpi_ev_execute_reg_methods(node, space_id);
break;
}
+ /* Run all _REG methods for this address space */
+
+ status = acpi_ev_execute_reg_methods(node, space_id);
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index e7b372d17667..110711afada8 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -305,7 +305,8 @@ acpi_ex_create_region(u8 * aml_start,
* range
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
- (region_space < ACPI_USER_REGION_BEGIN)) {
+ (region_space < ACPI_USER_REGION_BEGIN) &&
+ (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 1d76ac85b5e7..ac7b854b0bd7 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -74,7 +74,6 @@ ACPI_MODULE_NAME("nsrepair")
*
* Additional possible repairs:
*
- * Optional/unnecessary NULL package elements removed
* Required package elements that are NULL replaced by Integer/String/Buffer
* Incorrect standalone package wrapped with required outer package
*
@@ -623,16 +622,12 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
ACPI_FUNCTION_NAME(ns_remove_null_elements);
/*
- * PTYPE1 packages contain no subpackages.
- * PTYPE2 packages contain a variable number of sub-packages. We can
- * safely remove all NULL elements from the PTYPE2 packages.
+ * We can safely remove all NULL elements from these package types:
+ * PTYPE1_VAR packages contain a variable number of simple data types.
+ * PTYPE2 packages contain a variable number of sub-packages.
*/
switch (package_type) {
- case ACPI_PTYPE1_FIXED:
case ACPI_PTYPE1_VAR:
- case ACPI_PTYPE1_OPTION:
- return;
-
case ACPI_PTYPE2:
case ACPI_PTYPE2_COUNT:
case ACPI_PTYPE2_PKG_COUNT:
@@ -642,6 +637,8 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
break;
default:
+ case ACPI_PTYPE1_FIXED:
+ case ACPI_PTYPE1_OPTION:
return;
}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 136a814cec69..97cb36f85ce9 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -170,8 +170,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
- "IPMI",
- "DataTable"
+ "IPMI"
};
char *acpi_ut_get_region_name(u8 space_id)
@@ -179,6 +178,8 @@ char *acpi_ut_get_region_name(u8 space_id)
if (space_id >= ACPI_USER_REGION_BEGIN) {
return ("UserDefinedRegion");
+ } else if (space_id == ACPI_ADR_SPACE_DATA_TABLE) {
+ return ("DataTable");
} else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
return ("FunctionalFixedHW");
} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index a946c689f03b..7d797e2baecd 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -83,9 +83,15 @@ acpi_status acpi_ut_mutex_initialize(void)
/* Create the spinlocks for use at interrupt level */
- spin_lock_init(acpi_gbl_gpe_lock);
- spin_lock_init(acpi_gbl_hardware_lock);
- spin_lock_init(acpi_ev_global_lock_pending_lock);
+ status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
+
+ status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
/* Mutex for _OSI support */
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9749980ca6ca..d1e06c182cdb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -227,7 +227,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
acpi_status status = AE_OK;
char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
- if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
/* Make sure this is a valid target state */
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
new file mode 100644
index 000000000000..5d42c2414ae5
--- /dev/null
+++ b/drivers/acpi/custom_method.c
@@ -0,0 +1,100 @@
+/*
+ * debugfs.c - ACPI debugfs interface to userspace.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("custom_method");
+MODULE_LICENSE("GPL");
+
+static struct dentry *cm_dentry;
+
+/* /sys/kernel/debug/acpi/custom_method */
+
+static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char *buf;
+ static u32 max_size;
+ static u32 uncopied_bytes;
+
+ struct acpi_table_header table;
+ acpi_status status;
+
+ if (!(*ppos)) {
+ /* parse the table header to get the table length */
+ if (count <= sizeof(struct acpi_table_header))
+ return -EINVAL;
+ if (copy_from_user(&table, user_buf,
+ sizeof(struct acpi_table_header)))
+ return -EFAULT;
+ uncopied_bytes = max_size = table.length;
+ buf = kzalloc(max_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ if (buf == NULL)
+ return -EINVAL;
+
+ if ((*ppos > max_size) ||
+ (*ppos + count > max_size) ||
+ (*ppos + count < count) ||
+ (count > uncopied_bytes))
+ return -EINVAL;
+
+ if (copy_from_user(buf + (*ppos), user_buf, count)) {
+ kfree(buf);
+ buf = NULL;
+ return -EFAULT;
+ }
+
+ uncopied_bytes -= count;
+ *ppos += count;
+
+ if (!uncopied_bytes) {
+ status = acpi_install_method(buf);
+ kfree(buf);
+ buf = NULL;
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
+
+ return count;
+}
+
+static const struct file_operations cm_fops = {
+ .write = cm_write,
+ .llseek = default_llseek,
+};
+
+static int __init acpi_custom_method_init(void)
+{
+ if (acpi_debugfs_dir == NULL)
+ return -ENOENT;
+
+ cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
+ acpi_debugfs_dir, NULL, &cm_fops);
+ if (cm_dentry == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_custom_method_exit(void)
+{
+ if (cm_dentry)
+ debugfs_remove(cm_dentry);
+ }
+
+module_init(acpi_custom_method_init);
+module_exit(acpi_custom_method_exit);
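The new /sys/kernel/debug/acpi/custom_method file expects a complete AML image streamed in through ordinary write() calls: the first write must cover the ACPI table header so the total length can be read, and acpi_install_method() runs once the final byte has arrived. A minimal user-space sketch of feeding it a pre-compiled method (the method.aml file name is illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* AML produced by an ASL compiler from a small Method() definition */
	int in = open("method.aml", O_RDONLY);
	int out = open("/sys/kernel/debug/acpi/custom_method", O_WRONLY);

	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n) {	/* kernel rejects bad headers/sizes */
			perror("write");
			return 1;
		}
	close(in);
	close(out);
	return 0;
}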
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 384f7abcff77..182a9fc36355 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -3,100 +3,16 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <acpi/acpi_drivers.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("debugfs");
+struct dentry *acpi_debugfs_dir;
+EXPORT_SYMBOL_GPL(acpi_debugfs_dir);
-/* /sys/modules/acpi/parameters/aml_debug_output */
-
-module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
- bool, 0644);
-MODULE_PARM_DESC(aml_debug_output,
- "To enable/disable the ACPI Debug Object output.");
-
-/* /sys/kernel/debug/acpi/custom_method */
-
-static ssize_t cm_write(struct file *file, const char __user * user_buf,
- size_t count, loff_t *ppos)
+void __init acpi_debugfs_init(void)
{
- static char *buf;
- static u32 max_size;
- static u32 uncopied_bytes;
-
- struct acpi_table_header table;
- acpi_status status;
-
- if (!(*ppos)) {
- /* parse the table header to get the table length */
- if (count <= sizeof(struct acpi_table_header))
- return -EINVAL;
- if (copy_from_user(&table, user_buf,
- sizeof(struct acpi_table_header)))
- return -EFAULT;
- uncopied_bytes = max_size = table.length;
- buf = kzalloc(max_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- if (buf == NULL)
- return -EINVAL;
-
- if ((*ppos > max_size) ||
- (*ppos + count > max_size) ||
- (*ppos + count < count) ||
- (count > uncopied_bytes))
- return -EINVAL;
-
- if (copy_from_user(buf + (*ppos), user_buf, count)) {
- kfree(buf);
- buf = NULL;
- return -EFAULT;
- }
-
- uncopied_bytes -= count;
- *ppos += count;
-
- if (!uncopied_bytes) {
- status = acpi_install_method(buf);
- kfree(buf);
- buf = NULL;
- if (ACPI_FAILURE(status))
- return -EINVAL;
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
- }
-
- return count;
-}
-
-static const struct file_operations cm_fops = {
- .write = cm_write,
- .llseek = default_llseek,
-};
-
-int __init acpi_debugfs_init(void)
-{
- struct dentry *acpi_dir, *cm_dentry;
-
- acpi_dir = debugfs_create_dir("acpi", NULL);
- if (!acpi_dir)
- goto err;
-
- cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
- acpi_dir, NULL, &cm_fops);
- if (!cm_dentry)
- goto err;
-
- return 0;
-
-err:
- if (acpi_dir)
- debugfs_remove(acpi_dir);
- return -EINVAL;
+ acpi_debugfs_dir = debugfs_create_dir("acpi", NULL);
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index fa848c4116a8..b19a18dd994f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -69,7 +69,6 @@ enum ec_command {
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
-#define ACPI_EC_CDELAY 10 /* Wait 10us before polling EC */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
@@ -433,8 +432,7 @@ EXPORT_SYMBOL(ec_write);
int ec_transaction(u8 command,
const u8 * wdata, unsigned wdata_len,
- u8 * rdata, unsigned rdata_len,
- int force_poll)
+ u8 * rdata, unsigned rdata_len)
{
struct transaction t = {.command = command,
.wdata = wdata, .rdata = rdata,
@@ -592,8 +590,6 @@ static void acpi_ec_gpe_query(void *ec_cxt)
mutex_unlock(&ec->lock);
}
-static void acpi_ec_gpe_query(void *ec_cxt);
-
static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
@@ -808,8 +804,6 @@ static int acpi_ec_add(struct acpi_device *device)
return -EINVAL;
}
- ec->handle = device->handle;
-
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
acpi_ec_register_query_methods, NULL, ec, NULL);
@@ -938,8 +932,19 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
ec_flag_msi, "MSI hardware", {
DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
{
+ ec_flag_msi, "Quanta hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
+ {
+ ec_flag_msi, "Quanta hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
+ {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
{},
};
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4bfb759deb10..ca75b9ce0489 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -28,9 +28,10 @@ int acpi_scan_init(void);
int acpi_sysfs_init(void);
#ifdef CONFIG_DEBUG_FS
+extern struct dentry *acpi_debugfs_dir;
int acpi_debugfs_init(void);
#else
-static inline int acpi_debugfs_init(void) { return 0; }
+static inline void acpi_debugfs_init(void) { return; }
#endif
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 45ad4ffef533..52ca9649d769 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -902,14 +902,6 @@ void acpi_os_wait_events_complete(void *context)
EXPORT_SYMBOL(acpi_os_wait_events_complete);
-/*
- * Deallocate the memory for a spinlock.
- */
-void acpi_os_delete_lock(acpi_spinlock handle)
-{
- return;
-}
-
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
@@ -1341,6 +1333,31 @@ int acpi_resources_are_enforced(void)
EXPORT_SYMBOL(acpi_resources_are_enforced);
/*
+ * Create and initialize a spinlock.
+ */
+acpi_status
+acpi_os_create_lock(acpi_spinlock *out_handle)
+{
+ spinlock_t *lock;
+
+ lock = ACPI_ALLOCATE(sizeof(spinlock_t));
+ if (!lock)
+ return AE_NO_MEMORY;
+ spin_lock_init(lock);
+ *out_handle = lock;
+
+ return AE_OK;
+}
+
+/*
+ * Deallocate the memory for a spinlock.
+ */
+void acpi_os_delete_lock(acpi_spinlock handle)
+{
+ ACPI_FREE(handle);
+}
+
+/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 25bf17da69fd..02d2a4c9084d 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -37,7 +37,6 @@ static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
{},
};
-#ifdef CONFIG_SMP
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
@@ -165,7 +164,9 @@ exit:
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
+#ifdef CONFIG_SMP
int i;
+#endif
int apic_id = -1;
apic_id = map_mat_entry(handle, type, acpi_id);
@@ -174,14 +175,19 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
if (apic_id == -1)
return apic_id;
+#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
if (cpu_physical_id(i) == apic_id)
return i;
}
+#else
+ /* In UP kernel, only processor 0 is valid */
+ if (apic_id == 0)
+ return apic_id;
+#endif
return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-#endif
static bool __init processor_physically_present(acpi_handle handle)
{
@@ -217,7 +223,7 @@ static bool __init processor_physically_present(acpi_handle handle)
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
cpuid = acpi_get_cpuid(handle, type, acpi_id);
- if ((cpuid == -1) && (num_possible_cpus() > 1))
+ if (cpuid == -1)
return false;
return true;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d615b7d69bca..431ab11c8c1b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -161,7 +161,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
- if (c1e_detected)
+ if (amd_e400_c1e_detected)
type = ACPI_STATE_C1;
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 61891e75583d..77255f250dbb 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -220,6 +220,14 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */
+
+/* /sys/modules/acpi/parameters/aml_debug_output */
+
+module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
+ bool, 0644);
+MODULE_PARM_DESC(aml_debug_output,
+ "To enable/disable the ACPI Debug Object output.");
+
/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 7025593a58c8..d74926e0939e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -603,6 +603,10 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
if (ret)
goto err_out;
+ /* Hard-coded primecell ID instead of plug-n-play */
+ if (dev->periphid != 0)
+ goto skip_probe;
+
/*
* Dynamically calculate the size of the resource
* and use this for iomap
@@ -643,6 +647,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
if (ret)
goto err_release;
+ skip_probe:
ret = device_add(&dev->dev);
if (ret)
goto err_release;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 736bee5dafeb..000d03ae6653 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,9 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* Devices which choke on SETXFER. Applies only if both the
* device and controller are SATA.
*/
- { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* End Marker */
{ }
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index dfb6e9d3d759..7f099d6e4e0b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2802,10 +2802,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
}
/*
- * Some controllers can't be frozen very well and may set
- * spuruious error conditions during reset. Clear accumulated
- * error information. As reset is the final recovery action,
- * nothing is lost by doing this.
+ * Some controllers can't be frozen very well and may set spurious
+ * error conditions during reset. Clear accumulated error
+ * information and re-thaw the port if frozen. As reset is the
+ * final recovery action and we cross check link onlineness against
+ * device classification later, no hotplug event is lost by this.
*/
spin_lock_irqsave(link->ap->lock, flags);
memset(&link->eh_info, 0, sizeof(link->eh_info));
@@ -2814,6 +2815,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
spin_unlock_irqrestore(link->ap->lock, flags);
+ if (ap->pflags & ATA_PFLAG_FROZEN)
+ ata_eh_thaw_port(ap);
+
/*
* Make sure onlineness and classification result correspond.
* Hotplug could have happened during reset and some
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d51f9795c064..927f968e99d9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3797,6 +3797,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
*/
int ata_sas_port_start(struct ata_port *ap)
{
+ /*
+ * the port is marked as frozen at allocation time, but if we don't
+ * have new eh, we won't thaw it
+ */
+ if (!ap->ops->error_handler)
+ ap->pflags &= ~ATA_PFLAG_FROZEN;
return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 75a6a0c0094f..5d7f58a7e34d 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -161,6 +161,9 @@ static const struct pci_device_id marvell_pci_tbl[] = {
{ PCI_DEVICE(0x11AB, 0x6121), },
{ PCI_DEVICE(0x11AB, 0x6123), },
{ PCI_DEVICE(0x11AB, 0x6145), },
+ { PCI_DEVICE(0x1B4B, 0x91A0), },
+ { PCI_DEVICE(0x1B4B, 0x91A4), },
+
{ } /* terminate list */
};
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 1c4b3aa4c7c4..dc88a39e7db8 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -389,7 +389,7 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf)
/*
* Function: get_burst_length_encode
* arguments: datalength: length in bytes of data
- * returns value to be programmed in register corrresponding to data length
+ * returns value to be programmed in register corresponding to data length
* This value is effectively the log(base 2) of the length
*/
static int get_burst_length_encode(int datalength)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index c0dd09df7be8..ad367c4139b1 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -291,7 +291,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
{
struct pm_clk_notifier_block *clknb;
struct device *dev = data;
- char *con_id;
+ char **con_id;
int error;
dev_dbg(dev, "%s() %ld\n", __func__, action);
@@ -309,8 +309,8 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
dev->pwr_domain = clknb->pwr_domain;
if (clknb->con_ids[0]) {
- for (con_id = clknb->con_ids[0]; *con_id; con_id++)
- pm_runtime_clk_add(dev, con_id);
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ pm_runtime_clk_add(dev, *con_id);
} else {
pm_runtime_clk_add(dev, NULL);
}
@@ -380,25 +380,25 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
{
struct pm_clk_notifier_block *clknb;
struct device *dev = data;
- char *con_id;
+ char **con_id;
dev_dbg(dev, "%s() %ld\n", __func__, action);
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
+ case BUS_NOTIFY_BIND_DRIVER:
if (clknb->con_ids[0]) {
- for (con_id = clknb->con_ids[0]; *con_id; con_id++)
- enable_clock(dev, con_id);
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ enable_clock(dev, *con_id);
} else {
enable_clock(dev, NULL);
}
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_UNBOUND_DRIVER:
if (clknb->con_ids[0]) {
- for (con_id = clknb->con_ids[0]; *con_id; con_id++)
- disable_clock(dev, con_id);
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ disable_clock(dev, *con_id);
} else {
disable_clock(dev, NULL);
}
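The clock_ops.c change above is essentially a type fix: con_ids is a NULL-terminated array of connection-id strings, so the iterator must be a char ** walking the array, not a char * walking the characters of the first entry. A reduced, standalone sketch of the corrected loop shape; enable_clock() here is a stand-in for the real helpers, not the driver's function:

#include <stdio.h>

static void enable_clock(const char *con_id)
{
	printf("enable clock '%s'\n", con_id ? con_id : "(default)");
}

int main(void)
{
	/* NULL-terminated list, as in struct pm_clk_notifier_block */
	const char *con_ids[] = { "fck", "ick", NULL };
	const char **con_id;

	if (con_ids[0]) {
		for (con_id = con_ids; *con_id; con_id++)
			enable_clock(*con_id);	/* pass the string, not a char */
	} else {
		enable_clock(NULL);
	}
	return 0;
}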
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index aa6320207745..06f09bf89cb2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,7 +57,8 @@ static int async_error;
*/
void device_pm_init(struct device *dev)
{
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
+ dev->power.is_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
mutex_lock(&dpm_list_mtx);
- if (dev->parent && dev->parent->power.in_suspend)
+ if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
@@ -511,7 +512,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dpm_wait(dev->parent, async);
device_lock(dev);
- dev->power.in_suspend = false;
+ /*
+ * This is a fib. But we'll allow new children to be added below
+ * a resumed device, even if the device hasn't been completed yet.
+ */
+ dev->power.is_prepared = false;
+
+ if (!dev->power.is_suspended)
+ goto Unlock;
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "power domain ");
@@ -548,6 +556,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = false;
+
+ Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -670,7 +681,7 @@ void dpm_complete(pm_message_t state)
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
@@ -835,11 +846,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_lock(dev);
if (async_error)
- goto End;
+ goto Unlock;
if (pm_wakeup_pending()) {
async_error = -EBUSY;
- goto End;
+ goto Unlock;
}
if (dev->pwr_domain) {
@@ -877,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = !error;
+
+ Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -1042,7 +1056,7 @@ int dpm_prepare(pm_message_t state)
put_device(dev);
break;
}
- dev->power.in_suspend = true;
+ dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index b7f51e4594f8..dba1c32e1ddf 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -35,10 +35,6 @@
*/
struct brd_device {
int brd_number;
- int brd_refcnt;
- loff_t brd_offset;
- loff_t brd_sizelimit;
- unsigned brd_blocksize;
struct request_queue *brd_queue;
struct gendisk *brd_disk;
@@ -440,11 +436,11 @@ static int rd_nr;
int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
static int max_part;
static int part_shift;
-module_param(rd_nr, int, 0);
+module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
-module_param(rd_size, int, 0);
+module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
-module_param(max_part, int, 0);
+module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
@@ -552,7 +548,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
struct kobject *kobj;
mutex_lock(&brd_devices_mutex);
- brd = brd_init_one(dev & MINORMASK);
+ brd = brd_init_one(MINOR(dev) >> part_shift);
kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
mutex_unlock(&brd_devices_mutex);
@@ -575,25 +571,39 @@ static int __init brd_init(void)
*
* (1) if rd_nr is specified, create that many upfront, and this
* also becomes a hard limit.
- * (2) if rd_nr is not specified, create 1 rd device on module
- * load, user can further extend brd device by create dev node
- * themselves and have kernel automatically instantiate actual
- * device on-demand.
+ * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
+ * (default 16) rd devices on module load; users can further
+ * extend the brd devices by creating dev nodes themselves and
+ * have the kernel instantiate the actual device on demand.
*/
part_shift = 0;
- if (max_part > 0)
+ if (max_part > 0) {
part_shift = fls(max_part);
+ /*
+ * Adjust max_part according to part_shift as it is exported
+ * to user space so that users can decide the correct minor
+ * number if they want to create more devices.
+ *
+ * Note that -1 is required because partition 0 is reserved
+ * for the whole disk.
+ */
+ max_part = (1UL << part_shift) - 1;
+ }
+
+ if ((1UL << part_shift) > DISK_MAX_PARTS)
+ return -EINVAL;
+
if (rd_nr > 1UL << (MINORBITS - part_shift))
return -EINVAL;
if (rd_nr) {
nr = rd_nr;
- range = rd_nr;
+ range = rd_nr << part_shift;
} else {
nr = CONFIG_BLK_DEV_RAM_COUNT;
- range = 1UL << (MINORBITS - part_shift);
+ range = 1UL << MINORBITS;
}
if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
@@ -632,7 +642,7 @@ static void __exit brd_exit(void)
unsigned long range;
struct brd_device *brd, *next;
- range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);
+ range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
brd_del_one(brd);
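The brd, loop and nbd changes all share the same minor-number layout: part_shift = fls(max_part) bits of each minor encode the partition, and max_part is then rounded to (1 << part_shift) - 1 so user space sees the real limit (the -1 because partition 0 is the whole disk). For example, max_part=7 keeps part_shift=3 and max_part=7, while max_part=5 also gives part_shift=3 and max_part is rounded up to 7. A small sketch of the arithmetic; fls_demo() is a plain reimplementation of fls() for illustration:

#include <stdio.h>

/* find last (most significant) set bit, 1-based; 0 for x == 0 */
static int fls_demo(unsigned int x)
{
	int r = 0;
	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int max_part = 5;
	int part_shift = 0;

	if (max_part > 0) {
		part_shift = fls_demo(max_part);
		max_part = (1UL << part_shift) - 1;	/* -1: partition 0 is the whole disk */
	}
	printf("part_shift=%d max_part=%u minors/disk=%lu\n",
	       part_shift, max_part, 1UL << part_shift);
	return 0;
}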
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index db8f88586c8d..98de8f418676 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1038,6 +1038,7 @@ static void floppy_disable_hlt(void)
{
unsigned long flags;
+ WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
spin_lock_irqsave(&floppy_hlt_lock, flags);
if (!hlt_disabled) {
hlt_disabled = 1;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c59a672a3de0..76c8da78212b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1540,9 +1540,9 @@ static const struct block_device_operations lo_fops = {
* And now the modules code and kernel interface.
*/
static int max_loop;
-module_param(max_loop, int, 0);
+module_param(max_loop, int, S_IRUGO);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
-module_param(max_part, int, 0);
+module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@@ -1688,9 +1688,20 @@ static int __init loop_init(void)
*/
part_shift = 0;
- if (max_part > 0)
+ if (max_part > 0) {
part_shift = fls(max_part);
+ /*
+ * Adjust max_part according to part_shift as it is exported
+ * to user space so that users can decide the correct minor
+ * number if they want to create more devices.
+ *
+ * Note that -1 is required because partition 0 is reserved
+ * for the whole disk.
+ */
+ max_part = (1UL << part_shift) - 1;
+ }
+
if ((1UL << part_shift) > DISK_MAX_PARTS)
return -EINVAL;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e6fc716aca45..f533f3375e24 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -192,7 +192,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
if (lo->xmit_timeout)
del_timer_sync(&ti);
} else
- result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
+ result = kernel_recvmsg(sock, &msg, &iov, 1, size,
+ msg.msg_flags);
if (signal_pending(current)) {
siginfo_t info;
@@ -753,9 +754,26 @@ static int __init nbd_init(void)
return -ENOMEM;
part_shift = 0;
- if (max_part > 0)
+ if (max_part > 0) {
part_shift = fls(max_part);
+ /*
+ * Adjust max_part according to part_shift as it is exported
+ * to user space so that users can know the maximum number of
+ * partitions the kernel is able to manage.
+ *
+ * Note that -1 is required because partition 0 is reserved
+ * for the whole disk.
+ */
+ max_part = (1UL << part_shift) - 1;
+ }
+
+ if ((1UL << part_shift) > DISK_MAX_PARTS)
+ return -EINVAL;
+
+ if (nbds_max > 1UL << (MINORBITS - part_shift))
+ return -EINVAL;
+
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index a0aabd904a51..46b8136c31bb 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -321,7 +321,6 @@ static void pcd_init_units(void)
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ecf89cdf006..079c08808d8a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,10 +6,13 @@
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
+#include <scsi/scsi_cmnd.h>
#define PART_BITS 4
static int major, index;
+struct workqueue_struct *virtblk_wq;
struct virtio_blk
{
@@ -26,6 +29,9 @@ struct virtio_blk
mempool_t *pool;
+ /* Process context for config space updates */
+ struct work_struct config_work;
+
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;
@@ -141,7 +147,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
- sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+ sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
sizeof(vbr->in_hdr));
}
@@ -291,6 +297,46 @@ static ssize_t virtblk_serial_show(struct device *dev,
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
+static void virtblk_config_changed_work(struct work_struct *work)
+{
+ struct virtio_blk *vblk =
+ container_of(work, struct virtio_blk, config_work);
+ struct virtio_device *vdev = vblk->vdev;
+ struct request_queue *q = vblk->disk->queue;
+ char cap_str_2[10], cap_str_10[10];
+ u64 capacity, size;
+
+ /* Host must always specify the capacity. */
+ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
+ &capacity, sizeof(capacity));
+
+ /* If capacity is too big, truncate with warning. */
+ if ((sector_t)capacity != capacity) {
+ dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
+ (unsigned long long)capacity);
+ capacity = (sector_t)-1;
+ }
+
+ size = capacity * queue_logical_block_size(q);
+ string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
+
+ dev_notice(&vdev->dev,
+ "new size: %llu %d-byte logical blocks (%s/%s)\n",
+ (unsigned long long)capacity,
+ queue_logical_block_size(q),
+ cap_str_10, cap_str_2);
+
+ set_capacity(vblk->disk, capacity);
+}
+
+static void virtblk_config_changed(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+
+ queue_work(virtblk_wq, &vblk->config_work);
+}
+
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -327,6 +373,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
sg_init_table(vblk->sg, vblk->sg_elems);
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
/* We expect one virtqueue, for output. */
vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
@@ -477,6 +524,8 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ flush_work(&vblk->config_work);
+
/* Nothing should be pending. */
BUG_ON(!list_empty(&vblk->reqs));
@@ -508,27 +557,47 @@ static unsigned int features[] = {
* Use __refdata to avoid this warning.
*/
static struct virtio_driver __refdata virtio_blk = {
- .feature_table = features,
- .feature_table_size = ARRAY_SIZE(features),
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = virtblk_probe,
- .remove = __devexit_p(virtblk_remove),
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtblk_probe,
+ .remove = __devexit_p(virtblk_remove),
+ .config_changed = virtblk_config_changed,
};
static int __init init(void)
{
+ int error;
+
+ virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ if (!virtblk_wq)
+ return -ENOMEM;
+
major = register_blkdev(0, "virtblk");
- if (major < 0)
- return major;
- return register_virtio_driver(&virtio_blk);
+ if (major < 0) {
+ error = major;
+ goto out_destroy_workqueue;
+ }
+
+ error = register_virtio_driver(&virtio_blk);
+ if (error)
+ goto out_unregister_blkdev;
+ return 0;
+
+out_unregister_blkdev:
+ unregister_blkdev(major, "virtblk");
+out_destroy_workqueue:
+ destroy_workqueue(virtblk_wq);
+ return error;
}
static void __exit fini(void)
{
unregister_blkdev(major, "virtblk");
unregister_virtio_driver(&virtio_blk);
+ destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);
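virtblk_config_changed_work() above guards against the host-reported 64-bit capacity not fitting into sector_t (which may be only 32 bits wide): the cast-and-compare detects truncation, and the size is clamped with a warning rather than silently wrapping. A standalone sketch of that check, with sector_t modelled as a 32-bit type so the clamp is visible; the function name clamp_capacity() is illustrative:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t sector_t_demo;	/* pretend sector_t is only 32 bits wide */

static sector_t_demo clamp_capacity(uint64_t capacity)
{
	if ((sector_t_demo)capacity != capacity) {
		fprintf(stderr, "Capacity %llu too large: truncating\n",
			(unsigned long long)capacity);
		capacity = (sector_t_demo)-1;	/* clamp to the maximum representable */
	}
	return (sector_t_demo)capacity;
}

int main(void)
{
	printf("%u\n", clamp_capacity(1ULL << 20));	/* fits: 1048576 */
	printf("%u\n", clamp_capacity(1ULL << 40));	/* clamped to 0xffffffff */
	return 0;
}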
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c73910cc28c9..5cf2993a8338 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -809,11 +809,13 @@ static int __init xen_blkif_init(void)
failed_init:
kfree(blkbk->pending_reqs);
kfree(blkbk->pending_grant_handles);
- for (i = 0; i < mmap_pages; i++) {
- if (blkbk->pending_pages[i])
- __free_page(blkbk->pending_pages[i]);
+ if (blkbk->pending_pages) {
+ for (i = 0; i < mmap_pages; i++) {
+ if (blkbk->pending_pages[i])
+ __free_page(blkbk->pending_pages[i]);
+ }
+ kfree(blkbk->pending_pages);
}
- kfree(blkbk->pending_pages);
kfree(blkbk);
blkbk = NULL;
return rc;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 34570823355b..6cc0db1bf522 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -357,14 +357,13 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->bdev = bdev;
- vbd->size = vbd_sz(vbd);
-
if (vbd->bdev->bd_disk == NULL) {
DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
vbd->pdevice);
xen_vbd_free(vbd);
return -ENOENT;
}
+ vbd->size = vbd_sz(vbd);
if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
vbd->type |= VDISK_CDROM;
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index fd6305bf953e..8ecf4c6c2874 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -64,6 +64,8 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.hscfgcmd = result;
@@ -108,6 +110,8 @@ static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.psmode = result;
@@ -147,6 +151,8 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.pscmd = result;
@@ -191,6 +197,8 @@ static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 16, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.gpio_gap = result;
@@ -230,6 +238,8 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.hscmd = result;
if (priv->btmrvl_dev.hscmd) {
@@ -272,6 +282,8 @@ static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
+ if (ret)
+ return ret;
priv->btmrvl_dev.hsmode = result;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index b3f01996318f..48ad2a7ab080 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -355,29 +355,24 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* flags pointer to flags for data
* count count of received data in bytes
*
- * Return Value: Number of bytes received
+ * Return Value: None
*/
-static unsigned int hci_uart_tty_receive(struct tty_struct *tty,
- const u8 *data, char *flags, int count)
+static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
{
struct hci_uart *hu = (void *)tty->disc_data;
- int received;
if (!hu || tty != hu->tty)
- return -ENODEV;
+ return;
if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
- return -EINVAL;
+ return;
spin_lock(&hu->rx_lock);
- received = hu->proto->recv(hu, (void *) data, count);
- if (received > 0)
- hu->hdev->stat.byte_rx += received;
+ hu->proto->recv(hu, (void *) data, count);
+ hu->hdev->stat.byte_rx += count;
spin_unlock(&hu->rx_lock);
tty_unthrottle(tty);
-
- return received;
}
static int hci_uart_register_dev(struct hci_uart *hu)
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index ae15a4ddaa9b..7878da89d29e 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -627,7 +627,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
gendisk->fops = &viocd_fops;
gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- gendisk->events = DISK_EVENT_MEDIA_CHANGE;
set_capacity(gendisk, 0);
gendisk->private_data = d;
d->viocd_disk = gendisk;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 051474c65b78..34d6a1cab8de 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -163,11 +163,32 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
* This has the effect of treating non-periodic like periodic.
*/
if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
- unsigned long m, t;
+ unsigned long m, t, mc, base, k;
+ struct hpet __iomem *hpet = devp->hd_hpet;
+ struct hpets *hpetp = devp->hd_hpets;
t = devp->hd_ireqfreq;
m = read_counter(&devp->hd_timer->hpet_compare);
- write_counter(t + m, &devp->hd_timer->hpet_compare);
+ mc = read_counter(&hpet->hpet_mc);
+ /* The time for the next interrupt would logically be t + m,
+ * however, if we are very unlucky and the interrupt is delayed
+ * for longer than t then we will completely miss the next
+ * interrupt if we set t + m and an application will hang.
+ * Therefore we need to make a more complex computation assuming
+ * that there exists a k for which the following is true:
+ * k * t + base < mc + delta
+ * (k + 1) * t + base > mc + delta
+ * where t is the interval in hpet ticks for the given freq,
+ * base is the theoretical start value 0 <= base < t,
+ * mc is the main counter value at the time of the interrupt,
+ * delta is the time it takes to write a value to the
+ * comparator.
+ * k may then be computed as (mc - base + delta) / t .
+ */
+ base = mc % t;
+ k = (mc - base + hpetp->hp_delta) / t;
+ write_counter(t * (k + 1) + base,
+ &devp->hd_timer->hpet_compare);
}
if (devp->hd_flags & HPET_SHARED_IRQ)
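The new hpet_interrupt() arithmetic above picks a comparator value that always lands in the future even when the interrupt was serviced late: base is the offset of the period grid, k counts how many whole periods of length t will have elapsed once the (delta-tick) comparator write completes, and the comparator is set to the start of period k+1. A small sketch of that computation with illustrative numbers; next_compare() is just a name for this example:

#include <stdio.h>

/* next comparator value, following the hpet_interrupt() change above */
static unsigned long next_compare(unsigned long t, unsigned long mc,
				  unsigned long delta)
{
	unsigned long base = mc % t;		   /* grid offset, 0 <= base < t */
	unsigned long k = (mc - base + delta) / t; /* periods elapsed incl. write cost */

	return t * (k + 1) + base;
}

int main(void)
{
	/* period of 1000 ticks, counter read at 12345, ~5 ticks to write */
	unsigned long t = 1000, mc = 12345, delta = 5;
	unsigned long next = next_compare(t, mc, delta);

	printf("next=%lu (always beyond mc=%lu)\n", next, mc);
	return 0;
}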
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 838568a7dbf5..fb68b1295373 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1677,17 +1677,12 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
portdev->config.max_nr_ports = 1;
if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
multiport = true;
- vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
-
vdev->config->get(vdev, offsetof(struct virtio_console_config,
max_nr_ports),
&portdev->config.max_nr_ports,
sizeof(portdev->config.max_nr_ports));
}
- /* Let the Host know we support multiple ports.*/
- vdev->config->finalize_features(vdev);
-
err = init_vqs(portdev);
if (err < 0) {
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 036e5865eb40..dc7c033ef587 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -24,7 +24,6 @@
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
@@ -153,12 +152,10 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
int ret;
- /* wake up device and enable clock */
- pm_runtime_get_sync(&p->pdev->dev);
+ /* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
- pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
@@ -190,9 +187,8 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
/* disable interrupts in CMT block */
sh_cmt_write(p, CMCSR, 0);
- /* stop clock and mark device as idle */
+ /* stop clock */
clk_disable(p->clk);
- pm_runtime_put_sync(&p->pdev->dev);
}
/* private flags */
@@ -664,7 +660,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
- pm_runtime_enable(&pdev->dev);
return 0;
}
@@ -679,9 +674,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
-
- if (!is_early_platform_device(pdev))
- pm_runtime_enable(&pdev->dev);
return ret;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 17296288a205..808135768617 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
@@ -110,12 +109,10 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
{
int ret;
- /* wake up device and enable clock */
- pm_runtime_get_sync(&p->pdev->dev);
+ /* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
- pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
@@ -144,9 +141,8 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
/* disable interrupts in TMU block */
sh_tmu_write(p, TCR, 0x0000);
- /* stop clock and mark device as idle */
+ /* stop clock */
clk_disable(p->clk);
- pm_runtime_put_sync(&p->pdev->dev);
}
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
@@ -415,7 +411,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
- pm_runtime_enable(&pdev->dev);
return 0;
}
@@ -430,9 +425,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
-
- if (!is_early_platform_device(pdev))
- pm_runtime_enable(&pdev->dev);
return ret;
}
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index b60a4c263686..faf7c5217848 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -298,11 +298,13 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
old_index = stat->last_index;
new_index = freq_table_get_index(stat, freq->new);
- cpufreq_stats_update(freq->cpu);
- if (old_index == new_index)
+ /* We can't do stat->time_in_state[-1]= .. */
+ if (old_index == -1 || new_index == -1)
return 0;
- if (old_index == -1 || new_index == -1)
+ cpufreq_stats_update(freq->cpu);
+
+ if (old_index == new_index)
return 0;
spin_lock(&cpufreq_stats_lock);
@@ -387,6 +389,7 @@ static void __exit cpufreq_stats_exit(void)
unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu) {
cpufreq_stats_free_table(cpu);
+ cpufreq_stats_free_sysfs(cpu);
}
}
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 83479b6fb9a1..bce576d7478e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1079,6 +1079,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
}
res = transition_fid_vid(data, fid, vid);
+ if (res)
+ return res;
+
freqs.new = find_khz_freq_from_fid(data->currfid);
for_each_cpu(i, data->available_cores) {
@@ -1101,7 +1104,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
/* get MSR index for hardware pstate transition */
pstate = index & HW_PSTATE_MASK;
if (pstate > data->max_hw_pstate)
- return 0;
+ return -EINVAL;
+
freqs.old = find_khz_freq_from_pstate(data->powernow_table,
data->currpstate);
freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f508690eb958..c47f3d09c1ee 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev)
unsigned int power_usage = -1;
int i;
int multiplier;
+ struct timespec t;
if (data->needs_update) {
menu_update(dev);
@@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev)
return 0;
/* determine the expected residency time, round up */
+ t = ktime_to_timespec(tick_nohz_get_sleep_length());
data->expected_us =
- DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
+ t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
data->bucket = which_bucket(data->expected_us);
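The menu-governor change avoids a 32-bit overflow: casting ktime_to_ns() to u32 wraps for sleep lengths above roughly 4.29 seconds, so the expected residency is now derived from a timespec in whole microseconds instead. A small sketch of the difference under an assumed 6-second sleep length (the value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define USEC_PER_SEC  1000000ULL

int main(void)
{
	/* a 6 s tick_nohz sleep length, expressed in nanoseconds */
	int64_t sleep_ns = 6000000000LL;

	/* old: truncates to 32 bits before dividing -> bogus result */
	uint32_t old_us = (uint32_t)sleep_ns / 1000;

	/* new: split into seconds + nanoseconds first, as ktime_to_timespec() does */
	int64_t tv_sec = sleep_ns / 1000000000LL;
	int64_t tv_nsec = sleep_ns % 1000000000LL;
	uint64_t new_us = tv_sec * USEC_PER_SEC + tv_nsec / NSEC_PER_USEC;

	printf("old=%u us  new=%llu us\n", old_us, (unsigned long long)new_us);
	return 0;
}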
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a572600e44eb..25cf327cd1cb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,16 +200,18 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
+ tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
depends on PCI && X86
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
- This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
- Output Hub) which is for IVI(In-Vehicle Infotainment) use.
- ML7213 is companion chip for Intel Atom E6xx series.
- ML7213 is completely compatible for Intel EG20T PCH.
+ This driver can also be used for the OKI SEMICONDUCTOR IOH (Input/
+ Output Hub) chips ML7213 and ML7223.
+ The ML7213 IOH is for IVI (In-Vehicle Infotainment) use and the
+ ML7223 IOH is for MP (Media Phone) use.
+ ML7213/ML7223 are companion chips for the Intel Atom E6xx series
+ and are fully compatible with the Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
new file mode 100644
index 000000000000..a4af8589330c
--- /dev/null
+++ b/drivers/dma/TODO
@@ -0,0 +1,14 @@
+TODO for slave dma
+
+1. Move remaining drivers to use new slave interface
+2. Remove old slave pointer mechanism
+3. Make issue_pending start the transaction in the drivers below
+ - mpc512x_dma
+ - imx-dma
+ - imx-sdma
+ - mxs-dma.c
+ - dw_dmac
+ - intel_mid_dma
+ - ste_dma40
+4. Check other subsystems for dma drivers and merge/move to dmaengine
+5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 235f53bf494e..36144f88d718 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -37,8 +37,8 @@
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLA (0)
-#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \
- |ATC_DIF(1))
+#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
+ |ATC_DIF(AT_DMA_MEM_IF))
/*
* Initial number of descriptors to allocate for each channel. This could
@@ -165,6 +165,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
}
/**
+ * atc_desc_chain - build chain adding a descriptor
+ * @first: address of first descriptor of the chain
+ * @prev: address of previous descriptor of the chain
+ * @desc: descriptor to queue
+ *
+ * Called from prep_* functions
+ */
+static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
+ struct at_desc *desc)
+{
+ if (!(*first)) {
+ *first = desc;
+ } else {
+ /* inform the HW lli about chaining */
+ (*prev)->lli.dscr = desc->txd.phys;
+ /* insert the link descriptor to the LD ring */
+ list_add_tail(&desc->desc_node,
+ &(*first)->tx_list);
+ }
+ *prev = desc;
+}
+
+/**
* atc_assign_cookie - compute and assign new cookie
* @atchan: channel we work on
* @desc: descriptor to assign cookie for
@@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
- dma_async_tx_callback callback;
- void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
atchan->completed_cookie = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
}
}
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
- if (callback)
- callback(param);
+ /* for cyclic transfers,
+ * no need to invoke the callback function while stopping */
+ if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+ }
dma_run_dependencies(txd);
}
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan)
atc_chain_complete(atchan, bad_desc);
}
+/**
+ * atc_handle_cyclic - at the end of a period, run callback function
+ * @atchan: channel used for cyclic operations
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_handle_cyclic(struct at_dma_chan *atchan)
+{
+ struct at_desc *first = atc_first_active(atchan);
+ struct dma_async_tx_descriptor *txd = &first->txd;
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ dev_vdbg(chan2dev(&atchan->chan_common),
+ "new cyclic period llp 0x%08x\n",
+ channel_readl(atchan, DSCR));
+
+ if (callback)
+ callback(param);
+}
/*-- IRQ & Tasklet ---------------------------------------------------*/
@@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
- /* Channel cannot be enabled here */
- if (atc_chan_is_enabled(atchan)) {
- dev_err(chan2dev(&atchan->chan_common),
- "BUG: channel enabled in tasklet\n");
- return;
- }
-
spin_lock(&atchan->lock);
- if (test_and_clear_bit(0, &atchan->error_status))
+ if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan);
+ else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ atc_handle_cyclic(atchan);
else
atc_advance_work(atchan);
@@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
for (i = 0; i < atdma->dma_common.chancnt; i++) {
atchan = &atdma->chan[i];
- if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
+ if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
if (pending & AT_DMA_ERR(i)) {
/* Disable channel on AHB error */
- dma_writel(atdma, CHDR, atchan->mask);
+ dma_writel(atdma, CHDR,
+ AT_DMA_RES(i) | atchan->mask);
/* Give information to tasklet */
- set_bit(0, &atchan->error_status);
+ set_bit(ATC_IS_ERROR, &atchan->status);
}
tasklet_schedule(&atchan->tasklet);
ret = IRQ_HANDLED;
@@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
}
ctrla = ATC_DEFAULT_CTRLA;
- ctrlb = ATC_DEFAULT_CTRLB
+ ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
| ATC_SRC_ADDR_MODE_INCR
| ATC_DST_ADDR_MODE_INCR
| ATC_FC_MEM2MEM;
@@ -584,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->txd.cookie = 0;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
}
/* First descriptor of the chain embeds additional information */
@@ -639,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct scatterlist *sg;
size_t total_len = 0;
- dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+ dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
+ sg_len,
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
flags);
@@ -651,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg_width = atslave->reg_width;
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
- ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
+ ctrlb = ATC_IEN;
switch (direction) {
case DMA_TO_DEVICE:
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
- | ATC_FC_MEM2PER;
+ | ATC_FC_MEM2PER
+ | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
reg = atslave->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct at_desc *desc;
@@ -682,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| len >> mem_width;
desc->lli.ctrlb = ctrlb;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
total_len += len;
}
break;
@@ -699,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
- | ATC_FC_PER2MEM;
+ | ATC_FC_PER2MEM
+ | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
reg = atslave->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
@@ -724,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| len >> reg_width;
desc->lli.ctrlb = ctrlb;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
total_len += len;
}
break;
@@ -759,41 +777,211 @@ err_desc_get:
return NULL;
}
+/**
+ * atc_dma_cyclic_check_values
+ * Check for too big/unaligned periods and unaligned DMA buffer
+ */
+static int
+atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
+ size_t period_len, enum dma_data_direction direction)
+{
+ if (period_len > (ATC_BTSIZE_MAX << reg_width))
+ goto err_out;
+ if (unlikely(period_len & ((1 << reg_width) - 1)))
+ goto err_out;
+ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+ goto err_out;
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ return -EINVAL;
+}
+
+/**
+ * atc_dma_cyclic_fill_desc - Fill one period descriptor
+ */
+static int
+atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+ unsigned int period_index, dma_addr_t buf_addr,
+ size_t period_len, enum dma_data_direction direction)
+{
+ u32 ctrla;
+ unsigned int reg_width = atslave->reg_width;
+
+ /* prepare common CTRLA value */
+ ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
+ | ATC_DST_WIDTH(reg_width)
+ | ATC_SRC_WIDTH(reg_width)
+ | period_len >> reg_width;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->lli.saddr = buf_addr + (period_len * period_index);
+ desc->lli.daddr = atslave->tx_reg;
+ desc->lli.ctrla = ctrla;
+ desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
+ | ATC_SRC_ADDR_MODE_INCR
+ | ATC_FC_MEM2PER
+ | ATC_SIF(AT_DMA_MEM_IF)
+ | ATC_DIF(AT_DMA_PER_IF);
+ break;
+
+ case DMA_FROM_DEVICE:
+ desc->lli.saddr = atslave->rx_reg;
+ desc->lli.daddr = buf_addr + (period_len * period_index);
+ desc->lli.ctrla = ctrla;
+ desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
+ | ATC_SRC_ADDR_MODE_FIXED
+ | ATC_FC_PER2MEM
+ | ATC_SIF(AT_DMA_PER_IF)
+ | ATC_DIF(AT_DMA_MEM_IF);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma_slave *atslave = chan->private;
+ struct at_desc *first = NULL;
+ struct at_desc *prev = NULL;
+ unsigned long was_cyclic;
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+ direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ buf_addr,
+ periods, buf_len, period_len);
+
+ if (unlikely(!atslave || !buf_len || !period_len)) {
+ dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
+ return NULL;
+ }
+
+ was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
+ if (was_cyclic) {
+ dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
+ return NULL;
+ }
+
+ /* Check for too big/unaligned periods and unaligned DMA buffer */
+ if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+ period_len, direction))
+ goto err_out;
+
+ /* build cyclic linked list */
+ for (i = 0; i < periods; i++) {
+ struct at_desc *desc;
+
+ desc = atc_desc_get(atchan);
+ if (!desc)
+ goto err_desc_get;
+
+ if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
+ period_len, direction))
+ goto err_desc_get;
+
+ atc_desc_chain(&first, &prev, desc);
+ }
+
+ /* let's make a cyclic list */
+ prev->lli.dscr = first->txd.phys;
+
+ /* First descriptor of the chain embeds additional information */
+ first->txd.cookie = -EBUSY;
+ first->len = buf_len;
+
+ return &first->txd;
+
+err_desc_get:
+ dev_err(chan2dev(chan), "not enough descriptors available\n");
+ atc_desc_put(atchan, first);
+err_out:
+ clear_bit(ATC_IS_CYCLIC, &atchan->status);
+ return NULL;
+}
+
+
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
- struct at_desc *desc, *_desc;
+ int chan_id = atchan->chan_common.chan_id;
+
LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_bh(&atchan->lock);
+ if (cmd == DMA_PAUSE) {
+ spin_lock_bh(&atchan->lock);
- dma_writel(atdma, CHDR, atchan->mask);
+ dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+ set_bit(ATC_IS_PAUSED, &atchan->status);
- /* confirm that this channel is disabled */
- while (dma_readl(atdma, CHSR) & atchan->mask)
- cpu_relax();
+ spin_unlock_bh(&atchan->lock);
+ } else if (cmd == DMA_RESUME) {
+ if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+ return 0;
- /* active_list entries will end up before queued entries */
- list_splice_init(&atchan->queue, &list);
- list_splice_init(&atchan->active_list, &list);
+ spin_lock_bh(&atchan->lock);
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
+ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+ clear_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_bh(&atchan->lock);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ struct at_desc *desc, *_desc;
+ /*
+ * This is only called when something went wrong elsewhere, so
+ * we don't really care about the data. Just disable the
+ * channel. We still have to poll the channel enable bit due
+ * to AHB/HSB limitations.
+ */
+ spin_lock_bh(&atchan->lock);
+
+ /* disabling channel: must also remove suspend state */
+ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+
+ /* confirm that this channel is disabled */
+ while (dma_readl(atdma, CHSR) & atchan->mask)
+ cpu_relax();
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&atchan->queue, &list);
+ list_splice_init(&atchan->active_list, &list);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ atc_chain_complete(atchan, desc);
+
+ clear_bit(ATC_IS_PAUSED, &atchan->status);
+ /* if channel dedicated to cyclic operations, free it */
+ clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+ spin_unlock_bh(&atchan->lock);
+ } else {
+ return -ENXIO;
+ }
return 0;
}
@@ -835,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan,
spin_unlock_bh(&atchan->lock);
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
- cookie, last_complete ? last_complete : 0,
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ atc_first_active(atchan)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ if (test_bit(ATC_IS_PAUSED, &atchan->status))
+ ret = DMA_PAUSED;
+
+ dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
+ ret, cookie, last_complete ? last_complete : 0,
last_used ? last_used : 0);
return ret;
@@ -853,6 +1049,10 @@ static void atc_issue_pending(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "issue_pending\n");
+ /* Not needed for cyclic transfers */
+ if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ return;
+
spin_lock_bh(&atchan->lock);
if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan);
@@ -959,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
}
list_splice_init(&atchan->free_list, &list);
atchan->descs_allocated = 0;
+ atchan->status = 0;
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
@@ -1092,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
- if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+
+ if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+ atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
+
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
+ dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
atdma->dma_common.device_control = atc_control;
- }
dma_writel(atdma, EN, AT_DMA_ENABLE);
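As a rough illustration of what the new atc_prep_dma_cyclic() builds, the sketch below (simplified stand-in structures, hypothetical addresses; not the driver's real types) chains one descriptor per period exactly as atc_desc_chain() does and then points the last descriptor's dscr back at the first, forming the ring the controller walks until the transfer is terminated:

	#include <stdio.h>
	#include <stdlib.h>

	/* simplified stand-ins for struct at_desc and the hardware LLI */
	struct lli  { unsigned int dscr; };		/* next-descriptor pointer */
	struct desc { struct lli lli; unsigned int phys; };

	int main(void)
	{
		unsigned int periods = 4;
		struct desc *d = calloc(periods, sizeof(*d));
		struct desc *first = NULL, *prev = NULL;
		unsigned int i;

		for (i = 0; i < periods; i++) {
			d[i].phys = 0x1000 + i * 0x10;	/* pretend DMA address */
			if (!first)
				first = &d[i];		/* same logic as atc_desc_chain() */
			else
				prev->lli.dscr = d[i].phys;
			prev = &d[i];
		}
		prev->lli.dscr = first->phys;		/* close the ring, as in prep_dma_cyclic */

		for (i = 0; i < periods; i++)
			printf("desc@0x%x -> next 0x%x\n", d[i].phys, d[i].lli.dscr);
		free(d);
		return 0;
	}

Because the list never terminates, the driver skips issue_pending for cyclic channels and reports a period boundary from atc_handle_cyclic() instead of completing descriptors.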
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 495457e3dc4b..087dbf1dd39c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -103,6 +103,10 @@
/* Bitfields in CTRLB */
#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */
#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */
+ /* Specify AHB interfaces */
+#define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */
+#define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */
+
#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */
#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */
#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */
@@ -181,12 +185,23 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
/*-- Channels --------------------------------------------------------*/
/**
+ * atc_status - information bits stored in channel status flag
+ *
+ * Manipulated with atomic operations.
+ */
+enum atc_status {
+ ATC_IS_ERROR = 0,
+ ATC_IS_PAUSED = 1,
+ ATC_IS_CYCLIC = 24,
+};
+
+/**
* struct at_dma_chan - internal representation of an Atmel HDMAC channel
* @chan_common: common dmaengine channel object members
* @device: parent device
* @ch_regs: memory mapped register base
* @mask: channel index in a mask
- * @error_status: transmit error status information from irq handler
+ * @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
* @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -201,7 +216,7 @@ struct at_dma_chan {
struct at_dma *device;
void __iomem *ch_regs;
u8 mask;
- unsigned long error_status;
+ unsigned long status;
struct tasklet_struct tasklet;
spinlock_t lock;
@@ -309,8 +324,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on)
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
u32 ebci;
- /* enable interrupts on buffer chain completion & error */
- ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
+ /* enable interrupts on buffer transfer completion & error */
+ ebci = AT_DMA_BTC(atchan->chan_common.chan_id)
| AT_DMA_ERR(atchan->chan_common.chan_id);
if (on)
dma_writel(atdma, EBCIER, ebci);
@@ -347,7 +362,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
*/
static void set_desc_eol(struct at_desc *desc)
{
- desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+ u32 ctrlb = desc->lli.ctrlb;
+
+ ctrlb &= ~ATC_IEN;
+ ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+
+ desc->lli.ctrlb = ctrlb;
desc->lli.dscr = 0;
}
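The rename from error_status to status goes with the new enum atc_status: several independent flags (error, paused, cyclic) now share one unsigned long and are manipulated with the kernel's atomic set_bit/test_bit/test_and_clear_bit, as seen in the tasklet and IRQ hunks above. A non-atomic, userspace-runnable sketch of that flow (plain bit operations standing in for the atomic helpers, hypothetical channel struct):

	#include <stdio.h>

	/* values mirror the new enum atc_status in at_hdmac_regs.h */
	enum { ATC_IS_ERROR = 0, ATC_IS_PAUSED = 1, ATC_IS_CYCLIC = 24 };

	struct chan { unsigned long status; };	/* stand-in for struct at_dma_chan */

	int main(void)
	{
		struct chan ch = { .status = 0 };

		/* the IRQ handler marks an AHB error; the driver uses atomic set_bit() */
		ch.status |= 1UL << ATC_IS_ERROR;

		/* the tasklet consumes the flags; the driver uses test_and_clear_bit()/test_bit() */
		if (ch.status & (1UL << ATC_IS_ERROR)) {
			ch.status &= ~(1UL << ATC_IS_ERROR);
			printf("handle error path\n");
		} else if (ch.status & (1UL << ATC_IS_CYCLIC)) {
			printf("handle cyclic period\n");
		} else {
			printf("advance normal work\n");
		}
		return 0;
	}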
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index f48e54006518..af8c0b5ed70f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
{
return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
-arch_initcall(coh901318_init);
+subsys_initcall(coh901318_init);
void __exit coh901318_exit(void)
{
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2a2e2fa00e91..4d180ca9a1d8 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -3,6 +3,7 @@
* AVR32 systems.)
*
* Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -93,8 +94,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
struct dw_desc *desc, *_desc;
struct dw_desc *ret = NULL;
unsigned int i = 0;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
if (async_tx_test_ack(&desc->txd)) {
list_del(&desc->desc_node);
@@ -104,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
i++;
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
@@ -130,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
*/
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
+ unsigned long flags;
+
if (desc) {
struct dw_desc *child;
dwc_sync_desc_for_cpu(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n",
@@ -143,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
list_splice_init(&desc->tx_list, &dwc->free_list);
dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
}
@@ -195,18 +199,23 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
/*----------------------------------------------------------------------*/
static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+ bool callback_required)
{
- dma_async_tx_callback callback;
- void *param;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
+ unsigned long flags;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
+ spin_lock_irqsave(&dwc->lock, flags);
dwc->completed = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
+ if (callback_required) {
+ callback = txd->callback;
+ param = txd->callback_param;
+ }
dwc_sync_desc_for_cpu(dwc, desc);
@@ -238,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
}
}
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
- if (callback)
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ if (callback_required && callback)
callback(param);
}
@@ -250,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
"BUG: XFER bit set, but channel not idle!\n");
@@ -271,8 +280,10 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dostart(dwc, dwc_first_active(dwc));
}
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -281,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
struct dw_desc *desc, *_desc;
struct dw_desc *child;
u32 status_xfer;
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
/*
* Clear block interrupt flag before scanning so that we don't
* miss any, and read LLP before RAW_XFER to ensure it is
@@ -294,30 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
if (status_xfer & dwc->mask) {
/* Everything we've submitted is done */
dma_writel(dw, CLEAR.XFER, dwc->mask);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
dwc_complete_all(dw, dwc);
return;
}
- if (list_empty(&dwc->active_list))
+ if (list_empty(&dwc->active_list)) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
- if (desc->lli.llp == llp)
+ /* check the first descriptor's address */
+ if (desc->txd.phys == llp) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ /* check the first descriptor's llp */
+ if (desc->lli.llp == llp) {
/* This one is currently in progress */
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
list_for_each_entry(child, &desc->tx_list, desc_node)
- if (child->lli.llp == llp)
+ if (child->lli.llp == llp) {
/* Currently in progress */
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
/*
* No descriptors so far seem to be in progress, i.e.
* this one must be done.
*/
- dwc_descriptor_complete(dwc, desc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc_descriptor_complete(dwc, desc, true);
+ spin_lock_irqsave(&dwc->lock, flags);
}
dev_err(chan2dev(&dwc->chan),
@@ -332,6 +362,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
list_move(dwc->queue.next, &dwc->active_list);
dwc_dostart(dwc, dwc_first_active(dwc));
}
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
@@ -346,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
struct dw_desc *bad_desc;
struct dw_desc *child;
+ unsigned long flags;
dwc_scan_descriptors(dw, dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+
/*
* The descriptor currently at the head of the active list is
* borked. Since we don't have any way to report errors, we'll
@@ -378,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
/* Pretend the descriptor completed successfully */
- dwc_descriptor_complete(dwc, bad_desc);
+ dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -402,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
u32 status_block, u32 status_err, u32 status_xfer)
{
+ unsigned long flags;
+
if (status_block & dwc->mask) {
void (*callback)(void *param);
void *callback_param;
@@ -412,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
callback = dwc->cdesc->period_callback;
callback_param = dwc->cdesc->period_callback_param;
- if (callback) {
- spin_unlock(&dwc->lock);
+
+ if (callback)
callback(callback_param);
- spin_lock(&dwc->lock);
- }
}
/*
@@ -430,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
"interrupt, stopping DMA transfer\n",
status_xfer ? "xfer" : "error");
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
dev_err(chan2dev(&dwc->chan),
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
channel_readl(dwc, SAR),
@@ -453,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
for (i = 0; i < dwc->cdesc->periods; i++)
dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
}
@@ -476,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
- spin_lock(&dwc->lock);
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
dwc_handle_cyclic(dw, dwc, status_block, status_err,
status_xfer);
@@ -484,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data)
dwc_handle_error(dw, dwc);
else if ((status_block | status_xfer) & (1 << i))
dwc_scan_descriptors(dw, dwc);
- spin_unlock(&dwc->lock);
}
/*
@@ -539,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
struct dw_desc *desc = txd_to_dw_desc(tx);
struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
cookie = dwc_assign_cookie(dwc, desc);
/*
@@ -560,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &dwc->queue);
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return cookie;
}
@@ -689,9 +729,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
+ u32 len, dlen, mem;
+
+ mem = sg_phys(sg);
+ len = sg_dma_len(sg);
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc) {
dev_err(chan2dev(chan),
@@ -699,16 +745,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto err_desc_get;
}
- mem = sg_phys(sg);
- len = sg_dma_len(sg);
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
- mem_width = 0;
-
desc->lli.sar = mem;
desc->lli.dar = reg;
desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
- desc->lli.ctlhi = len >> mem_width;
+ if ((len >> mem_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << mem_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+
+ desc->lli.ctlhi = dlen >> mem_width;
if (!first) {
first = desc;
@@ -722,7 +771,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_todev_fill_desc;
}
break;
case DMA_FROM_DEVICE:
@@ -735,15 +787,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
-
- desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
- goto err_desc_get;
- }
+ u32 len, dlen, mem;
mem = sg_phys(sg);
len = sg_dma_len(sg);
@@ -751,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (unlikely(mem & 3 || len & 3))
mem_width = 0;
+slave_sg_fromdev_fill_desc:
+ desc = dwc_desc_get(dwc);
+ if (!desc) {
+ dev_err(chan2dev(chan),
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
desc->lli.sar = reg;
desc->lli.dar = mem;
desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
- desc->lli.ctlhi = len >> reg_width;
+ if ((len >> reg_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << reg_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+ desc->lli.ctlhi = dlen >> reg_width;
if (!first) {
first = desc;
@@ -768,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_fromdev_fill_desc;
}
break;
default:
@@ -799,34 +862,51 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
+ unsigned long flags;
+ u32 cfglo;
LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ if (cmd == DMA_PAUSE) {
+ spin_lock_irqsave(&dwc->lock, flags);
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_bh(&dwc->lock);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+ cpu_relax();
- channel_clear_bit(dw, CH_EN, dwc->mask);
+ dwc->paused = true;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ } else if (cmd == DMA_RESUME) {
+ if (!dwc->paused)
+ return 0;
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ spin_lock_irqsave(&dwc->lock, flags);
- /* active_list entries will end up before queued entries */
- list_splice_init(&dwc->queue, &list);
- list_splice_init(&dwc->active_list, &list);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+ dwc->paused = false;
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ spin_lock_irqsave(&dwc->lock, flags);
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ dwc->paused = false;
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&dwc->queue, &list);
+ list_splice_init(&dwc->active_list, &list);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ dwc_descriptor_complete(dwc, desc, false);
+ } else
+ return -ENXIO;
return 0;
}
@@ -846,9 +926,7 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dwc->lock);
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
last_complete = dwc->completed;
last_used = chan->cookie;
@@ -856,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ dwc_first_active(dwc)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ if (dwc->paused)
+ return DMA_PAUSED;
return ret;
}
@@ -865,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- spin_lock_bh(&dwc->lock);
if (!list_empty(&dwc->queue))
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -880,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
int i;
u32 cfghi;
u32 cfglo;
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -917,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
if (!desc) {
dev_info(chan2dev(chan),
"only allocated %d descriptors\n", i);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
break;
}
@@ -938,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
sizeof(desc->lli), DMA_TO_DEVICE);
dwc_desc_put(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
i = ++dwc->descs_allocated;
}
@@ -947,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(chan),
"alloc_chan_resources allocated %d descriptors\n", i);
@@ -960,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
+ unsigned long flags;
LIST_HEAD(list);
dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
@@ -970,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&dwc->queue));
BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
@@ -979,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
channel_clear_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
@@ -1004,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long flags;
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
return -ENODEV;
}
- spin_lock(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
/* assert channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1023,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
channel_readl(dwc, LLP),
channel_readl(dwc, CTL_HI),
channel_readl(dwc, CTL_LO));
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -EBUSY;
}
@@ -1038,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
channel_set_bit(dw, CH_EN, dwc->mask);
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
}
@@ -1054,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long flags;
- spin_lock(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
cpu_relax();
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
@@ -1090,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
unsigned int reg_width;
unsigned int periods;
unsigned int i;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(&dwc->chan),
"queue and/or active list are not empty\n");
return ERR_PTR(-EBUSY);
}
was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
if (was_cyclic) {
dev_dbg(chan2dev(&dwc->chan),
"channel already prepared for cyclic DMA\n");
@@ -1214,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_cyclic_desc *cdesc = dwc->cdesc;
int i;
+ unsigned long flags;
dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
if (!cdesc)
return;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1230,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
for (i = 0; i < cdesc->periods; i++)
dwc_desc_put(dwc, cdesc->desc[i]);
@@ -1487,3 +1576,4 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821527f8..c3419518d701 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -2,6 +2,7 @@
* Driver for the Synopsys DesignWare AHB DMA Controller
*
* Copyright (C) 2005-2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -138,6 +139,7 @@ struct dw_dma_chan {
void __iomem *ch_regs;
u8 mask;
u8 priority;
+ bool paused;
spinlock_t lock;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3d4ec38b9b62..f653517ef744 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
if (err)
goto err_dma;
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
@@ -1322,6 +1321,9 @@ err_enable_device:
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
struct middma_device *device = pci_get_drvdata(pdev);
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
middma_shutdown(pdev);
pci_dev_put(pdev);
kfree(device);
@@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci)
static int dma_runtime_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return dma_suspend(pci_dev, PMSG_SUSPEND);
+ struct middma_device *device = pci_get_drvdata(pci_dev);
+
+ device->state = SUSPENDED;
+ return 0;
}
static int dma_runtime_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return dma_resume(pci_dev);
+ struct middma_device *device = pci_get_drvdata(pci_dev);
+
+ device->state = RUNNING;
+ iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+ return 0;
}
static int dma_runtime_idle(struct device *dev)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index f4a51d4d0349..5d65f8377971 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -508,6 +508,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
struct ioat_ring_ent **ring;
u64 status;
int order;
+ int i = 0;
/* have we already been set up? */
if (ioat->ring)
@@ -548,8 +549,11 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
- udelay(5);
- status = ioat_chansts(chan);
+ do {
+ udelay(1);
+ status = ioat_chansts(chan);
+ } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
if (is_ioat_active(status) || is_ioat_idle(status)) {
set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c6b01f535b29..e03f811a83dd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
__func__, len);
@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
__func__, len);
@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
"%s src_cnt: %d len: %u flags: %lx\n",
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a25f5f61e0e0..954e334e01bb 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_memcpy_slot_count(len);
@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_memset_slot_count(len);
@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan->device->common.dev,
"%s src_cnt: %d len: dest %x %u flags: %ld\n",
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 8d8fef1480a9..ff5b38f9d45b 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -77,10 +77,10 @@ struct pch_dma_regs {
u32 dma_ctl0;
u32 dma_ctl1;
u32 dma_ctl2;
- u32 reserved1;
+ u32 dma_ctl3;
u32 dma_sts0;
u32 dma_sts1;
- u32 reserved2;
+ u32 dma_sts2;
u32 reserved3;
struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
@@ -130,6 +130,7 @@ struct pch_dma {
#define PCH_DMA_CTL0 0x00
#define PCH_DMA_CTL1 0x04
#define PCH_DMA_CTL2 0x08
+#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
@@ -138,7 +139,8 @@ struct pch_dma {
#define dma_writel(pd, name, val) \
writel((val), (pd)->membase + PCH_DMA_##name)
-static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
+static inline
+struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
return container_of(txd, struct pch_dma_desc, txd);
}
@@ -163,13 +165,15 @@ static inline struct device *chan2parent(struct dma_chan *chan)
return chan->dev->device.parent;
}
-static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
return list_first_entry(&pd_chan->active_list,
struct pch_dma_desc, desc_node);
}
-static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
return list_first_entry(&pd_chan->queue,
struct pch_dma_desc, desc_node);
@@ -199,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan)
struct pch_dma *pd = to_pd(chan->device);
u32 val;
- val = dma_readl(pd, CTL0);
+ if (chan->chan_id < 8) {
+ val = dma_readl(pd, CTL0);
- if (pd_chan->dir == DMA_TO_DEVICE)
- val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
- DMA_CTL0_DIR_SHIFT_BITS);
- else
- val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
- DMA_CTL0_DIR_SHIFT_BITS));
+ if (pd_chan->dir == DMA_TO_DEVICE)
+ val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS);
+ else
+ val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS));
+
+ dma_writel(pd, CTL0, val);
+ } else {
+ int ch = chan->chan_id - 8; /* ch8->0, ch9->1, ... ch11->3 */
+ val = dma_readl(pd, CTL3);
- dma_writel(pd, CTL0, val);
+ if (pd_chan->dir == DMA_TO_DEVICE)
+ val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+ DMA_CTL0_DIR_SHIFT_BITS);
+ else
+ val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+ DMA_CTL0_DIR_SHIFT_BITS));
+
+ dma_writel(pd, CTL3, val);
+ }
dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
chan->chan_id, val);
@@ -219,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
struct pch_dma *pd = to_pd(chan->device);
u32 val;
- val = dma_readl(pd, CTL0);
+ if (chan->chan_id < 8) {
+ val = dma_readl(pd, CTL0);
+
+ val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * chan->chan_id));
- val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+ dma_writel(pd, CTL0, val);
+ } else {
+ int ch = chan->chan_id - 8; /* ch8->0, ch9->1, ... ch11->3 */
+
+ val = dma_readl(pd, CTL3);
+
+ val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
- dma_writel(pd, CTL0, val);
+ dma_writel(pd, CTL3, val);
+
+ }
dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
chan->chan_id, val);
@@ -251,9 +282,6 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
{
- struct pch_dma *pd = to_pd(pd_chan->chan.device);
- u32 val;
-
if (!pdc_is_idle(pd_chan)) {
dev_err(chan2dev(&pd_chan->chan),
"BUG: Attempt to start non-idle channel\n");
@@ -279,10 +307,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
channel_writel(pd_chan, NEXT, desc->txd.phys);
pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
}
-
- val = dma_readl(pd, CTL2);
- val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
- dma_writel(pd, CTL2, val);
}
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
@@ -403,7 +427,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
struct pch_dma_desc *desc, *_d;
struct pch_dma_desc *ret = NULL;
- int i;
+ int i = 0;
spin_lock(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
@@ -478,7 +502,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
spin_unlock_bh(&pd_chan->lock);
pdc_enable_irq(chan, 1);
- pdc_set_dir(chan);
return pd_chan->descs_allocated;
}
@@ -561,6 +584,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
else
return NULL;
+ pd_chan->dir = direction;
+ pdc_set_dir(chan);
+
for_each_sg(sgl, sg, sg_len, i) {
desc = pdc_desc_get(pd_chan);
@@ -703,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd)
pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
+ pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
pd_chan = to_pd_chan(chan);
@@ -725,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd)
dma_writel(pd, CTL0, pd->regs.dma_ctl0);
dma_writel(pd, CTL1, pd->regs.dma_ctl1);
dma_writel(pd, CTL2, pd->regs.dma_ctl2);
+ dma_writel(pd, CTL3, pd->regs.dma_ctl3);
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
pd_chan = to_pd_chan(chan);
@@ -850,8 +878,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
pd_chan->membase = &regs->desc[i];
- pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
spin_lock_init(&pd_chan->lock);
INIT_LIST_HEAD(&pd_chan->active_list);
@@ -929,13 +955,23 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
+#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032
+#define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B
+#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
+#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
+#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
-static const struct pci_device_id pch_dma_id_table[] = {
+DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
{ 0, },
};
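With the new 12-channel ML7213 device, channels 8-11 no longer fit in the per-channel fields of CTL0, so pdc_set_dir()/pdc_set_mode() above select CTL3 and renumber the channel within it. A small sketch of that selection (the field width and direction-bit offset are assumptions standing in for DMA_CTL0_BITS_PER_CH and DMA_CTL0_DIR_SHIFT_BITS, which are not shown in this diff):

	#include <stdio.h>

	#define DMA_CTL0_BITS_PER_CH	4	/* assumption: per-channel field width */
	#define DMA_CTL0_DIR_SHIFT_BITS	2	/* assumption: direction bit offset   */

	int main(void)
	{
		int chan_id;

		for (chan_id = 0; chan_id < 12; chan_id++) {
			const char *reg = (chan_id < 8) ? "CTL0" : "CTL3";
			int ch = (chan_id < 8) ? chan_id : chan_id - 8;	/* ch8->0 ... ch11->3 */
			int dir_bit = DMA_CTL0_BITS_PER_CH * ch + DMA_CTL0_DIR_SHIFT_BITS;

			printf("chan %2d: %s, field %d, direction bit %d\n",
			       chan_id, reg, ch, dir_bit);
		}
		return 0;
	}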
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 3b0247e74cc4..fc457a7e8832 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
spin_lock_bh(&ppc440spe_chan->lock);
@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
spin_lock_bh(&ppc440spe_chan->lock);
@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
dma_dest, dma_src, src_cnt));
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(ppc440spe_chan->device->common.dev,
"ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
dst, src, src_cnt));
BUG_ON(!len);
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
BUG_ON(!src_cnt);
if (src_cnt == 1 && dst[1] == src[0]) {
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 636e40925b16..028330044201 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -343,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
dmae_set_dmars(sh_chan, cfg->mid_rid);
dmae_set_chcr(sh_chan, cfg->chcr);
- } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
+ } else {
dmae_init(sh_chan);
}
@@ -1144,6 +1144,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* platform data */
shdev->pdata = pdata;
+ platform_set_drvdata(pdev, shdev);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -1219,6 +1221,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
} else {
do {
for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+ if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
+ irq_cap = 1;
+ break;
+ }
+
if ((errirq_res->flags & IORESOURCE_BITS) ==
IORESOURCE_IRQ_SHAREABLE)
chan_flag[irq_cnt] = IRQF_SHARED;
@@ -1228,15 +1235,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
"Found IRQ %d for channel %d\n",
i, irq_cnt);
chan_irq[irq_cnt++] = i;
-
- if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
- break;
}
- if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
- irq_cap = 1;
+ if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
break;
- }
+
chanirq_res = platform_get_resource(pdev,
IORESOURCE_IRQ, ++irqres);
} while (irq_cnt < pdata->channel_num && chanirq_res);
@@ -1256,7 +1259,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- platform_set_drvdata(pdev, shdev);
dma_async_device_register(&shdev->common);
return err;
@@ -1278,6 +1280,8 @@ rst_err:
if (dmars)
iounmap(shdev->dmars);
+
+ platform_set_drvdata(pdev, NULL);
emapdmars:
iounmap(shdev->chan_reg);
synchronize_rcu();
@@ -1316,6 +1320,8 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
iounmap(shdev->dmars);
iounmap(shdev->chan_reg);
+ platform_set_drvdata(pdev, NULL);
+
synchronize_rcu();
kfree(shdev);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 94ee15dd3aed..8f222d4db7de 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1829,7 +1829,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
struct stedma40_platform_data *plat = chan->base->plat_data;
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
if (chan->runtime_addr)
return chan->runtime_addr;
@@ -2962,4 +2962,4 @@ static int __init stedma40_init(void)
{
return platform_driver_probe(&d40_driver, d40_probe);
}
-arch_initcall(stedma40_init);
+subsys_initcall(stedma40_init);
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index f032e446fc11..bfe723266fd8 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -108,7 +108,9 @@ done:
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
+#ifdef CONFIG_ACPI
int i;
+#endif
ibft_addr = NULL;
#ifdef CONFIG_ACPI
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b57ec09af891..2967002a9f82 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -86,6 +86,22 @@ config GPIO_IT8761E
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
+config GPIO_EXYNOS4
+ def_bool y
+ depends on CPU_EXYNOS4210
+
+config GPIO_PLAT_SAMSUNG
+ def_bool y
+ depends on SAMSUNG_GPIOLIB_4BIT
+
+config GPIO_S5PC100
+ def_bool y
+ depends on CPU_S5PC100
+
+config GPIO_S5PV210
+ def_bool y
+ depends on CPU_S5PV210
+
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
@@ -303,7 +319,7 @@ comment "PCI GPIO expanders:"
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
- depends on PCI && X86 && !CS5535_GPIO
+ depends on PCI && X86 && !CS5535_GPIO && MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
can be used for quite a number of things. The CS5535/6 is found on
@@ -334,13 +350,19 @@ config GPIO_LANGWELL
Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_PCH
- tristate "PCH GPIO of Intel Topcliff"
+ tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
depends on PCI && X86
help
This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
which is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH GPIO device.
+ This driver can also be used for the OKI SEMICONDUCTOR ML7223 IOH
+ (Input/Output Hub).
+ The ML7223 IOH is for MP (Media Phone) use.
+ ML7223 is a companion chip for the Intel Atom E6xx series.
+ ML7223 is fully compatible with the Intel EG20T PCH.
+
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on PCI
@@ -424,4 +446,11 @@ config AB8500_GPIO
depends on AB8500_CORE && BROKEN
help
Select this to enable the AB8500 IC GPIO driver
+
+config GPIO_TPS65910
+ bool "TPS65910 GPIO"
+ depends on MFD_TPS65910
+ help
+ Select this option to enable the GPIO driver for the TPS65910
+ chip family.
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d92ce3a62ae5..b605f8ec6fbe 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -8,6 +8,10 @@ obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o
obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
+obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o
+obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o
+obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o
+obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o
obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
obj-$(CONFIG_GPIO_MAX730X) += max730x.o
obj-$(CONFIG_GPIO_MAX7300) += max7300.o
@@ -16,6 +20,7 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
obj-$(CONFIG_GPIO_MC33880) += mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_74X164) += 74x164.o
+obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PCH) += pch_gpio.o
@@ -34,9 +39,12 @@ obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+obj-$(CONFIG_MACH_U300) += gpio-u300.o
+obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
obj-$(CONFIG_GPIO_SX150X) += sx150x.o
obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o
obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o
+obj-$(CONFIG_GPIO_TPS65910) += tps65910-gpio.o
diff --git a/drivers/gpio/gpio-exynos4.c b/drivers/gpio/gpio-exynos4.c
new file mode 100644
index 000000000000..9029835112e7
--- /dev/null
+++ b/drivers/gpio/gpio-exynos4.c
@@ -0,0 +1,386 @@
+/* linux/arch/arm/mach-exynos4/gpiolib.c
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS4 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/map.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
+int s3c_gpio_setpull_exynos4(struct s3c_gpio_chip *chip,
+ unsigned int off, s3c_gpio_pull_t pull)
+{
+ if (pull == S3C_GPIO_PULL_UP)
+ pull = 3;
+
+ return s3c_gpio_setpull_updown(chip, off, pull);
+}
+
+s3c_gpio_pull_t s3c_gpio_getpull_exynos4(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ s3c_gpio_pull_t pull;
+
+ pull = s3c_gpio_getpull_updown(chip, off);
+ if (pull == 3)
+ pull = S3C_GPIO_PULL_UP;
+
+ return pull;
+}
+
+static struct s3c_gpio_cfg gpio_cfg = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_exynos4,
+ .get_pull = s3c_gpio_getpull_exynos4,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_noint = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_exynos4,
+ .get_pull = s3c_gpio_getpull_exynos4,
+};
+
+/*
+ * Following are the GPIO banks in EXYNOS4 (formerly S5PV310).
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * gpio_cfg structure in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: the initialization of the 'base' member of each s3c_gpio_chip
+ * structure depends on the banks being listed in order here.
+ */
+static struct s3c_gpio_chip exynos4_gpio_part1_4bit[] = {
+ {
+ .chip = {
+ .base = EXYNOS4_GPA0(0),
+ .ngpio = EXYNOS4_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPA1(0),
+ .ngpio = EXYNOS4_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPB(0),
+ .ngpio = EXYNOS4_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPC0(0),
+ .ngpio = EXYNOS4_GPIO_C0_NR,
+ .label = "GPC0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPC1(0),
+ .ngpio = EXYNOS4_GPIO_C1_NR,
+ .label = "GPC1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPD0(0),
+ .ngpio = EXYNOS4_GPIO_D0_NR,
+ .label = "GPD0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPD1(0),
+ .ngpio = EXYNOS4_GPIO_D1_NR,
+ .label = "GPD1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE0(0),
+ .ngpio = EXYNOS4_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE1(0),
+ .ngpio = EXYNOS4_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE2(0),
+ .ngpio = EXYNOS4_GPIO_E2_NR,
+ .label = "GPE2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE3(0),
+ .ngpio = EXYNOS4_GPIO_E3_NR,
+ .label = "GPE3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE4(0),
+ .ngpio = EXYNOS4_GPIO_E4_NR,
+ .label = "GPE4",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF0(0),
+ .ngpio = EXYNOS4_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF1(0),
+ .ngpio = EXYNOS4_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF2(0),
+ .ngpio = EXYNOS4_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF3(0),
+ .ngpio = EXYNOS4_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ },
+};
+
+static struct s3c_gpio_chip exynos4_gpio_part2_4bit[] = {
+ {
+ .chip = {
+ .base = EXYNOS4_GPJ0(0),
+ .ngpio = EXYNOS4_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPJ1(0),
+ .ngpio = EXYNOS4_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK0(0),
+ .ngpio = EXYNOS4_GPIO_K0_NR,
+ .label = "GPK0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK1(0),
+ .ngpio = EXYNOS4_GPIO_K1_NR,
+ .label = "GPK1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK2(0),
+ .ngpio = EXYNOS4_GPIO_K2_NR,
+ .label = "GPK2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK3(0),
+ .ngpio = EXYNOS4_GPIO_K3_NR,
+ .label = "GPK3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPL0(0),
+ .ngpio = EXYNOS4_GPIO_L0_NR,
+ .label = "GPL0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPL1(0),
+ .ngpio = EXYNOS4_GPIO_L1_NR,
+ .label = "GPL1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPL2(0),
+ .ngpio = EXYNOS4_GPIO_L2_NR,
+ .label = "GPL2",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY0(0),
+ .ngpio = EXYNOS4_GPIO_Y0_NR,
+ .label = "GPY0",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY1(0),
+ .ngpio = EXYNOS4_GPIO_Y1_NR,
+ .label = "GPY1",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY2(0),
+ .ngpio = EXYNOS4_GPIO_Y2_NR,
+ .label = "GPY2",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY3(0),
+ .ngpio = EXYNOS4_GPIO_Y3_NR,
+ .label = "GPY3",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY4(0),
+ .ngpio = EXYNOS4_GPIO_Y4_NR,
+ .label = "GPY4",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY5(0),
+ .ngpio = EXYNOS4_GPIO_Y5_NR,
+ .label = "GPY5",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY6(0),
+ .ngpio = EXYNOS4_GPIO_Y6_NR,
+ .label = "GPY6",
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC00),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = EXYNOS4_GPX0(0),
+ .ngpio = EXYNOS4_GPIO_X0_NR,
+ .label = "GPX0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC20),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = EXYNOS4_GPX1(0),
+ .ngpio = EXYNOS4_GPIO_X1_NR,
+ .label = "GPX1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC40),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = EXYNOS4_GPX2(0),
+ .ngpio = EXYNOS4_GPIO_X2_NR,
+ .label = "GPX2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC60),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = EXYNOS4_GPX3(0),
+ .ngpio = EXYNOS4_GPIO_X3_NR,
+ .label = "GPX3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+};
+
+static struct s3c_gpio_chip exynos4_gpio_part3_4bit[] = {
+ {
+ .chip = {
+ .base = EXYNOS4_GPZ(0),
+ .ngpio = EXYNOS4_GPIO_Z_NR,
+ .label = "GPZ",
+ },
+ },
+};
+
+static __init int exynos4_gpiolib_init(void)
+{
+ struct s3c_gpio_chip *chip;
+ int i;
+ int group = 0;
+ int nr_chips;
+
+ /* GPIO part 1 */
+
+ chip = exynos4_gpio_part1_4bit;
+ nr_chips = ARRAY_SIZE(exynos4_gpio_part1_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ /* Assign the GPIO interrupt group */
+ chip->group = group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5P_VA_GPIO1 + (i) * 0x20;
+ }
+
+ samsung_gpiolib_add_4bit_chips(exynos4_gpio_part1_4bit, nr_chips);
+
+ /* GPIO part 2 */
+
+ chip = exynos4_gpio_part2_4bit;
+ nr_chips = ARRAY_SIZE(exynos4_gpio_part2_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ /* Assign the GPIO interrupt group */
+ chip->group = group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5P_VA_GPIO2 + (i) * 0x20;
+ }
+
+ samsung_gpiolib_add_4bit_chips(exynos4_gpio_part2_4bit, nr_chips);
+
+ /* GPIO part 3 */
+
+ chip = exynos4_gpio_part3_4bit;
+ nr_chips = ARRAY_SIZE(exynos4_gpio_part3_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ /* Assign the GPIO interrupt group */
+ chip->group = group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5P_VA_GPIO3 + (i) * 0x20;
+ }
+
+ samsung_gpiolib_add_4bit_chips(exynos4_gpio_part3_4bit, nr_chips);
+ s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
+ s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
+
+ return 0;
+}
+core_initcall(exynos4_gpiolib_init);
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
new file mode 100644
index 000000000000..2c212c732d76
--- /dev/null
+++ b/drivers/gpio/gpio-nomadik.c
@@ -0,0 +1,1087 @@
+/*
+ * Generic GPIO driver for logic cells found in the Nomadik SoC
+ *
+ * Copyright (C) 2008,2009 STMicroelectronics
+ * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
+ * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ * Copyright (C) 2011 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+
+#include <asm/mach/irq.h>
+
+#include <plat/pincfg.h>
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+
+/*
+ * The GPIO module in the Nomadik family of Systems-on-Chip is an
+ * AMBA device, managing 32 pins and alternate functions. The logic block
+ * is currently used in the Nomadik and ux500.
+ *
+ * Symbols in this file are called "nmk_gpio" for "nomadik gpio"
+ */
+
+#define NMK_GPIO_PER_CHIP 32
+
+struct nmk_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *addr;
+ struct clk *clk;
+ unsigned int bank;
+ unsigned int parent_irq;
+ int secondary_parent_irq;
+ u32 (*get_secondary_status)(unsigned int bank);
+ void (*set_ioforce)(bool enable);
+ spinlock_t lock;
+ bool sleepmode;
+ /* Keep track of configured edges */
+ u32 edge_rising;
+ u32 edge_falling;
+ u32 real_wake;
+ u32 rwimsc;
+ u32 fwimsc;
+ u32 slpm;
+ u32 enabled;
+ u32 pull_up;
+};
+
+static struct nmk_gpio_chip *
+nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)];
+
+static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
+
+#define NUM_BANKS ARRAY_SIZE(nmk_gpio_chips)
+
+static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int gpio_mode)
+{
+ u32 bit = 1 << offset;
+ u32 afunc, bfunc;
+
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~bit;
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~bit;
+ if (gpio_mode & NMK_GPIO_ALT_A)
+ afunc |= bit;
+ if (gpio_mode & NMK_GPIO_ALT_B)
+ bfunc |= bit;
+ writel(afunc, nmk_chip->addr + NMK_GPIO_AFSLA);
+ writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB);
+}
+
+static void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, enum nmk_gpio_slpm mode)
+{
+ u32 bit = 1 << offset;
+ u32 slpm;
+
+ slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC);
+ if (mode == NMK_GPIO_SLPM_NOCHANGE)
+ slpm |= bit;
+ else
+ slpm &= ~bit;
+ writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC);
+}
+
+static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, enum nmk_gpio_pull pull)
+{
+ u32 bit = 1 << offset;
+ u32 pdis;
+
+ pdis = readl(nmk_chip->addr + NMK_GPIO_PDIS);
+ if (pull == NMK_GPIO_PULL_NONE) {
+ pdis |= bit;
+ nmk_chip->pull_up &= ~bit;
+ } else {
+ pdis &= ~bit;
+ }
+
+ writel(pdis, nmk_chip->addr + NMK_GPIO_PDIS);
+
+ if (pull == NMK_GPIO_PULL_UP) {
+ nmk_chip->pull_up |= bit;
+ writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
+ } else if (pull == NMK_GPIO_PULL_DOWN) {
+ nmk_chip->pull_up &= ~bit;
+ writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
+ }
+}
+
+static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset)
+{
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+}
+
+static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int val)
+{
+ if (val)
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATS);
+ else
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATC);
+}
+
+static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int val)
+{
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS);
+ __nmk_gpio_set_output(nmk_chip, offset, val);
+}
+
+static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int gpio_mode,
+ bool glitch)
+{
+ u32 rwimsc = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
+ u32 fwimsc = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
+
+ if (glitch && nmk_chip->set_ioforce) {
+ u32 bit = BIT(offset);
+
+ /* Prevent spurious wakeups */
+ writel(rwimsc & ~bit, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ writel(fwimsc & ~bit, nmk_chip->addr + NMK_GPIO_FWIMSC);
+
+ nmk_chip->set_ioforce(true);
+ }
+
+ __nmk_gpio_set_mode(nmk_chip, offset, gpio_mode);
+
+ if (glitch && nmk_chip->set_ioforce) {
+ nmk_chip->set_ioforce(false);
+
+ writel(rwimsc, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ writel(fwimsc, nmk_chip->addr + NMK_GPIO_FWIMSC);
+ }
+}
+
+static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
+ pin_cfg_t cfg, bool sleep, unsigned int *slpmregs)
+{
+ static const char *afnames[] = {
+ [NMK_GPIO_ALT_GPIO] = "GPIO",
+ [NMK_GPIO_ALT_A] = "A",
+ [NMK_GPIO_ALT_B] = "B",
+ [NMK_GPIO_ALT_C] = "C"
+ };
+ static const char *pullnames[] = {
+ [NMK_GPIO_PULL_NONE] = "none",
+ [NMK_GPIO_PULL_UP] = "up",
+ [NMK_GPIO_PULL_DOWN] = "down",
+ [3] /* illegal */ = "??"
+ };
+ static const char *slpmnames[] = {
+ [NMK_GPIO_SLPM_INPUT] = "input/wakeup",
+ [NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup",
+ };
+
+ int pin = PIN_NUM(cfg);
+ int pull = PIN_PULL(cfg);
+ int af = PIN_ALT(cfg);
+ int slpm = PIN_SLPM(cfg);
+ int output = PIN_DIR(cfg);
+ int val = PIN_VAL(cfg);
+ bool glitch = af == NMK_GPIO_ALT_C;
+
+ dev_dbg(nmk_chip->chip.dev, "pin %d [%#lx]: af %s, pull %s, slpm %s (%s%s)\n",
+ pin, cfg, afnames[af], pullnames[pull], slpmnames[slpm],
+ output ? "output " : "input",
+ output ? (val ? "high" : "low") : "");
+
+ if (sleep) {
+ int slpm_pull = PIN_SLPM_PULL(cfg);
+ int slpm_output = PIN_SLPM_DIR(cfg);
+ int slpm_val = PIN_SLPM_VAL(cfg);
+
+ af = NMK_GPIO_ALT_GPIO;
+
+ /*
+ * The SLPM_* values are normal values + 1 to allow zero to
+ * mean "same as normal".
+ */
+ if (slpm_pull)
+ pull = slpm_pull - 1;
+ if (slpm_output)
+ output = slpm_output - 1;
+ if (slpm_val)
+ val = slpm_val - 1;
+
+ dev_dbg(nmk_chip->chip.dev, "pin %d: sleep pull %s, dir %s, val %s\n",
+ pin,
+ slpm_pull ? pullnames[pull] : "same",
+ slpm_output ? (output ? "output" : "input") : "same",
+ slpm_val ? (val ? "high" : "low") : "same");
+ }
+
+ if (output)
+ __nmk_gpio_make_output(nmk_chip, offset, val);
+ else {
+ __nmk_gpio_make_input(nmk_chip, offset);
+ __nmk_gpio_set_pull(nmk_chip, offset, pull);
+ }
+
+ /*
+ * If we've backed up the SLPM registers (glitch workaround), modify
+ * the backups since they will be restored.
+ */
+ if (slpmregs) {
+ if (slpm == NMK_GPIO_SLPM_NOCHANGE)
+ slpmregs[nmk_chip->bank] |= BIT(offset);
+ else
+ slpmregs[nmk_chip->bank] &= ~BIT(offset);
+ } else
+ __nmk_gpio_set_slpm(nmk_chip, offset, slpm);
+
+ __nmk_gpio_set_mode_safe(nmk_chip, offset, af, glitch);
+}
+
+/*
+ * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+ * - Save SLPM registers
+ * - Set SLPM=0 for the IOs you want to switch and others to 1
+ * - Configure the GPIO registers for the IOs that are being switched
+ * - Set IOFORCE=1
+ * - Modify the AFLSA/B registers for the IOs that are being switched
+ * - Set IOFORCE=0
+ * - Restore SLPM registers
+ * - Any spurious wake up event during switch sequence to be ignored and
+ * cleared
+ */
+static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+ unsigned int temp = slpm[i];
+
+ if (!chip)
+ break;
+
+ slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
+ writel(temp, chip->addr + NMK_GPIO_SLPC);
+ }
+}
+
+static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+
+ if (!chip)
+ break;
+
+ writel(slpm[i], chip->addr + NMK_GPIO_SLPC);
+ }
+}
+
+static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
+{
+ static unsigned int slpm[NUM_BANKS];
+ unsigned long flags;
+ bool glitch = false;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C) {
+ glitch = true;
+ break;
+ }
+ }
+
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+
+ if (glitch) {
+ memset(slpm, 0xff, sizeof(slpm));
+
+ for (i = 0; i < num; i++) {
+ int pin = PIN_NUM(cfgs[i]);
+ int offset = pin % NMK_GPIO_PER_CHIP;
+
+ if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C)
+ slpm[pin / NMK_GPIO_PER_CHIP] &= ~BIT(offset);
+ }
+
+ nmk_gpio_glitch_slpm_init(slpm);
+ }
+
+ for (i = 0; i < num; i++) {
+ struct nmk_gpio_chip *nmk_chip;
+ int pin = PIN_NUM(cfgs[i]);
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(pin));
+ if (!nmk_chip) {
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_lock(&nmk_chip->lock);
+ __nmk_config_pin(nmk_chip, pin - nmk_chip->chip.base,
+ cfgs[i], sleep, glitch ? slpm : NULL);
+ spin_unlock(&nmk_chip->lock);
+ }
+
+ if (glitch)
+ nmk_gpio_glitch_slpm_restore(slpm);
+
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+
+ return ret;
+}
+
+/**
+ * nmk_config_pin - configure a pin's mux attributes
+ * @cfg: pin configuration
+ *
+ * Configures a pin's mode (alternate function or GPIO), its pull up status,
+ * and its sleep mode based on the specified configuration. The @cfg is
+ * usually one of the SoC specific macros defined in mach/<soc>-pins.h. These
+ * are constructed using, and can be further enhanced with, the macros in
+ * plat/pincfg.h.
+ *
+ * If a pin's mode is set to GPIO, it is configured as an input to avoid
+ * side-effects. The gpio can be manipulated later using standard GPIO API
+ * calls.
+ */
+int nmk_config_pin(pin_cfg_t cfg, bool sleep)
+{
+ return __nmk_config_pins(&cfg, 1, sleep);
+}
+EXPORT_SYMBOL(nmk_config_pin);
+
+/**
+ * nmk_config_pins - configure several pins at once
+ * @cfgs: array of pin configurations
+ * @num: number of elements in the array
+ *
+ * Configures several pins using nmk_config_pin(). Refer to that function for
+ * further information.
+ */
+int nmk_config_pins(pin_cfg_t *cfgs, int num)
+{
+ return __nmk_config_pins(cfgs, num, false);
+}
+EXPORT_SYMBOL(nmk_config_pins);
+
+int nmk_config_pins_sleep(pin_cfg_t *cfgs, int num)
+{
+ return __nmk_config_pins(cfgs, num, true);
+}
+EXPORT_SYMBOL(nmk_config_pins_sleep);
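A hedged usage sketch of the pin-config API exported above: board code hands an array of pin_cfg_t values to nmk_config_pins(). The PIN_CFG() macro usage and the pin numbers here are illustrative assumptions based on plat/pincfg.h, not part of this patch.

/* Illustrative only: exact macro names live in plat/pincfg.h. */
static pin_cfg_t example_uart_pins[] __initdata = {
	PIN_CFG(0, ALT_A),	/* assumed UART RX on alternate function A */
	PIN_CFG(1, ALT_A),	/* assumed UART TX on alternate function A */
};

static void __init example_init_pins(void)
{
	/* Configures mux, pull and sleep mode for each entry in one call */
	nmk_config_pins(example_uart_pins, ARRAY_SIZE(example_uart_pins));
}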
+
+/**
+ * nmk_gpio_set_slpm() - configure the sleep mode of a pin
+ * @gpio: pin number
+ * @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE
+ *
+ * This register is actually in the pinmux layer, not the GPIO block itself.
+ * The GPIO1B_SLPM register defines the GPIO mode when SLEEP/DEEP-SLEEP
+ * mode is entered (i.e. when signal IOFORCE is HIGH by the platform code).
+ * Each GPIO can be configured to be forced into GPIO mode when IOFORCE is
+ * HIGH, overriding the normal setting defined by GPIO_AFSELx registers.
+ * When IOFORCE returns LOW (by software, after SLEEP/DEEP-SLEEP exit),
+ * the GPIOs return to the normal setting defined by GPIO_AFSELx registers.
+ *
+ * If @mode is NMK_GPIO_SLPM_INPUT, the corresponding GPIO is switched to GPIO
+ * mode when signal IOFORCE is HIGH (i.e. when SLEEP/DEEP-SLEEP mode is
+ * entered) regardless of the altfunction selected. Also wake-up detection is
+ * ENABLED.
+ *
+ * If @mode is NMK_GPIO_SLPM_NOCHANGE, the corresponding GPIO remains
+ * controlled by NMK_GPIO_DATC, NMK_GPIO_DATS, NMK_GPIO_DIR, NMK_GPIO_PDIS
+ * (for altfunction GPIO) or respective on-chip peripherals (for other
+ * altfuncs) when IOFORCE is HIGH. Also, wake-up detection is DISABLED.
+ *
+ * Note that enable_irq_wake() will automatically enable wakeup detection.
+ */
+int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base, mode);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+
+ return 0;
+}
+
+/**
+ * nmk_gpio_set_pull() - enable/disable pull up/down on a gpio
+ * @gpio: pin number
+ * @pull: one of NMK_GPIO_PULL_DOWN, NMK_GPIO_PULL_UP, and NMK_GPIO_PULL_NONE
+ *
+ * Enables/disables pull up/down on a specified pin. This only takes effect if
+ * the pin is configured as an input (either explicitly or by the alternate
+ * function).
+ *
+ * NOTE: If enabling the pull up/down, the caller must ensure that the GPIO is
+ * configured as an input. Otherwise, due to the way the controller registers
+ * work, this function will change the value output on the pin.
+ */
+int nmk_gpio_set_pull(int gpio, enum nmk_gpio_pull pull)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+ __nmk_gpio_set_pull(nmk_chip, gpio - nmk_chip->chip.base, pull);
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ return 0;
+}
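A small illustrative sketch of the note above (the caller and GPIO number are made up): make the line an input before enabling the pull, since the DATS/DATC side effect would otherwise drive the pin.

/* Illustrative only: request, switch to input, then enable the pull-up. */
static int example_enable_pull_up(unsigned gpio)
{
	int ret = gpio_request(gpio, "example");

	if (ret)
		return ret;
	gpio_direction_input(gpio);
	return nmk_gpio_set_pull(gpio, NMK_GPIO_PULL_UP);
}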
+
+/* Mode functions */
+/**
+ * nmk_gpio_set_mode() - set the mux mode of a gpio pin
+ * @gpio: pin number
+ * @gpio_mode: one of NMK_GPIO_ALT_GPIO, NMK_GPIO_ALT_A,
+ * NMK_GPIO_ALT_B, and NMK_GPIO_ALT_C
+ *
+ * Sets the mode of the specified pin to one of the alternate functions or
+ * plain GPIO.
+ */
+int nmk_gpio_set_mode(int gpio, int gpio_mode)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+ __nmk_gpio_set_mode(nmk_chip, gpio - nmk_chip->chip.base, gpio_mode);
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(nmk_gpio_set_mode);
+
+int nmk_gpio_get_mode(int gpio)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ u32 afunc, bfunc, bit;
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ bit = 1 << (gpio - nmk_chip->chip.base);
+
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & bit;
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & bit;
+
+ return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
+}
+EXPORT_SYMBOL(nmk_gpio_get_mode);
+
+
+/* IRQ functions */
+static inline int nmk_gpio_get_bitmask(int gpio)
+{
+ return 1 << (gpio % 32);
+}
+
+static void nmk_gpio_irq_ack(struct irq_data *d)
+{
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+ nmk_chip = irq_data_get_irq_chip_data(d);
+ if (!nmk_chip)
+ return;
+ writel(nmk_gpio_get_bitmask(gpio), nmk_chip->addr + NMK_GPIO_IC);
+}
+
+enum nmk_gpio_irq_type {
+ NORMAL,
+ WAKE,
+};
+
+static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
+ int gpio, enum nmk_gpio_irq_type which,
+ bool enable)
+{
+ u32 rimsc = which == WAKE ? NMK_GPIO_RWIMSC : NMK_GPIO_RIMSC;
+ u32 fimsc = which == WAKE ? NMK_GPIO_FWIMSC : NMK_GPIO_FIMSC;
+ u32 bitmask = nmk_gpio_get_bitmask(gpio);
+ u32 reg;
+
+ /* we must individually set/clear the two edges */
+ if (nmk_chip->edge_rising & bitmask) {
+ reg = readl(nmk_chip->addr + rimsc);
+ if (enable)
+ reg |= bitmask;
+ else
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + rimsc);
+ }
+ if (nmk_chip->edge_falling & bitmask) {
+ reg = readl(nmk_chip->addr + fimsc);
+ if (enable)
+ reg |= bitmask;
+ else
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + fimsc);
+ }
+}
+
+static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
+ int gpio, bool on)
+{
+ if (nmk_chip->sleepmode) {
+ __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base,
+ on ? NMK_GPIO_SLPM_WAKEUP_ENABLE
+ : NMK_GPIO_SLPM_WAKEUP_DISABLE);
+ }
+
+ __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
+}
+
+static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
+{
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 bitmask;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+ nmk_chip = irq_data_get_irq_chip_data(d);
+ bitmask = nmk_gpio_get_bitmask(gpio);
+ if (!nmk_chip)
+ return -EINVAL;
+
+ if (enable)
+ nmk_chip->enabled |= bitmask;
+ else
+ nmk_chip->enabled &= ~bitmask;
+
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ __nmk_gpio_irq_modify(nmk_chip, gpio, NORMAL, enable);
+
+ if (!(nmk_chip->real_wake & bitmask))
+ __nmk_gpio_set_wake(nmk_chip, gpio, enable);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+
+ return 0;
+}
+
+static void nmk_gpio_irq_mask(struct irq_data *d)
+{
+ nmk_gpio_irq_maskunmask(d, false);
+}
+
+static void nmk_gpio_irq_unmask(struct irq_data *d)
+{
+ nmk_gpio_irq_maskunmask(d, true);
+}
+
+static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 bitmask;
+ int gpio;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+ nmk_chip = irq_data_get_irq_chip_data(d);
+ if (!nmk_chip)
+ return -EINVAL;
+ bitmask = nmk_gpio_get_bitmask(gpio);
+
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ if (!(nmk_chip->enabled & bitmask))
+ __nmk_gpio_set_wake(nmk_chip, gpio, on);
+
+ if (on)
+ nmk_chip->real_wake |= bitmask;
+ else
+ nmk_chip->real_wake &= ~bitmask;
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+
+ return 0;
+}
+
+static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ bool enabled, wake = irqd_is_wakeup_set(d);
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 bitmask;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+ nmk_chip = irq_data_get_irq_chip_data(d);
+ bitmask = nmk_gpio_get_bitmask(gpio);
+ if (!nmk_chip)
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ return -EINVAL;
+
+ enabled = nmk_chip->enabled & bitmask;
+
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, NORMAL, false);
+
+ if (enabled || wake)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, false);
+
+ nmk_chip->edge_rising &= ~bitmask;
+ if (type & IRQ_TYPE_EDGE_RISING)
+ nmk_chip->edge_rising |= bitmask;
+
+ nmk_chip->edge_falling &= ~bitmask;
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ nmk_chip->edge_falling |= bitmask;
+
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, NORMAL, true);
+
+ if (enabled || wake)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, true);
+
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ return 0;
+}
+
+static struct irq_chip nmk_gpio_irq_chip = {
+ .name = "Nomadik-GPIO",
+ .irq_ack = nmk_gpio_irq_ack,
+ .irq_mask = nmk_gpio_irq_mask,
+ .irq_unmask = nmk_gpio_irq_unmask,
+ .irq_set_type = nmk_gpio_irq_set_type,
+ .irq_set_wake = nmk_gpio_irq_set_wake,
+};
+
+static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
+ u32 status)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ struct irq_chip *host_chip = irq_get_chip(irq);
+ unsigned int first_irq;
+
+ chained_irq_enter(host_chip, desc);
+
+ nmk_chip = irq_get_handler_data(irq);
+ first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
+ while (status) {
+ int bit = __ffs(status);
+
+ generic_handle_irq(first_irq + bit);
+ status &= ~BIT(bit);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
+ u32 status = readl(nmk_chip->addr + NMK_GPIO_IS);
+
+ __nmk_gpio_irq_handler(irq, desc, status);
+}
+
+static void nmk_gpio_secondary_irq_handler(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
+ u32 status = nmk_chip->get_secondary_status(nmk_chip->bank);
+
+ __nmk_gpio_irq_handler(irq, desc, status);
+}
+
+static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
+{
+ unsigned int first_irq;
+ int i;
+
+ first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
+ for (i = first_irq; i < first_irq + nmk_chip->chip.ngpio; i++) {
+ irq_set_chip_and_handler(i, &nmk_gpio_irq_chip,
+ handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ irq_set_chip_data(i, nmk_chip);
+ irq_set_irq_type(i, IRQ_TYPE_EDGE_FALLING);
+ }
+
+ irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
+ irq_set_handler_data(nmk_chip->parent_irq, nmk_chip);
+
+ if (nmk_chip->secondary_parent_irq >= 0) {
+ irq_set_chained_handler(nmk_chip->secondary_parent_irq,
+ nmk_gpio_secondary_irq_handler);
+ irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip);
+ }
+
+ return 0;
+}
+
+/* I/O Functions */
+static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+ return 0;
+}
+
+static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+ u32 bit = 1 << offset;
+
+ return (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0;
+}
+
+static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ __nmk_gpio_set_output(nmk_chip, offset, val);
+}
+
+static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ __nmk_gpio_make_output(nmk_chip, offset, val);
+
+ return 0;
+}
+
+static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ return NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base) + offset;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ int mode;
+ unsigned i;
+ unsigned gpio = chip->base;
+ int is_out;
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+ const char *modes[] = {
+ [NMK_GPIO_ALT_GPIO] = "gpio",
+ [NMK_GPIO_ALT_A] = "altA",
+ [NMK_GPIO_ALT_B] = "altB",
+ [NMK_GPIO_ALT_C] = "altC",
+ };
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ const char *label = gpiochip_is_requested(chip, i);
+ bool pull;
+ u32 bit = 1 << i;
+
+ is_out = readl(nmk_chip->addr + NMK_GPIO_DIR) & bit;
+ pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit);
+ mode = nmk_gpio_get_mode(gpio);
+ seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s %s",
+ gpio, label ?: "(none)",
+ is_out ? "out" : "in ",
+ chip->get
+ ? (chip->get(chip, i) ? "hi" : "lo")
+ : "? ",
+ (mode < 0) ? "unknown" : modes[mode],
+ pull ? "pull" : "none");
+
+ if (label && !is_out) {
+ int irq = gpio_to_irq(gpio);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ /* This races with request_irq(), set_irq_type(),
+ * and set_irq_wake() ... but those are "rare".
+ */
+ if (irq >= 0 && desc->action) {
+ char *trigger;
+ u32 bitmask = nmk_gpio_get_bitmask(gpio);
+
+ if (nmk_chip->edge_rising & bitmask)
+ trigger = "edge-rising";
+ else if (nmk_chip->edge_falling & bitmask)
+ trigger = "edge-falling";
+ else
+ trigger = "edge-undefined";
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger,
+ irqd_is_wakeup_set(&desc->irq_data)
+ ? " wakeup" : "");
+ }
+ }
+
+ seq_printf(s, "\n");
+ }
+}
+
+#else
+#define nmk_gpio_dbg_show NULL
+#endif
+
+/* This structure is replicated for each GPIO block allocated at probe time */
+static struct gpio_chip nmk_gpio_template = {
+ .direction_input = nmk_gpio_make_input,
+ .get = nmk_gpio_get_input,
+ .direction_output = nmk_gpio_make_output,
+ .set = nmk_gpio_set_output,
+ .to_irq = nmk_gpio_to_irq,
+ .dbg_show = nmk_gpio_dbg_show,
+ .can_sleep = 0,
+};
+
+/*
+ * Called from the suspend/resume path to only keep the real wakeup interrupts
+ * (those that have had set_irq_wake() called on them) as wakeup interrupts,
+ * and not the rest of the interrupts which we needed to have as wakeups for
+ * cpuidle.
+ *
+ * PM ops are not used since this needs to be done at the end, after all the
+ * other drivers are done with their suspend callbacks.
+ */
+void nmk_gpio_wakeups_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+
+ if (!chip)
+ break;
+
+ chip->rwimsc = readl(chip->addr + NMK_GPIO_RWIMSC);
+ chip->fwimsc = readl(chip->addr + NMK_GPIO_FWIMSC);
+
+ writel(chip->rwimsc & chip->real_wake,
+ chip->addr + NMK_GPIO_RWIMSC);
+ writel(chip->fwimsc & chip->real_wake,
+ chip->addr + NMK_GPIO_FWIMSC);
+
+ if (chip->sleepmode) {
+ chip->slpm = readl(chip->addr + NMK_GPIO_SLPC);
+
+ /* 0 -> wakeup enable */
+ writel(~chip->real_wake, chip->addr + NMK_GPIO_SLPC);
+ }
+ }
+}
+
+void nmk_gpio_wakeups_resume(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+
+ if (!chip)
+ break;
+
+ writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC);
+ writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC);
+
+ if (chip->sleepmode)
+ writel(chip->slpm, chip->addr + NMK_GPIO_SLPC);
+ }
+}
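A hedged sketch of how platform suspend code might bracket the two helpers above; the surrounding PM hook is an assumption for illustration, not taken from this patch.

/* Illustrative only: run after all driver suspend callbacks, so that only
 * IRQs armed with enable_irq_wake() remain as GPIO wakeup sources. */
static int example_enter_sleep(void)
{
	nmk_gpio_wakeups_suspend();
	/* ... enter the low-power state here ... */
	nmk_gpio_wakeups_resume();
	return 0;
}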
+
+/*
+ * Read the pull up/pull down status.
+ * A bit set in 'pull_up' means that pull-up
+ * is selected if the pull is enabled in the PDIS register.
+ * Note: only pull up/down settings made via this driver can
+ * be detected, due to HW limitations.
+ */
+void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up)
+{
+ if (gpio_bank < NUM_BANKS) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[gpio_bank];
+
+ if (!chip)
+ return;
+
+ *pull_up = chip->pull_up;
+ }
+}
+
+static int __devinit nmk_gpio_probe(struct platform_device *dev)
+{
+ struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
+ struct nmk_gpio_chip *nmk_chip;
+ struct gpio_chip *chip;
+ struct resource *res;
+ struct clk *clk;
+ int secondary_irq;
+ int irq;
+ int ret;
+
+ if (!pdata)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+
+ secondary_irq = platform_get_irq(dev, 1);
+ if (secondary_irq >= 0 && !pdata->get_secondary_status) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (request_mem_region(res->start, resource_size(res),
+ dev_name(&dev->dev)) == NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto out_release;
+ }
+
+ clk_enable(clk);
+
+ nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
+ if (!nmk_chip) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
+ /*
+ * The virt address in nmk_chip->addr is in the nomadik register space,
+ * so we can simply convert the resource address, without remapping
+ */
+ nmk_chip->bank = dev->id;
+ nmk_chip->clk = clk;
+ nmk_chip->addr = io_p2v(res->start);
+ nmk_chip->chip = nmk_gpio_template;
+ nmk_chip->parent_irq = irq;
+ nmk_chip->secondary_parent_irq = secondary_irq;
+ nmk_chip->get_secondary_status = pdata->get_secondary_status;
+ nmk_chip->set_ioforce = pdata->set_ioforce;
+ nmk_chip->sleepmode = pdata->supports_sleepmode;
+ spin_lock_init(&nmk_chip->lock);
+
+ chip = &nmk_chip->chip;
+ chip->base = pdata->first_gpio;
+ chip->ngpio = pdata->num_gpio;
+ chip->label = pdata->name ?: dev_name(&dev->dev);
+ chip->dev = &dev->dev;
+ chip->owner = THIS_MODULE;
+
+ ret = gpiochip_add(&nmk_chip->chip);
+ if (ret)
+ goto out_free;
+
+ BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
+
+ nmk_gpio_chips[nmk_chip->bank] = nmk_chip;
+ platform_set_drvdata(dev, nmk_chip);
+
+ nmk_gpio_init_irq(nmk_chip);
+
+ dev_info(&dev->dev, "Bits %i-%i at address %p\n",
+ nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
+ return 0;
+
+out_free:
+ kfree(nmk_chip);
+out_clk:
+ clk_disable(clk);
+ clk_put(clk);
+out_release:
+ release_mem_region(res->start, resource_size(res));
+out:
+ dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
+ pdata->first_gpio, pdata->first_gpio+31);
+ return ret;
+}
+
+static struct platform_driver nmk_gpio_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "gpio",
+ },
+ .probe = nmk_gpio_probe,
+};
+
+static int __init nmk_gpio_init(void)
+{
+ return platform_driver_register(&nmk_gpio_driver);
+}
+
+core_initcall(nmk_gpio_init);
+
+MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
+MODULE_DESCRIPTION("Nomadik GPIO Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
new file mode 100644
index 000000000000..35bebde23e83
--- /dev/null
+++ b/drivers/gpio/gpio-omap.c
@@ -0,0 +1,2009 @@
+/*
+ * Support functions for OMAP GPIO
+ *
+ * Copyright (C) 2003-2005 Nokia Corporation
+ * Written by Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/syscore_ops.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/hardware.h>
+#include <asm/irq.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
+#include <asm/mach/irq.h>
+
+struct gpio_bank {
+ unsigned long pbase;
+ void __iomem *base;
+ u16 irq;
+ u16 virtual_irq_start;
+ int method;
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
+ u32 suspend_wakeup;
+ u32 saved_wakeup;
+#endif
+ u32 non_wakeup_gpios;
+ u32 enabled_non_wakeup_gpios;
+
+ u32 saved_datain;
+ u32 saved_fallingdetect;
+ u32 saved_risingdetect;
+ u32 level_mask;
+ u32 toggle_mask;
+ spinlock_t lock;
+ struct gpio_chip chip;
+ struct clk *dbck;
+ u32 mod_usage;
+ u32 dbck_enable_mask;
+ struct device *dev;
+ bool dbck_flag;
+ int stride;
+};
+
+#ifdef CONFIG_ARCH_OMAP3
+struct omap3_gpio_regs {
+ u32 irqenable1;
+ u32 irqenable2;
+ u32 wake_en;
+ u32 ctrl;
+ u32 oe;
+ u32 leveldetect0;
+ u32 leveldetect1;
+ u32 risingdetect;
+ u32 fallingdetect;
+ u32 dataout;
+};
+
+static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
+#endif
+
+/*
+ * TODO: Clean up gpio_bank usage as it holds information
+ * related to all instances of the device.
+ */
+static struct gpio_bank *gpio_bank;
+
+static int bank_width;
+
+/* TODO: Analyze removing gpio_bank_count usage from driver code */
+int gpio_bank_count;
+
+static inline struct gpio_bank *get_gpio_bank(int gpio)
+{
+ if (cpu_is_omap15xx()) {
+ if (OMAP_GPIO_IS_MPUIO(gpio))
+ return &gpio_bank[0];
+ return &gpio_bank[1];
+ }
+ if (cpu_is_omap16xx()) {
+ if (OMAP_GPIO_IS_MPUIO(gpio))
+ return &gpio_bank[0];
+ return &gpio_bank[1 + (gpio >> 4)];
+ }
+ if (cpu_is_omap7xx()) {
+ if (OMAP_GPIO_IS_MPUIO(gpio))
+ return &gpio_bank[0];
+ return &gpio_bank[1 + (gpio >> 5)];
+ }
+ if (cpu_is_omap24xx())
+ return &gpio_bank[gpio >> 5];
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
+ return &gpio_bank[gpio >> 5];
+ BUG();
+ return NULL;
+}
+
+static inline int get_gpio_index(int gpio)
+{
+ if (cpu_is_omap7xx())
+ return gpio & 0x1f;
+ if (cpu_is_omap24xx())
+ return gpio & 0x1f;
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
+ return gpio & 0x1f;
+ return gpio & 0x0f;
+}
+
+static inline int gpio_valid(int gpio)
+{
+ if (gpio < 0)
+ return -1;
+ if (cpu_class_is_omap1() && OMAP_GPIO_IS_MPUIO(gpio)) {
+ if (gpio >= OMAP_MAX_GPIO_LINES + 16)
+ return -1;
+ return 0;
+ }
+ if (cpu_is_omap15xx() && gpio < 16)
+ return 0;
+ if ((cpu_is_omap16xx()) && gpio < 64)
+ return 0;
+ if (cpu_is_omap7xx() && gpio < 192)
+ return 0;
+ if (cpu_is_omap2420() && gpio < 128)
+ return 0;
+ if (cpu_is_omap2430() && gpio < 160)
+ return 0;
+ if ((cpu_is_omap34xx() || cpu_is_omap44xx()) && gpio < 192)
+ return 0;
+ return -1;
+}
+
+static int check_gpio(int gpio)
+{
+ if (unlikely(gpio_valid(gpio) < 0)) {
+ printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio);
+ dump_stack();
+ return -1;
+ }
+ return 0;
+}
+
+static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_IO_CNTL / bank->stride;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DIR_CONTROL;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DIRECTION;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DIR_CONTROL;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_OE;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_OE;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return;
+ }
+ l = __raw_readl(reg);
+ if (is_input)
+ l |= 1 << gpio;
+ else
+ l &= ~(1 << gpio);
+ __raw_writel(l, reg);
+}
+
+static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
+{
+ void __iomem *reg = bank->base;
+ u32 l = 0;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_OUTPUT / bank->stride;
+ l = __raw_readl(reg);
+ if (enable)
+ l |= 1 << gpio;
+ else
+ l &= ~(1 << gpio);
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DATA_OUTPUT;
+ l = __raw_readl(reg);
+ if (enable)
+ l |= 1 << gpio;
+ else
+ l &= ~(1 << gpio);
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ if (enable)
+ reg += OMAP1610_GPIO_SET_DATAOUT;
+ else
+ reg += OMAP1610_GPIO_CLEAR_DATAOUT;
+ l = 1 << gpio;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DATA_OUTPUT;
+ l = __raw_readl(reg);
+ if (enable)
+ l |= 1 << gpio;
+ else
+ l &= ~(1 << gpio);
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ if (enable)
+ reg += OMAP24XX_GPIO_SETDATAOUT;
+ else
+ reg += OMAP24XX_GPIO_CLEARDATAOUT;
+ l = 1 << gpio;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ if (enable)
+ reg += OMAP4_GPIO_SETDATAOUT;
+ else
+ reg += OMAP4_GPIO_CLEARDATAOUT;
+ l = 1 << gpio;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return;
+ }
+ __raw_writel(l, reg);
+}
+
+static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+ reg = bank->base;
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_INPUT_LATCH / bank->stride;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DATA_INPUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DATAIN;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DATA_INPUT;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_DATAIN;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_DATAIN;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+ return (__raw_readl(reg)
+ & (1 << get_gpio_index(gpio))) != 0;
+}
+
+static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+ reg = bank->base;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_OUTPUT / bank->stride;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DATA_OUTPUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DATAOUT;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DATA_OUTPUT;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_DATAOUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_DATAOUT;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0;
+}
+
+#define MOD_REG_BIT(reg, bit_mask, set) \
+do { \
+ int l = __raw_readl(base + reg); \
+ if (set) l |= bit_mask; \
+ else l &= ~bit_mask; \
+ __raw_writel(l, base + reg); \
+} while(0)
+
+/**
+ * _set_gpio_debounce - low level gpio debounce time
+ * @bank: the gpio bank we're acting upon
+ * @gpio: the gpio number within this @bank
+ * @debounce: debounce time to use
+ *
+ * OMAP's debounce time is in 31us steps so we need
+ * to convert and round up to the closest unit.
+ */
+static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
+ unsigned debounce)
+{
+ void __iomem *reg = bank->base;
+ u32 val;
+ u32 l;
+
+ if (!bank->dbck_flag)
+ return;
+
+ if (debounce < 32)
+ debounce = 0x01;
+ else if (debounce > 7936)
+ debounce = 0xff;
+ else
+ debounce = (debounce / 0x1f) - 1;
+
+ l = 1 << get_gpio_index(gpio);
+
+ if (bank->method == METHOD_GPIO_44XX)
+ reg += OMAP4_GPIO_DEBOUNCINGTIME;
+ else
+ reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
+
+ __raw_writel(debounce, reg);
+
+ reg = bank->base;
+ if (bank->method == METHOD_GPIO_44XX)
+ reg += OMAP4_GPIO_DEBOUNCENABLE;
+ else
+ reg += OMAP24XX_GPIO_DEBOUNCE_EN;
+
+ val = __raw_readl(reg);
+
+ if (debounce) {
+ val |= l;
+ clk_enable(bank->dbck);
+ } else {
+ val &= ~l;
+ clk_disable(bank->dbck);
+ }
+ bank->dbck_enable_mask = val;
+
+ __raw_writel(val, reg);
+}
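A worked example of the conversion above, under the 31 µs granularity stated in the kerneldoc comment (the hardware interpretation is an assumption drawn from that comment):

/*
 * Illustrative arithmetic only: a request for 1000 us gives
 * 1000 / 0x1f - 1 = 31, which the hardware then stretches to roughly
 * (31 + 1) * 31 us ~= 992 us. Requests below 32 us are clamped to the
 * 0x01 minimum and requests above 7936 us to the 0xff maximum.
 */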
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
+ int trigger)
+{
+ void __iomem *base = bank->base;
+ u32 gpio_bit = 1 << gpio;
+
+ if (cpu_is_omap44xx()) {
+ MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
+ } else {
+ MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
+ }
+ if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
+ if (cpu_is_omap44xx()) {
+ MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit,
+ trigger != 0);
+ } else {
+ /*
+ * GPIO wakeup request can only be generated on edge
+ * transitions
+ */
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
+ __raw_writel(1 << gpio, bank->base
+ + OMAP24XX_GPIO_SETWKUENA);
+ else
+ __raw_writel(1 << gpio, bank->base
+ + OMAP24XX_GPIO_CLEARWKUENA);
+ }
+ }
+ /* This part needs to be executed always for OMAP{34xx, 44xx} */
+ if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
+ (bank->non_wakeup_gpios & gpio_bit)) {
+ /*
+ * Log the edge GPIO and manually trigger the IRQ
+ * after resume if the input level has changed, to
+ * avoid losing IRQs while the PER domain is in RET/OFF mode.
+ * This applies to OMAP2 non-wakeup GPIOs and all OMAP3 GPIOs.
+ */
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
+ bank->enabled_non_wakeup_gpios |= gpio_bit;
+ else
+ bank->enabled_non_wakeup_gpios &= ~gpio_bit;
+ }
+
+ if (cpu_is_omap44xx()) {
+ bank->level_mask =
+ __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) |
+ __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1);
+ } else {
+ bank->level_mask =
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
+ }
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP1
+/*
+ * This only applies to chips that can't do both rising and falling edge
+ * detection at once. For all other chips, this function is a noop.
+ */
+static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg = bank->base;
+ u32 l = 0;
+
+ switch (bank->method) {
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
+ break;
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_CONTROL;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_CONTROL;
+ break;
+#endif
+ default:
+ return;
+ }
+
+ l = __raw_readl(reg);
+ if ((l >> gpio) & 1)
+ l &= ~(1 << gpio);
+ else
+ l |= 1 << gpio;
+
+ __raw_writel(l, reg);
+}
+#endif
+
+static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
+{
+ void __iomem *reg = bank->base;
+ u32 l = 0;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
+ l = __raw_readl(reg);
+ if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+ bank->toggle_mask |= 1 << gpio;
+ if (trigger & IRQ_TYPE_EDGE_RISING)
+ l |= 1 << gpio;
+ else if (trigger & IRQ_TYPE_EDGE_FALLING)
+ l &= ~(1 << gpio);
+ else
+ goto bad;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_CONTROL;
+ l = __raw_readl(reg);
+ if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+ bank->toggle_mask |= 1 << gpio;
+ if (trigger & IRQ_TYPE_EDGE_RISING)
+ l |= 1 << gpio;
+ else if (trigger & IRQ_TYPE_EDGE_FALLING)
+ l &= ~(1 << gpio);
+ else
+ goto bad;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ if (gpio & 0x08)
+ reg += OMAP1610_GPIO_EDGE_CTRL2;
+ else
+ reg += OMAP1610_GPIO_EDGE_CTRL1;
+ gpio &= 0x07;
+ l = __raw_readl(reg);
+ l &= ~(3 << (gpio << 1));
+ if (trigger & IRQ_TYPE_EDGE_RISING)
+ l |= 2 << (gpio << 1);
+ if (trigger & IRQ_TYPE_EDGE_FALLING)
+ l |= 1 << (gpio << 1);
+ if (trigger)
+ /* Enable wake-up during idle for dynamic tick */
+ __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_SET_WAKEUPENA);
+ else
+ __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA);
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_CONTROL;
+ l = __raw_readl(reg);
+ if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+ bank->toggle_mask |= 1 << gpio;
+ if (trigger & IRQ_TYPE_EDGE_RISING)
+ l |= 1 << gpio;
+ else if (trigger & IRQ_TYPE_EDGE_FALLING)
+ l &= ~(1 << gpio);
+ else
+ goto bad;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ case METHOD_GPIO_24XX:
+ case METHOD_GPIO_44XX:
+ set_24xx_gpio_triggering(bank, gpio, trigger);
+ return 0;
+#endif
+ default:
+ goto bad;
+ }
+ __raw_writel(l, reg);
+ return 0;
+bad:
+ return -EINVAL;
+}
+
+static int gpio_irq_type(struct irq_data *d, unsigned type)
+{
+ struct gpio_bank *bank;
+ unsigned gpio;
+ int retval;
+ unsigned long flags;
+
+ if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
+ gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+ else
+ gpio = d->irq - IH_GPIO_BASE;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+
+ if (type & ~IRQ_TYPE_SENSE_MASK)
+ return -EINVAL;
+
+ /* OMAP1 allows only edge triggering */
+ if (!cpu_class_is_omap2()
+ && (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
+ return -EINVAL;
+
+ bank = irq_data_get_irq_chip_data(d);
+ spin_lock_irqsave(&bank->lock, flags);
+ retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+
+ return retval;
+}
+
+static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+{
+ void __iomem *reg = bank->base;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ /* MPUIO irqstatus is reset by reading the status register,
+ * so do nothing here */
+ return;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_STATUS;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_IRQSTATUS1;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_STATUS;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_IRQSTATUS1;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_IRQSTATUS0;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return;
+ }
+ __raw_writel(gpio_mask, reg);
+
+ /* Workaround for clearing DSP GPIO interrupts to allow retention */
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ reg = bank->base + OMAP24XX_GPIO_IRQSTATUS2;
+ else if (cpu_is_omap44xx())
+ reg = bank->base + OMAP4_GPIO_IRQSTATUS1;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ __raw_writel(gpio_mask, reg);
+
+ /* Flush posted write for the irq status to avoid spurious interrupts */
+ __raw_readl(reg);
+ }
+}
+
+static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
+{
+ _clear_gpio_irqbank(bank, 1 << get_gpio_index(gpio));
+}
+
+static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
+{
+ void __iomem *reg = bank->base;
+ int inv = 0;
+ u32 l;
+ u32 mask;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ mask = 0xffff;
+ inv = 1;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_MASK;
+ mask = 0xffff;
+ inv = 1;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_IRQENABLE1;
+ mask = 0xffff;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_MASK;
+ mask = 0xffffffff;
+ inv = 1;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_IRQENABLE1;
+ mask = 0xffffffff;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_IRQSTATUSSET0;
+ mask = 0xffffffff;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ l = __raw_readl(reg);
+ if (inv)
+ l = ~l;
+ l &= mask;
+ return l;
+}
+
+static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ l = __raw_readl(reg);
+ if (enable)
+ l &= ~(gpio_mask);
+ else
+ l |= gpio_mask;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_MASK;
+ l = __raw_readl(reg);
+ if (enable)
+ l &= ~(gpio_mask);
+ else
+ l |= gpio_mask;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ if (enable)
+ reg += OMAP1610_GPIO_SET_IRQENABLE1;
+ else
+ reg += OMAP1610_GPIO_CLEAR_IRQENABLE1;
+ l = gpio_mask;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_MASK;
+ l = __raw_readl(reg);
+ if (enable)
+ l &= ~(gpio_mask);
+ else
+ l |= gpio_mask;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ if (enable)
+ reg += OMAP24XX_GPIO_SETIRQENABLE1;
+ else
+ reg += OMAP24XX_GPIO_CLEARIRQENABLE1;
+ l = gpio_mask;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ if (enable)
+ reg += OMAP4_GPIO_IRQSTATUSSET0;
+ else
+ reg += OMAP4_GPIO_IRQSTATUSCLR0;
+ l = gpio_mask;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return;
+ }
+ __raw_writel(l, reg);
+}
+
+static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
+{
+ _enable_gpio_irqbank(bank, 1 << get_gpio_index(gpio), enable);
+}
+
+/*
+ * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
+ * The 1510 does not seem to have a wake-up register. If JTAG is connected
+ * to the target, the system always wakes up on GPIO events. While the
+ * system is running, all registered GPIO interrupts need to have wake-up
+ * enabled. When the system is suspended, only selected GPIO interrupts need
+ * to have wake-up enabled.
+ */
+static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
+{
+ unsigned long uninitialized_var(flags);
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_MPUIO:
+ case METHOD_GPIO_1610:
+ spin_lock_irqsave(&bank->lock, flags);
+ if (enable)
+ bank->suspend_wakeup |= (1 << gpio);
+ else
+ bank->suspend_wakeup &= ~(1 << gpio);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+#endif
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ case METHOD_GPIO_24XX:
+ case METHOD_GPIO_44XX:
+ if (bank->non_wakeup_gpios & (1 << gpio)) {
+ printk(KERN_ERR "Unable to modify wakeup on "
+ "non-wakeup GPIO%d\n",
+ (bank - gpio_bank) * 32 + gpio);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&bank->lock, flags);
+ if (enable)
+ bank->suspend_wakeup |= (1 << gpio);
+ else
+ bank->suspend_wakeup &= ~(1 << gpio);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+#endif
+ default:
+ printk(KERN_ERR "Can't enable GPIO wakeup for method %i\n",
+ bank->method);
+ return -EINVAL;
+ }
+}
+
+static void _reset_gpio(struct gpio_bank *bank, int gpio)
+{
+ _set_gpio_direction(bank, get_gpio_index(gpio), 1);
+ _set_gpio_irqenable(bank, gpio, 0);
+ _clear_gpio_irqstatus(bank, gpio);
+ _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+}
+
+/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
+static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank;
+ int retval;
+
+ if (check_gpio(gpio) < 0)
+ return -ENODEV;
+ bank = irq_data_get_irq_chip_data(d);
+ retval = _set_gpio_wakeup(bank, get_gpio_index(gpio), enable);
+
+ return retval;
+}
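
The irq_set_wake hook above is normally reached through the generic enable_irq_wake() call. As a hedged illustration (a hypothetical consumer sketch, not part of this patch; the ISR, label and GPIO number are invented), a driver would arm a GPIO line as a wakeup source roughly like this:

    /*
     * Hypothetical consumer sketch: arm a GPIO as a wakeup source.
     * gpio_to_irq()/enable_irq_wake() are standard kernel APIs; the
     * handler and label are invented for illustration.
     */
    #include <linux/gpio.h>
    #include <linux/interrupt.h>

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
        return IRQ_HANDLED;
    }

    static int example_wake_setup(unsigned gpio)
    {
        int irq, ret;

        ret = gpio_request(gpio, "wake-button");
        if (ret)
            return ret;
        gpio_direction_input(gpio);

        irq = gpio_to_irq(gpio);
        ret = request_irq(irq, example_isr, IRQF_TRIGGER_FALLING,
                          "wake-button", NULL);
        if (ret)
            goto err_free_gpio;

        /* ends up in gpio_wake_enable() -> _set_gpio_wakeup() above */
        ret = enable_irq_wake(irq);
        if (ret)
            goto err_free_irq;

        return 0;

    err_free_irq:
        free_irq(irq, NULL);
    err_free_gpio:
        gpio_free(gpio);
        return ret;
    }
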
+
+static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ /* Set trigger to none. You need to enable the desired trigger with
+ * request_irq() or set_irq_type().
+ */
+ _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+
+#ifdef CONFIG_ARCH_OMAP15XX
+ if (bank->method == METHOD_GPIO_1510) {
+ void __iomem *reg;
+
+ /* Claim the pin for MPU */
+ reg = bank->base + OMAP1510_GPIO_PIN_CONTROL;
+ __raw_writel(__raw_readl(reg) | (1 << offset), reg);
+ }
+#endif
+ if (!cpu_class_is_omap1()) {
+ if (!bank->mod_usage) {
+ void __iomem *reg = bank->base;
+ u32 ctrl;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ reg += OMAP24XX_GPIO_CTRL;
+ else if (cpu_is_omap44xx())
+ reg += OMAP4_GPIO_CTRL;
+ ctrl = __raw_readl(reg);
+ /* Module is enabled, clocks are not gated */
+ ctrl &= 0xFFFFFFFE;
+ __raw_writel(ctrl, reg);
+ }
+ bank->mod_usage |= 1 << offset;
+ }
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+#ifdef CONFIG_ARCH_OMAP16XX
+ if (bank->method == METHOD_GPIO_1610) {
+ /* Disable wake-up during idle for dynamic tick */
+ void __iomem *reg = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
+ __raw_writel(1 << offset, reg);
+ }
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ if (bank->method == METHOD_GPIO_24XX) {
+ /* Disable wake-up during idle for dynamic tick */
+ void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
+ __raw_writel(1 << offset, reg);
+ }
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ if (bank->method == METHOD_GPIO_44XX) {
+ /* Disable wake-up during idle for dynamic tick */
+ void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ __raw_writel(1 << offset, reg);
+ }
+#endif
+ if (!cpu_class_is_omap1()) {
+ bank->mod_usage &= ~(1 << offset);
+ if (!bank->mod_usage) {
+ void __iomem *reg = bank->base;
+ u32 ctrl;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ reg += OMAP24XX_GPIO_CTRL;
+ else if (cpu_is_omap44xx())
+ reg += OMAP4_GPIO_CTRL;
+ ctrl = __raw_readl(reg);
+ /* Module is disabled, clocks are gated */
+ ctrl |= 1;
+ __raw_writel(ctrl, reg);
+ }
+ }
+ _reset_gpio(bank, bank->chip.base + offset);
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+/*
+ * We need to unmask the GPIO bank interrupt as soon as possible to
+ * avoid missing GPIO interrupts for other lines in the bank.
+ * Then we need to mask-read-clear-unmask the triggered GPIO lines
+ * in the bank to avoid missing nested interrupts for a GPIO line.
+ * If we wait to unmask individual GPIO lines in the bank after the
+ * line's interrupt handler has been run, we may miss some nested
+ * interrupts.
+ */
+static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *isr_reg = NULL;
+ u32 isr;
+ unsigned int gpio_irq, gpio_index;
+ struct gpio_bank *bank;
+ u32 retrigger = 0;
+ int unmasked = 0;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ bank = irq_get_handler_data(irq);
+#ifdef CONFIG_ARCH_OMAP1
+ if (bank->method == METHOD_MPUIO)
+ isr_reg = bank->base +
+ OMAP_MPUIO_GPIO_INT / bank->stride;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ if (bank->method == METHOD_GPIO_1510)
+ isr_reg = bank->base + OMAP1510_GPIO_INT_STATUS;
+#endif
+#if defined(CONFIG_ARCH_OMAP16XX)
+ if (bank->method == METHOD_GPIO_1610)
+ isr_reg = bank->base + OMAP1610_GPIO_IRQSTATUS1;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ if (bank->method == METHOD_GPIO_7XX)
+ isr_reg = bank->base + OMAP7XX_GPIO_INT_STATUS;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ if (bank->method == METHOD_GPIO_24XX)
+ isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1;
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ if (bank->method == METHOD_GPIO_44XX)
+ isr_reg = bank->base + OMAP4_GPIO_IRQSTATUS0;
+#endif
+
+ if (WARN_ON(!isr_reg))
+ goto exit;
+
+ while (1) {
+ u32 isr_saved, level_mask = 0;
+ u32 enabled;
+
+ enabled = _get_gpio_irqbank_mask(bank);
+ isr_saved = isr = __raw_readl(isr_reg) & enabled;
+
+ if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
+ isr &= 0x0000ffff;
+
+ if (cpu_class_is_omap2()) {
+ level_mask = bank->level_mask & enabled;
+ }
+
+ /* Clear edge-sensitive interrupts before the handler(s) are
+ called so that we don't miss any interrupt that occurs while
+ they are executing */
+ _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 0);
+ _clear_gpio_irqbank(bank, isr_saved & ~level_mask);
+ _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 1);
+
+ /* if only edge-sensitive GPIO pin interrupts are configured,
+ we can unmask the GPIO bank interrupt immediately */
+ if (!level_mask && !unmasked) {
+ unmasked = 1;
+ chained_irq_exit(chip, desc);
+ }
+
+ isr |= retrigger;
+ retrigger = 0;
+ if (!isr)
+ break;
+
+ gpio_irq = bank->virtual_irq_start;
+ for (; isr != 0; isr >>= 1, gpio_irq++) {
+ gpio_index = get_gpio_index(irq_to_gpio(gpio_irq));
+
+ if (!(isr & 1))
+ continue;
+
+#ifdef CONFIG_ARCH_OMAP1
+ /*
+ * Some chips can't respond to both rising and falling
+ * at the same time. If this irq was requested with
+ * both flags, we need to flip the ICR data for the IRQ
+ * to respond to the IRQ for the opposite direction.
+ * This will be indicated in the bank toggle_mask.
+ */
+ if (bank->toggle_mask & (1 << gpio_index))
+ _toggle_gpio_edge_triggering(bank, gpio_index);
+#endif
+
+ generic_handle_irq(gpio_irq);
+ }
+ }
+ /* if the bank has any level-sensitive GPIO pin interrupts
+ configured, we must unmask the bank interrupt only after the
+ handler(s) have been executed, in order to avoid a spurious
+ bank interrupt */
+exit:
+ if (!unmasked)
+ chained_irq_exit(chip, desc);
+}
+
+static void gpio_irq_shutdown(struct irq_data *d)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ _reset_gpio(bank, gpio);
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static void gpio_ack_irq(struct irq_data *d)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ _clear_gpio_irqstatus(bank, gpio);
+}
+
+static void gpio_mask_irq(struct irq_data *d)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_irqenable(bank, gpio, 0);
+ _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static void gpio_unmask_irq(struct irq_data *d)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ unsigned int irq_mask = 1 << get_gpio_index(gpio);
+ u32 trigger = irqd_get_trigger_type(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ if (trigger)
+ _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
+
+ /* For level-triggered GPIOs, the clearing must be done after
+ * the HW source is cleared, thus after the handler has run */
+ if (bank->level_mask & irq_mask) {
+ _set_gpio_irqenable(bank, gpio, 0);
+ _clear_gpio_irqstatus(bank, gpio);
+ }
+
+ _set_gpio_irqenable(bank, gpio, 1);
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static struct irq_chip gpio_irq_chip = {
+ .name = "GPIO",
+ .irq_shutdown = gpio_irq_shutdown,
+ .irq_ack = gpio_ack_irq,
+ .irq_mask = gpio_mask_irq,
+ .irq_unmask = gpio_unmask_irq,
+ .irq_set_type = gpio_irq_type,
+ .irq_set_wake = gpio_wake_enable,
+};
+
+/*---------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_OMAP1
+
+/* MPUIO uses the always-on 32k clock */
+
+static void mpuio_ack_irq(struct irq_data *d)
+{
+ /* The ISR is reset automatically, so do nothing here. */
+}
+
+static void mpuio_mask_irq(struct irq_data *d)
+{
+ unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ _set_gpio_irqenable(bank, gpio, 0);
+}
+
+static void mpuio_unmask_irq(struct irq_data *d)
+{
+ unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ _set_gpio_irqenable(bank, gpio, 1);
+}
+
+static struct irq_chip mpuio_irq_chip = {
+ .name = "MPUIO",
+ .irq_ack = mpuio_ack_irq,
+ .irq_mask = mpuio_mask_irq,
+ .irq_unmask = mpuio_unmask_irq,
+ .irq_set_type = gpio_irq_type,
+#ifdef CONFIG_ARCH_OMAP16XX
+ /* REVISIT: assuming only 16xx supports MPUIO wake events */
+ .irq_set_wake = gpio_wake_enable,
+#endif
+};
+
+
+#define bank_is_mpuio(bank) ((bank)->method == METHOD_MPUIO)
+
+
+#ifdef CONFIG_ARCH_OMAP16XX
+
+#include <linux/platform_device.h>
+
+static int omap_mpuio_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ void __iomem *mask_reg = bank->base +
+ OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank->saved_wakeup = __raw_readl(mask_reg);
+ __raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static int omap_mpuio_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ void __iomem *mask_reg = bank->base +
+ OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ __raw_writel(bank->saved_wakeup, mask_reg);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
+ .suspend_noirq = omap_mpuio_suspend_noirq,
+ .resume_noirq = omap_mpuio_resume_noirq,
+};
+
+/* use platform_driver for this. */
+static struct platform_driver omap_mpuio_driver = {
+ .driver = {
+ .name = "mpuio",
+ .pm = &omap_mpuio_dev_pm_ops,
+ },
+};
+
+static struct platform_device omap_mpuio_device = {
+ .name = "mpuio",
+ .id = -1,
+ .dev = {
+ .driver = &omap_mpuio_driver.driver,
+ }
+ /* could list the /proc/iomem resources */
+};
+
+static inline void mpuio_init(void)
+{
+ struct gpio_bank *bank = get_gpio_bank(OMAP_MPUIO(0));
+ platform_set_drvdata(&omap_mpuio_device, bank);
+
+ if (platform_driver_register(&omap_mpuio_driver) == 0)
+ (void) platform_device_register(&omap_mpuio_device);
+}
+
+#else
+static inline void mpuio_init(void) {}
+#endif /* 16xx */
+
+#else
+
+extern struct irq_chip mpuio_irq_chip;
+
+#define bank_is_mpuio(bank) 0
+static inline void mpuio_init(void) {}
+
+#endif
+
+/*---------------------------------------------------------------------*/
+
+/* REVISIT these are stupid implementations! replace by ones that
+ * don't switch on METHOD_* and which mostly avoid spinlocks
+ */
+
+static int gpio_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_direction(bank, offset, 1);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+}
+
+static int gpio_is_input(struct gpio_bank *bank, int mask)
+{
+ void __iomem *reg = bank->base;
+
+ switch (bank->method) {
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_IO_CNTL / bank->stride;
+ break;
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DIR_CONTROL;
+ break;
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DIRECTION;
+ break;
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DIR_CONTROL;
+ break;
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_OE;
+ break;
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_OE;
+ break;
+ default:
+ WARN_ONCE(1, "gpio_is_input: incorrect OMAP GPIO method");
+ return -EINVAL;
+ }
+ return __raw_readl(reg) & mask;
+}
+
+static int gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank;
+ void __iomem *reg;
+ int gpio;
+ u32 mask;
+
+ gpio = chip->base + offset;
+ bank = get_gpio_bank(gpio);
+ reg = bank->base;
+ mask = 1 << get_gpio_index(gpio);
+
+ if (gpio_is_input(bank, mask))
+ return _get_gpio_datain(bank, gpio);
+ else
+ return _get_gpio_dataout(bank, gpio);
+}
+
+static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_dataout(bank, offset, value);
+ _set_gpio_direction(bank, offset, 0);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+}
+
+static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
+ unsigned debounce)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+
+ if (!bank->dbck) {
+ bank->dbck = clk_get(bank->dev, "dbclk");
+ if (IS_ERR(bank->dbck))
+ dev_err(bank->dev, "Could not get gpio dbck\n");
+ }
+
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_debounce(bank, offset, debounce);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
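
The set_debounce hook above is reached through the generic gpio_set_debounce() gpiolib call. A minimal, hypothetical usage sketch (GPIO number and label invented; the debounce value is in microseconds):

    /* Hypothetical usage: debounce a key-row GPIO for ~5 ms */
    #include <linux/gpio.h>

    static int example_keyrow_init(unsigned gpio)
    {
        int ret;

        ret = gpio_request(gpio, "keypad-row");
        if (ret)
            return ret;
        gpio_direction_input(gpio);

        /* routed to gpio_debounce() -> _set_gpio_debounce() above */
        return gpio_set_debounce(gpio, 5000);
    }
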
+
+static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_dataout(bank, offset, value);
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ return bank->virtual_irq_start + offset;
+}
+
+/*---------------------------------------------------------------------*/
+
+static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+{
+ u32 rev;
+
+ if (cpu_is_omap16xx() && (bank->method == METHOD_MPUIO))
+ rev = __raw_readw(bank->base + OMAP1610_GPIO_REVISION);
+ else if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ rev = __raw_readl(bank->base + OMAP24XX_GPIO_REVISION);
+ else if (cpu_is_omap44xx())
+ rev = __raw_readl(bank->base + OMAP4_GPIO_REVISION);
+ else
+ return;
+
+ printk(KERN_INFO "OMAP GPIO hardware version %d.%d\n",
+ (rev >> 4) & 0x0f, rev & 0x0f);
+}
+
+/* This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
+static inline int init_gpio_info(struct platform_device *pdev)
+{
+ /* TODO: Analyze removing gpio_bank_count usage from driver code */
+ gpio_bank = kzalloc(gpio_bank_count * sizeof(struct gpio_bank),
+ GFP_KERNEL);
+ if (!gpio_bank) {
+ dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* TODO: Cleanup cpu_is_* checks */
+static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
+{
+ if (cpu_class_is_omap2()) {
+ if (cpu_is_omap44xx()) {
+ __raw_writel(0xffffffff, bank->base +
+ OMAP4_GPIO_IRQSTATUSCLR0);
+ __raw_writel(0x00000000, bank->base +
+ OMAP4_GPIO_DEBOUNCENABLE);
+ /* Initialize interface clk ungated, module enabled */
+ __raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
+ } else if (cpu_is_omap34xx()) {
+ __raw_writel(0x00000000, bank->base +
+ OMAP24XX_GPIO_IRQENABLE1);
+ __raw_writel(0xffffffff, bank->base +
+ OMAP24XX_GPIO_IRQSTATUS1);
+ __raw_writel(0x00000000, bank->base +
+ OMAP24XX_GPIO_DEBOUNCE_EN);
+
+ /* Initialize interface clk ungated, module enabled */
+ __raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
+ } else if (cpu_is_omap24xx()) {
+ static const u32 non_wakeup_gpios[] = {
+ 0xe203ffc0, 0x08700040
+ };
+ if (id < ARRAY_SIZE(non_wakeup_gpios))
+ bank->non_wakeup_gpios = non_wakeup_gpios[id];
+ }
+ } else if (cpu_class_is_omap1()) {
+ if (bank_is_mpuio(bank))
+ __raw_writew(0xffff, bank->base +
+ OMAP_MPUIO_GPIO_MASKIT / bank->stride);
+ if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
+ __raw_writew(0xffff, bank->base
+ + OMAP1510_GPIO_INT_MASK);
+ __raw_writew(0x0000, bank->base
+ + OMAP1510_GPIO_INT_STATUS);
+ }
+ if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
+ __raw_writew(0x0000, bank->base
+ + OMAP1610_GPIO_IRQENABLE1);
+ __raw_writew(0xffff, bank->base
+ + OMAP1610_GPIO_IRQSTATUS1);
+ __raw_writew(0x0014, bank->base
+ + OMAP1610_GPIO_SYSCONFIG);
+
+ /*
+ * Enable system clock for GPIO module.
+ * The CAM_CLK_CTRL *is* really the right place.
+ */
+ omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04,
+ ULPD_CAM_CLK_CTRL);
+ }
+ if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
+ __raw_writel(0xffffffff, bank->base
+ + OMAP7XX_GPIO_INT_MASK);
+ __raw_writel(0x00000000, bank->base
+ + OMAP7XX_GPIO_INT_STATUS);
+ }
+ }
+}
+
+static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
+{
+ int j;
+ static int gpio;
+
+ bank->mod_usage = 0;
+ /*
+ * REVISIT eventually switch from OMAP-specific gpio structs
+ * over to the generic ones
+ */
+ bank->chip.request = omap_gpio_request;
+ bank->chip.free = omap_gpio_free;
+ bank->chip.direction_input = gpio_input;
+ bank->chip.get = gpio_get;
+ bank->chip.direction_output = gpio_output;
+ bank->chip.set_debounce = gpio_debounce;
+ bank->chip.set = gpio_set;
+ bank->chip.to_irq = gpio_2irq;
+ if (bank_is_mpuio(bank)) {
+ bank->chip.label = "mpuio";
+#ifdef CONFIG_ARCH_OMAP16XX
+ bank->chip.dev = &omap_mpuio_device.dev;
+#endif
+ bank->chip.base = OMAP_MPUIO(0);
+ } else {
+ bank->chip.label = "gpio";
+ bank->chip.base = gpio;
+ gpio += bank_width;
+ }
+ bank->chip.ngpio = bank_width;
+
+ gpiochip_add(&bank->chip);
+
+ for (j = bank->virtual_irq_start;
+ j < bank->virtual_irq_start + bank_width; j++) {
+ irq_set_lockdep_class(j, &gpio_lock_class);
+ irq_set_chip_data(j, bank);
+ if (bank_is_mpuio(bank))
+ irq_set_chip(j, &mpuio_irq_chip);
+ else
+ irq_set_chip(j, &gpio_irq_chip);
+ irq_set_handler(j, handle_simple_irq);
+ set_irq_flags(j, IRQF_VALID);
+ }
+ irq_set_chained_handler(bank->irq, gpio_irq_handler);
+ irq_set_handler_data(bank->irq, bank);
+}
+
+static int __devinit omap_gpio_probe(struct platform_device *pdev)
+{
+ static int gpio_init_done;
+ struct omap_gpio_platform_data *pdata;
+ struct resource *res;
+ int id;
+ struct gpio_bank *bank;
+
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+
+ if (!gpio_init_done) {
+ int ret;
+
+ ret = init_gpio_info(pdev);
+ if (ret)
+ return ret;
+ }
+
+ id = pdev->id;
+ bank = &gpio_bank[id];
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (unlikely(!res)) {
+ dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n", id);
+ return -ENODEV;
+ }
+
+ bank->irq = res->start;
+ bank->virtual_irq_start = pdata->virtual_irq_start;
+ bank->method = pdata->bank_type;
+ bank->dev = &pdev->dev;
+ bank->dbck_flag = pdata->dbck_flag;
+ bank->stride = pdata->bank_stride;
+ bank_width = pdata->bank_width;
+
+ spin_lock_init(&bank->lock);
+
+ /* Static mapping, never released */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!res)) {
+ dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n", id);
+ return -ENODEV;
+ }
+
+ bank->base = ioremap(res->start, resource_size(res));
+ if (!bank->base) {
+ dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n", id);
+ return -ENOMEM;
+ }
+
+ pm_runtime_enable(bank->dev);
+ pm_runtime_get_sync(bank->dev);
+
+ omap_gpio_mod_init(bank, id);
+ omap_gpio_chip_init(bank);
+ omap_gpio_show_rev(bank);
+
+ if (!gpio_init_done)
+ gpio_init_done = 1;
+
+ return 0;
+}
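
For context, a hypothetical sketch (not part of this patch) of the per-bank platform device a mach- layer would register so that omap_gpio_probe() can bind to it. The pdata field names follow the dereferences in the probe above; struct omap_gpio_platform_data is assumed to come from the plat headers, and the register base and IRQ number are invented placeholders:

    /* Hypothetical board-side registration for one GPIO bank. */
    #include <linux/platform_device.h>

    static struct omap_gpio_platform_data example_gpio1_pdata = {
        .virtual_irq_start  = IH_GPIO_BASE,
        .bank_type          = METHOD_GPIO_24XX,
        .dbck_flag          = true,
        .bank_width         = 32,
    };

    static struct resource example_gpio1_resources[] = {
        {
            .start  = 0x48310000,   /* placeholder register base */
            .end    = 0x48310fff,
            .flags  = IORESOURCE_MEM,
        },
        {
            .start  = 29,           /* placeholder bank IRQ */
            .end    = 29,
            .flags  = IORESOURCE_IRQ,
        },
    };

    static struct platform_device example_gpio1_device = {
        .name           = "omap_gpio",
        .id             = 0,
        .dev            = {
            .platform_data  = &example_gpio1_pdata,
        },
        .resource       = example_gpio1_resources,
        .num_resources  = ARRAY_SIZE(example_gpio1_resources),
    };

    /* platform_device_register(&example_gpio1_device) would then trigger
     * the probe above once omap_gpio_drv_reg() has registered the driver. */
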
+
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
+static int omap_gpio_suspend(void)
+{
+ int i;
+
+ if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
+ return 0;
+
+ for (i = 0; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ void __iomem *wake_status;
+ void __iomem *wake_clear;
+ void __iomem *wake_set;
+ unsigned long flags;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ wake_status = bank->base + OMAP1610_GPIO_WAKEUPENABLE;
+ wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
+ wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
+ wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
+ wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ break;
+#endif
+ default:
+ continue;
+ }
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank->saved_wakeup = __raw_readl(wake_status);
+ __raw_writel(0xffffffff, wake_clear);
+ __raw_writel(bank->suspend_wakeup, wake_set);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ }
+
+ return 0;
+}
+
+static void omap_gpio_resume(void)
+{
+ int i;
+
+ if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
+ return;
+
+ for (i = 0; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ void __iomem *wake_clear;
+ void __iomem *wake_set;
+ unsigned long flags;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
+ wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
+ wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ break;
+#endif
+ default:
+ continue;
+ }
+
+ spin_lock_irqsave(&bank->lock, flags);
+ __raw_writel(0xffffffff, wake_clear);
+ __raw_writel(bank->saved_wakeup, wake_set);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ }
+}
+
+static struct syscore_ops omap_gpio_syscore_ops = {
+ .suspend = omap_gpio_suspend,
+ .resume = omap_gpio_resume,
+};
+
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+
+static int workaround_enabled;
+
+void omap2_gpio_prepare_for_idle(int off_mode)
+{
+ int i, c = 0;
+ int min = 0;
+
+ if (cpu_is_omap34xx())
+ min = 1;
+
+ for (i = min; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ u32 l1 = 0, l2 = 0;
+ int j;
+
+ for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
+ clk_disable(bank->dbck);
+
+ if (!off_mode)
+ continue;
+
+ /* If going to OFF, remove triggering for all
+ * non-wakeup GPIOs. Otherwise spurious IRQs will be
+ * generated. See OMAP2420 Errata item 1.101. */
+ if (!(bank->enabled_non_wakeup_gpios))
+ continue;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ bank->saved_datain = __raw_readl(bank->base +
+ OMAP24XX_GPIO_DATAIN);
+ l1 = __raw_readl(bank->base +
+ OMAP24XX_GPIO_FALLINGDETECT);
+ l2 = __raw_readl(bank->base +
+ OMAP24XX_GPIO_RISINGDETECT);
+ }
+
+ if (cpu_is_omap44xx()) {
+ bank->saved_datain = __raw_readl(bank->base +
+ OMAP4_GPIO_DATAIN);
+ l1 = __raw_readl(bank->base +
+ OMAP4_GPIO_FALLINGDETECT);
+ l2 = __raw_readl(bank->base +
+ OMAP4_GPIO_RISINGDETECT);
+ }
+
+ bank->saved_fallingdetect = l1;
+ bank->saved_risingdetect = l2;
+ l1 &= ~bank->enabled_non_wakeup_gpios;
+ l2 &= ~bank->enabled_non_wakeup_gpios;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ __raw_writel(l1, bank->base +
+ OMAP24XX_GPIO_FALLINGDETECT);
+ __raw_writel(l2, bank->base +
+ OMAP24XX_GPIO_RISINGDETECT);
+ }
+
+ if (cpu_is_omap44xx()) {
+ __raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT);
+ __raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT);
+ }
+
+ c++;
+ }
+ if (!c) {
+ workaround_enabled = 0;
+ return;
+ }
+ workaround_enabled = 1;
+}
+
+void omap2_gpio_resume_after_idle(void)
+{
+ int i;
+ int min = 0;
+
+ if (cpu_is_omap34xx())
+ min = 1;
+ for (i = min; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ u32 l = 0, gen, gen0, gen1;
+ int j;
+
+ for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
+ clk_enable(bank->dbck);
+
+ if (!workaround_enabled)
+ continue;
+
+ if (!(bank->enabled_non_wakeup_gpios))
+ continue;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ __raw_writel(bank->saved_fallingdetect,
+ bank->base + OMAP24XX_GPIO_FALLINGDETECT);
+ __raw_writel(bank->saved_risingdetect,
+ bank->base + OMAP24XX_GPIO_RISINGDETECT);
+ l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
+ }
+
+ if (cpu_is_omap44xx()) {
+ __raw_writel(bank->saved_fallingdetect,
+ bank->base + OMAP4_GPIO_FALLINGDETECT);
+ __raw_writel(bank->saved_risingdetect,
+ bank->base + OMAP4_GPIO_RISINGDETECT);
+ l = __raw_readl(bank->base + OMAP4_GPIO_DATAIN);
+ }
+
+ /* Check if any of the non-wakeup interrupt GPIOs have changed
+ * state. If so, generate an IRQ by software. This is
+ * horribly racy, but it's the best we can do to work around
+ * this silicon bug. */
+ l ^= bank->saved_datain;
+ l &= bank->enabled_non_wakeup_gpios;
+
+ /*
+ * No need to generate IRQs for the rising edge for gpio IRQs
+ * configured with falling edge only; and vice versa.
+ */
+ gen0 = l & bank->saved_fallingdetect;
+ gen0 &= bank->saved_datain;
+
+ gen1 = l & bank->saved_risingdetect;
+ gen1 &= ~(bank->saved_datain);
+
+ /* FIXME: Consider GPIO IRQs with level detections properly! */
+ gen = l & (~(bank->saved_fallingdetect) &
+ ~(bank->saved_risingdetect));
+ /* Consider all GPIO IRQs that need to be updated */
+ gen |= gen0 | gen1;
+
+ if (gen) {
+ u32 old0, old1;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ old0 = __raw_readl(bank->base +
+ OMAP24XX_GPIO_LEVELDETECT0);
+ old1 = __raw_readl(bank->base +
+ OMAP24XX_GPIO_LEVELDETECT1);
+ __raw_writel(old0 | gen, bank->base +
+ OMAP24XX_GPIO_LEVELDETECT0);
+ __raw_writel(old1 | gen, bank->base +
+ OMAP24XX_GPIO_LEVELDETECT1);
+ __raw_writel(old0, bank->base +
+ OMAP24XX_GPIO_LEVELDETECT0);
+ __raw_writel(old1, bank->base +
+ OMAP24XX_GPIO_LEVELDETECT1);
+ }
+
+ if (cpu_is_omap44xx()) {
+ old0 = __raw_readl(bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ old1 = __raw_readl(bank->base +
+ OMAP4_GPIO_LEVELDETECT1);
+ __raw_writel(old0 | l, bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ __raw_writel(old1 | l, bank->base +
+ OMAP4_GPIO_LEVELDETECT1);
+ __raw_writel(old0, bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ __raw_writel(old1, bank->base +
+ OMAP4_GPIO_LEVELDETECT1);
+ }
+ }
+ }
+
+}
+
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+/* save the registers of banks 2-6 */
+void omap_gpio_save_context(void)
+{
+ int i;
+
+ /* saving banks from 2-6 only since GPIO1 is in WKUP */
+ for (i = 1; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ gpio_context[i].irqenable1 =
+ __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1);
+ gpio_context[i].irqenable2 =
+ __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE2);
+ gpio_context[i].wake_en =
+ __raw_readl(bank->base + OMAP24XX_GPIO_WAKE_EN);
+ gpio_context[i].ctrl =
+ __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
+ gpio_context[i].oe =
+ __raw_readl(bank->base + OMAP24XX_GPIO_OE);
+ gpio_context[i].leveldetect0 =
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0);
+ gpio_context[i].leveldetect1 =
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
+ gpio_context[i].risingdetect =
+ __raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT);
+ gpio_context[i].fallingdetect =
+ __raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
+ gpio_context[i].dataout =
+ __raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
+ }
+}
+
+/* restore the required registers of banks 2-6 */
+void omap_gpio_restore_context(void)
+{
+ int i;
+
+ for (i = 1; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ __raw_writel(gpio_context[i].irqenable1,
+ bank->base + OMAP24XX_GPIO_IRQENABLE1);
+ __raw_writel(gpio_context[i].irqenable2,
+ bank->base + OMAP24XX_GPIO_IRQENABLE2);
+ __raw_writel(gpio_context[i].wake_en,
+ bank->base + OMAP24XX_GPIO_WAKE_EN);
+ __raw_writel(gpio_context[i].ctrl,
+ bank->base + OMAP24XX_GPIO_CTRL);
+ __raw_writel(gpio_context[i].oe,
+ bank->base + OMAP24XX_GPIO_OE);
+ __raw_writel(gpio_context[i].leveldetect0,
+ bank->base + OMAP24XX_GPIO_LEVELDETECT0);
+ __raw_writel(gpio_context[i].leveldetect1,
+ bank->base + OMAP24XX_GPIO_LEVELDETECT1);
+ __raw_writel(gpio_context[i].risingdetect,
+ bank->base + OMAP24XX_GPIO_RISINGDETECT);
+ __raw_writel(gpio_context[i].fallingdetect,
+ bank->base + OMAP24XX_GPIO_FALLINGDETECT);
+ __raw_writel(gpio_context[i].dataout,
+ bank->base + OMAP24XX_GPIO_DATAOUT);
+ }
+}
+#endif
+
+static struct platform_driver omap_gpio_driver = {
+ .probe = omap_gpio_probe,
+ .driver = {
+ .name = "omap_gpio",
+ },
+};
+
+/*
+ * The gpio driver needs to be registered before the machine_init
+ * functions access the gpio APIs.
+ * Hence omap_gpio_drv_reg() is a postcore_initcall.
+ */
+static int __init omap_gpio_drv_reg(void)
+{
+ return platform_driver_register(&omap_gpio_driver);
+}
+postcore_initcall(omap_gpio_drv_reg);
+
+static int __init omap_gpio_sysinit(void)
+{
+ mpuio_init();
+
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
+ if (cpu_is_omap16xx() || cpu_class_is_omap2())
+ register_syscore_ops(&omap_gpio_syscore_ops);
+#endif
+
+ return 0;
+}
+
+arch_initcall(omap_gpio_sysinit);
diff --git a/drivers/gpio/gpio-plat-samsung.c b/drivers/gpio/gpio-plat-samsung.c
new file mode 100644
index 000000000000..ea37c0461788
--- /dev/null
+++ b/drivers/gpio/gpio-plat-samsung.c
@@ -0,0 +1,206 @@
+/* arch/arm/plat-samsung/gpiolib.c
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * SAMSUNG - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
+#ifndef DEBUG_GPIO
+#define gpio_dbg(x...) do { } while (0)
+#else
+#define gpio_dbg(x...) printk(KERN_DEBUG x)
+#endif
+
+/* The samsung_gpiolib_4bit routines control the gpio banks where the
+ * gpio configuration register (GPxCON) has 4 bits per GPIO, as in the
+ * following example:
+ *
+ * base + 0x00: Control register, 4 bits per gpio
+ * gpio n: 4 bits starting at (4*n)
+ * 0000 = input, 0001 = output, others mean special-function
+ * base + 0x04: Data register, 1 bit per gpio
+ * bit n: data bit n
+ *
+ * Note, since the data register is one bit per gpio and is at base + 0x4
+ * we can use s3c_gpiolib_get and s3c_gpiolib_set to change the state of
+ * the output.
+*/
+
+static int samsung_gpiolib_4bit_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long con;
+
+ con = __raw_readl(base + GPIOCON_OFF);
+ con &= ~(0xf << con_4bit_shift(offset));
+ __raw_writel(con, base + GPIOCON_OFF);
+
+ gpio_dbg("%s: %p: CON now %08lx\n", __func__, base, con);
+
+ return 0;
+}
+
+static int samsung_gpiolib_4bit_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long con;
+ unsigned long dat;
+
+ con = __raw_readl(base + GPIOCON_OFF);
+ con &= ~(0xf << con_4bit_shift(offset));
+ con |= 0x1 << con_4bit_shift(offset);
+
+ dat = __raw_readl(base + GPIODAT_OFF);
+
+ if (value)
+ dat |= 1 << offset;
+ else
+ dat &= ~(1 << offset);
+
+ __raw_writel(dat, base + GPIODAT_OFF);
+ __raw_writel(con, base + GPIOCON_OFF);
+ __raw_writel(dat, base + GPIODAT_OFF);
+
+ gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+
+ return 0;
+}
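
To make the 4-bit field arithmetic above concrete, a small illustrative helper follows; it is a sketch only and assumes con_4bit_shift(offset) expands to offset * 4, which this patch does not show:

    /*
     * Illustration only: compute the GPxCON value that turns one pin into
     * an output while leaving the other 4-bit fields untouched.  With an
     * all-input bank (con == 0x00000000) and offset 3, the field occupies
     * bits 15..12 and the result is 0x00001000.
     */
    static unsigned long example_4bit_con_as_output(unsigned long con,
                                                    unsigned int offset)
    {
        unsigned int shift = offset * 4;    /* assumed con_4bit_shift() */

        con &= ~(0xfUL << shift);           /* clear the 4-bit field */
        con |= 0x1UL << shift;              /* 0001 = output */
        return con;
    }
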
+
+/* The next set of routines are for the case where the GPIO configuration
+ * registers are 4 bits per GPIO but there is more than one register (the
+ * bank has more than 8 GPIOs).
+ *
+ * This case is similar to the 4-bit case, but the registers are as
+ * follows:
+ *
+ * base + 0x00: Control register, 4 bits per gpio (lower 8 GPIOs)
+ * gpio n: 4 bits starting at (4*n)
+ * 0000 = input, 0001 = output, others mean special-function
+ * base + 0x04: Control register, 4 bits per gpio (up to 8 additional GPIOs)
+ * gpio n: 4 bits starting at (4*n)
+ * 0000 = input, 0001 = output, others mean special-function
+ * base + 0x08: Data register, 1 bit per gpio
+ * bit n: data bit n
+ *
+ * To allow us to use the s3c_gpiolib_get and s3c_gpiolib_set routines we
+ * store the 'base + 0x4' address so that these routines see the data
+ * register at ourchip->base + 0x04.
+ */
+
+static int samsung_gpiolib_4bit2_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+ void __iomem *base = ourchip->base;
+ void __iomem *regcon = base;
+ unsigned long con;
+
+ if (offset > 7)
+ offset -= 8;
+ else
+ regcon -= 4;
+
+ con = __raw_readl(regcon);
+ con &= ~(0xf << con_4bit_shift(offset));
+ __raw_writel(con, regcon);
+
+ gpio_dbg("%s: %p: CON %08lx\n", __func__, base, con);
+
+ return 0;
+}
+
+static int samsung_gpiolib_4bit2_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+ void __iomem *base = ourchip->base;
+ void __iomem *regcon = base;
+ unsigned long con;
+ unsigned long dat;
+ unsigned con_offset = offset;
+
+ if (con_offset > 7)
+ con_offset -= 8;
+ else
+ regcon -= 4;
+
+ con = __raw_readl(regcon);
+ con &= ~(0xf << con_4bit_shift(con_offset));
+ con |= 0x1 << con_4bit_shift(con_offset);
+
+ dat = __raw_readl(base + GPIODAT_OFF);
+
+ if (value)
+ dat |= 1 << offset;
+ else
+ dat &= ~(1 << offset);
+
+ __raw_writel(dat, base + GPIODAT_OFF);
+ __raw_writel(con, regcon);
+ __raw_writel(dat, base + GPIODAT_OFF);
+
+ gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+
+ return 0;
+}
+
+void __init samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip)
+{
+ chip->chip.direction_input = samsung_gpiolib_4bit_input;
+ chip->chip.direction_output = samsung_gpiolib_4bit_output;
+ chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
+}
+
+void __init samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip)
+{
+ chip->chip.direction_input = samsung_gpiolib_4bit2_input;
+ chip->chip.direction_output = samsung_gpiolib_4bit2_output;
+ chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
+}
+
+void __init samsung_gpiolib_add_4bit_chips(struct s3c_gpio_chip *chip,
+ int nr_chips)
+{
+ for (; nr_chips > 0; nr_chips--, chip++) {
+ samsung_gpiolib_add_4bit(chip);
+ s3c_gpiolib_add(chip);
+ }
+}
+
+void __init samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
+ int nr_chips)
+{
+ for (; nr_chips > 0; nr_chips--, chip++) {
+ samsung_gpiolib_add_4bit2(chip);
+ s3c_gpiolib_add(chip);
+ }
+}
+
+void __init samsung_gpiolib_add_2bit_chips(struct s3c_gpio_chip *chip,
+ int nr_chips)
+{
+ for (; nr_chips > 0; nr_chips--, chip++)
+ s3c_gpiolib_add(chip);
+}
diff --git a/drivers/gpio/gpio-s5pc100.c b/drivers/gpio/gpio-s5pc100.c
new file mode 100644
index 000000000000..2842394b28b5
--- /dev/null
+++ b/drivers/gpio/gpio-s5pc100.c
@@ -0,0 +1,355 @@
+/* linux/arch/arm/mach-s5pc100/gpiolib.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Copyright 2009 Samsung Electronics Co
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * S5PC100 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/map.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
+/* S5PC100 GPIO bank summary:
+ *
+ * Bank GPIOs Style INT Type
+ * A0 8 4Bit GPIO_INT0
+ * A1 5 4Bit GPIO_INT1
+ * B 8 4Bit GPIO_INT2
+ * C 5 4Bit GPIO_INT3
+ * D 7 4Bit GPIO_INT4
+ * E0 8 4Bit GPIO_INT5
+ * E1 6 4Bit GPIO_INT6
+ * F0 8 4Bit GPIO_INT7
+ * F1 8 4Bit GPIO_INT8
+ * F2 8 4Bit GPIO_INT9
+ * F3 4 4Bit GPIO_INT10
+ * G0 8 4Bit GPIO_INT11
+ * G1 3 4Bit GPIO_INT12
+ * G2 7 4Bit GPIO_INT13
+ * G3 7 4Bit GPIO_INT14
+ * H0 8 4Bit WKUP_INT
+ * H1 8 4Bit WKUP_INT
+ * H2 8 4Bit WKUP_INT
+ * H3 8 4Bit WKUP_INT
+ * I 8 4Bit GPIO_INT15
+ * J0 8 4Bit GPIO_INT16
+ * J1 5 4Bit GPIO_INT17
+ * J2 8 4Bit GPIO_INT18
+ * J3 8 4Bit GPIO_INT19
+ * J4 4 4Bit GPIO_INT20
+ * K0 8 4Bit None
+ * K1 6 4Bit None
+ * K2 8 4Bit None
+ * K3 8 4Bit None
+ * L0 8 4Bit None
+ * L1 8 4Bit None
+ * L2 8 4Bit None
+ * L3 8 4Bit None
+ */
+
+static struct s3c_gpio_cfg gpio_cfg = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_eint = {
+ .cfg_eint = 0xf,
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_noint = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+/*
+ * GPIO bank's base address given the index of the bank in the
+ * list of all gpio banks.
+ */
+#define S5PC100_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
+
+/*
+ * Following are the gpio banks in S5PC100.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * structure gpio_cfg in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: The initialization of 'base' member of s3c_gpio_chip structure
+ * uses the above macro and depends on the banks being listed in order here.
+ */
+static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
+ {
+ .chip = {
+ .base = S5PC100_GPA0(0),
+ .ngpio = S5PC100_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPA1(0),
+ .ngpio = S5PC100_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPB(0),
+ .ngpio = S5PC100_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPC(0),
+ .ngpio = S5PC100_GPIO_C_NR,
+ .label = "GPC",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPD(0),
+ .ngpio = S5PC100_GPIO_D_NR,
+ .label = "GPD",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPE0(0),
+ .ngpio = S5PC100_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPE1(0),
+ .ngpio = S5PC100_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF0(0),
+ .ngpio = S5PC100_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF1(0),
+ .ngpio = S5PC100_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF2(0),
+ .ngpio = S5PC100_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF3(0),
+ .ngpio = S5PC100_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG0(0),
+ .ngpio = S5PC100_GPIO_G0_NR,
+ .label = "GPG0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG1(0),
+ .ngpio = S5PC100_GPIO_G1_NR,
+ .label = "GPG1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG2(0),
+ .ngpio = S5PC100_GPIO_G2_NR,
+ .label = "GPG2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG3(0),
+ .ngpio = S5PC100_GPIO_G3_NR,
+ .label = "GPG3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPI(0),
+ .ngpio = S5PC100_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ0(0),
+ .ngpio = S5PC100_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ1(0),
+ .ngpio = S5PC100_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ2(0),
+ .ngpio = S5PC100_GPIO_J2_NR,
+ .label = "GPJ2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ3(0),
+ .ngpio = S5PC100_GPIO_J3_NR,
+ .label = "GPJ3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ4(0),
+ .ngpio = S5PC100_GPIO_J4_NR,
+ .label = "GPJ4",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPK0(0),
+ .ngpio = S5PC100_GPIO_K0_NR,
+ .label = "GPK0",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPK1(0),
+ .ngpio = S5PC100_GPIO_K1_NR,
+ .label = "GPK1",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPK2(0),
+ .ngpio = S5PC100_GPIO_K2_NR,
+ .label = "GPK2",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPK3(0),
+ .ngpio = S5PC100_GPIO_K3_NR,
+ .label = "GPK3",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL0(0),
+ .ngpio = S5PC100_GPIO_L0_NR,
+ .label = "GPL0",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL1(0),
+ .ngpio = S5PC100_GPIO_L1_NR,
+ .label = "GPL1",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL2(0),
+ .ngpio = S5PC100_GPIO_L2_NR,
+ .label = "GPL2",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL3(0),
+ .ngpio = S5PC100_GPIO_L3_NR,
+ .label = "GPL3",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL4(0),
+ .ngpio = S5PC100_GPIO_L4_NR,
+ .label = "GPL4",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC00),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = S5PC100_GPH0(0),
+ .ngpio = S5PC100_GPIO_H0_NR,
+ .label = "GPH0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC20),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = S5PC100_GPH1(0),
+ .ngpio = S5PC100_GPIO_H1_NR,
+ .label = "GPH1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC40),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = S5PC100_GPH2(0),
+ .ngpio = S5PC100_GPIO_H2_NR,
+ .label = "GPH2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC60),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = S5PC100_GPH3(0),
+ .ngpio = S5PC100_GPIO_H3_NR,
+ .label = "GPH3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+};
+
+static __init int s5pc100_gpiolib_init(void)
+{
+ struct s3c_gpio_chip *chip = s5pc100_gpio_chips;
+ int nr_chips = ARRAY_SIZE(s5pc100_gpio_chips);
+ int gpioint_group = 0;
+ int i;
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ chip->group = gpioint_group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5PC100_BANK_BASE(i);
+ }
+
+ samsung_gpiolib_add_4bit_chips(s5pc100_gpio_chips, nr_chips);
+ s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
+
+ return 0;
+}
+core_initcall(s5pc100_gpiolib_init);
diff --git a/drivers/gpio/gpio-s5pv210.c b/drivers/gpio/gpio-s5pv210.c
new file mode 100644
index 000000000000..1ba20a703e05
--- /dev/null
+++ b/drivers/gpio/gpio-s5pv210.c
@@ -0,0 +1,288 @@
+/* linux/arch/arm/mach-s5pv210/gpiolib.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PV210 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+#include <mach/map.h>
+
+static struct s3c_gpio_cfg gpio_cfg = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_noint = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+/* GPIO bank's base address given the index of the bank in the
+ * list of all gpio banks.
+ */
+#define S5PV210_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
+
+/*
+ * Following are the gpio banks in S5PV210.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * structure gpio_cfg in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: The initialization of 'base' member of s3c_gpio_chip structure
+ * uses the above macro and depends on the banks being listed in order here.
+ */
+static struct s3c_gpio_chip s5pv210_gpio_4bit[] = {
+ {
+ .chip = {
+ .base = S5PV210_GPA0(0),
+ .ngpio = S5PV210_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPA1(0),
+ .ngpio = S5PV210_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPB(0),
+ .ngpio = S5PV210_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPC0(0),
+ .ngpio = S5PV210_GPIO_C0_NR,
+ .label = "GPC0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPC1(0),
+ .ngpio = S5PV210_GPIO_C1_NR,
+ .label = "GPC1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPD0(0),
+ .ngpio = S5PV210_GPIO_D0_NR,
+ .label = "GPD0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPD1(0),
+ .ngpio = S5PV210_GPIO_D1_NR,
+ .label = "GPD1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPE0(0),
+ .ngpio = S5PV210_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPE1(0),
+ .ngpio = S5PV210_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF0(0),
+ .ngpio = S5PV210_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF1(0),
+ .ngpio = S5PV210_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF2(0),
+ .ngpio = S5PV210_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF3(0),
+ .ngpio = S5PV210_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG0(0),
+ .ngpio = S5PV210_GPIO_G0_NR,
+ .label = "GPG0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG1(0),
+ .ngpio = S5PV210_GPIO_G1_NR,
+ .label = "GPG1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG2(0),
+ .ngpio = S5PV210_GPIO_G2_NR,
+ .label = "GPG2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG3(0),
+ .ngpio = S5PV210_GPIO_G3_NR,
+ .label = "GPG3",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_GPI(0),
+ .ngpio = S5PV210_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ0(0),
+ .ngpio = S5PV210_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ1(0),
+ .ngpio = S5PV210_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ2(0),
+ .ngpio = S5PV210_GPIO_J2_NR,
+ .label = "GPJ2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ3(0),
+ .ngpio = S5PV210_GPIO_J3_NR,
+ .label = "GPJ3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ4(0),
+ .ngpio = S5PV210_GPIO_J4_NR,
+ .label = "GPJ4",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP01(0),
+ .ngpio = S5PV210_GPIO_MP01_NR,
+ .label = "MP01",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP02(0),
+ .ngpio = S5PV210_GPIO_MP02_NR,
+ .label = "MP02",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP03(0),
+ .ngpio = S5PV210_GPIO_MP03_NR,
+ .label = "MP03",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP04(0),
+ .ngpio = S5PV210_GPIO_MP04_NR,
+ .label = "MP04",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP05(0),
+ .ngpio = S5PV210_GPIO_MP05_NR,
+ .label = "MP05",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC00),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = S5PV210_GPH0(0),
+ .ngpio = S5PV210_GPIO_H0_NR,
+ .label = "GPH0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC20),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = S5PV210_GPH1(0),
+ .ngpio = S5PV210_GPIO_H1_NR,
+ .label = "GPH1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC40),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = S5PV210_GPH2(0),
+ .ngpio = S5PV210_GPIO_H2_NR,
+ .label = "GPH2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC60),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = S5PV210_GPH3(0),
+ .ngpio = S5PV210_GPIO_H3_NR,
+ .label = "GPH3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+};
+
+static __init int s5pv210_gpiolib_init(void)
+{
+ struct s3c_gpio_chip *chip = s5pv210_gpio_4bit;
+ int nr_chips = ARRAY_SIZE(s5pv210_gpio_4bit);
+ int gpioint_group = 0;
+ int i = 0;
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ chip->group = gpioint_group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5PV210_BANK_BASE(i);
+ }
+
+ samsung_gpiolib_add_4bit_chips(s5pv210_gpio_4bit, nr_chips);
+ s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
+
+ return 0;
+}
+core_initcall(s5pv210_gpiolib_init);
diff --git a/drivers/gpio/gpio-u300.c b/drivers/gpio/gpio-u300.c
new file mode 100644
index 000000000000..d92790140fe5
--- /dev/null
+++ b/drivers/gpio/gpio-u300.c
@@ -0,0 +1,700 @@
+/*
+ *
+ * arch/arm/mach-u300/gpio.c
+ *
+ *
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * U300 GPIO module.
+ * This can driver either of the two basic GPIO cores
+ * available in the U300 platforms:
+ * COH 901 335 - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0)
+ * COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0)
+ * Notice that you also have inline macros in <asm-arch/gpio.h>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+/* Reference to GPIO block clock */
+static struct clk *clk;
+
+/* Memory resource */
+static struct resource *memres;
+static void __iomem *virtbase;
+static struct device *gpiodev;
+
+struct u300_gpio_port {
+ const char *name;
+ int irq;
+ int number;
+};
+
+
+static struct u300_gpio_port gpio_ports[] = {
+ {
+ .name = "gpio0",
+ .number = 0,
+ },
+ {
+ .name = "gpio1",
+ .number = 1,
+ },
+ {
+ .name = "gpio2",
+ .number = 2,
+ },
+#ifdef U300_COH901571_3
+ {
+ .name = "gpio3",
+ .number = 3,
+ },
+ {
+ .name = "gpio4",
+ .number = 4,
+ },
+#ifdef CONFIG_MACH_U300_BS335
+ {
+ .name = "gpio5",
+ .number = 5,
+ },
+ {
+ .name = "gpio6",
+ .number = 6,
+ },
+#endif
+#endif
+
+};
+
+
+#ifdef U300_COH901571_3
+
+/* Default output value */
+#define DEFAULT_OUTPUT_LOW 0
+#define DEFAULT_OUTPUT_HIGH 1
+
+/* GPIO Pull-Up status */
+#define DISABLE_PULL_UP 0
+#define ENABLE_PULL_UP 1
+
+#define GPIO_NOT_USED 0
+#define GPIO_IN 1
+#define GPIO_OUT 2
+
+struct u300_gpio_configuration_data {
+ unsigned char pin_usage;
+ unsigned char default_output_value;
+ unsigned char pull_up;
+};
+
+/* Initial configuration */
+const struct u300_gpio_configuration_data
+u300_gpio_config[U300_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
+#ifdef CONFIG_MACH_U300_BS335
+ /* Port 0, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 1, pins 0-7 */
+ {
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 2, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ },
+ /* Port 3, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 4, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 5, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 6, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ }
+#endif
+
+#ifdef CONFIG_MACH_U300_BS365
+ /* Port 0, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 1, pins 0-7 */
+ {
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 2, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ },
+ /* Port 3, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ },
+ /* Port 4, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ /* These 4 pins don't exist on DB3210 */
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ }
+#endif
+};
+#endif
+
+
+/* No users == we can power down GPIO */
+static int gpio_users;
+
+struct gpio_struct {
+ int (*callback)(void *);
+ void *data;
+ int users;
+};
+
+static struct gpio_struct gpio_pin[U300_GPIO_MAX];
+
+/*
+ * Let drivers register callback in order to get notified when there is
+ * an interrupt on the gpio pin
+ */
+int gpio_register_callback(unsigned gpio, int (*func)(void *arg), void *data)
+{
+ if (gpio_pin[gpio].callback)
+ dev_warn(gpiodev, "%s: WARNING: callback already "
+ "registered for gpio pin#%d\n", __func__, gpio);
+ gpio_pin[gpio].callback = func;
+ gpio_pin[gpio].data = data;
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_register_callback);
+
+int gpio_unregister_callback(unsigned gpio)
+{
+ if (!gpio_pin[gpio].callback)
+ dev_warn(gpiodev, "%s: WARNING: callback already "
+ "unregistered for gpio pin#%d\n", __func__, gpio);
+ gpio_pin[gpio].callback = NULL;
+ gpio_pin[gpio].data = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_unregister_callback);
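For illustration only (not part of the patch): a minimal sketch of how a client driver might use this callback interface. Everything except gpio_register_callback() and gpio_unregister_callback() — the pin number, the handler, the counter — is invented.

#include <linux/atomic.h>

static atomic_t example_edge_count = ATOMIC_INIT(0);

/* Hypothetical handler; called from the GPIO interrupt handler below */
static int example_gpio_event(void *arg)
{
	atomic_inc(&example_edge_count);
	return 0;
}

static int example_attach(void)
{
	/* GPIO 10 is an arbitrary example line */
	return gpio_register_callback(10, example_gpio_event, NULL);
}

static void example_detach(void)
{
	gpio_unregister_callback(10);
}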
+
+/* Non-zero means valid */
+int gpio_is_valid(int number)
+{
+ if (number >= 0 &&
+ number < (U300_GPIO_NUM_PORTS * U300_GPIO_PINS_PER_PORT))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(gpio_is_valid);
+
+int gpio_request(unsigned gpio, const char *label)
+{
+ if (gpio_pin[gpio].users)
+ return -EINVAL;
+ else
+ gpio_pin[gpio].users++;
+
+ gpio_users++;
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_request);
+
+void gpio_free(unsigned gpio)
+{
+ gpio_users--;
+ gpio_pin[gpio].users--;
+ if (unlikely(gpio_pin[gpio].users < 0)) {
+ dev_warn(gpiodev, "warning: gpio#%d release mismatch\n",
+ gpio);
+ gpio_pin[gpio].users = 0;
+ }
+
+ return;
+}
+EXPORT_SYMBOL(gpio_free);
+
+/* This returns zero or nonzero */
+int gpio_get_value(unsigned gpio)
+{
+ return readl(virtbase + U300_GPIO_PXPDIR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING) & (1 << (gpio & 0x07));
+}
+EXPORT_SYMBOL(gpio_get_value);
+
+/*
+ * We hope that the compiler will optimize away the unused branch
+ * in case "value" is a constant
+ */
+void gpio_set_value(unsigned gpio, int value)
+{
+ u32 val;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (value) {
+ /* set */
+ val = readl(virtbase + U300_GPIO_PXPDOR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING)
+ & (1 << (gpio & 0x07));
+ writel(val | (1 << (gpio & 0x07)), virtbase +
+ U300_GPIO_PXPDOR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
+ } else {
+ /* clear */
+ val = readl(virtbase + U300_GPIO_PXPDOR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING)
+ & (1 << (gpio & 0x07));
+ writel(val & ~(1 << (gpio & 0x07)), virtbase +
+ U300_GPIO_PXPDOR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(gpio_set_value);
+
+int gpio_direction_input(unsigned gpio)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (gpio > U300_GPIO_MAX)
+ return -EINVAL;
+
+ local_irq_save(flags);
+ val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ /* Mask out this pin */
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1));
+ /* This is not needed since it sets the bits to zero. */
+ /* val |= (U300_GPIO_PXPCR_PIN_MODE_INPUT << (gpio*2)); */
+ writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+ return 0;
+}
+EXPORT_SYMBOL(gpio_direction_input);
+
+int gpio_direction_output(unsigned gpio, int value)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (gpio > U300_GPIO_MAX)
+ return -EINVAL;
+
+ local_irq_save(flags);
+ val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ /* Mask out this pin */
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1));
+ /*
+ * FIXME: configure for push/pull, open drain or open source per pin
+ * in setup. The current driver will only support push/pull.
+ */
+ val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
+ << ((gpio & 0x07) << 1));
+ writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ gpio_set_value(gpio, value);
+ local_irq_restore(flags);
+ return 0;
+}
+EXPORT_SYMBOL(gpio_direction_output);
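A hedged usage sketch of the request/configure/drive sequence exported above; the pin number and label are arbitrary examples, not values taken from the patch.

static int example_drive_pin(void)
{
	int err;

	err = gpio_request(17, "example-output");	/* arbitrary pin */
	if (err)
		return err;

	/* push/pull output, initially driven high */
	err = gpio_direction_output(17, 1);
	if (err) {
		gpio_free(17);
		return err;
	}

	gpio_set_value(17, 0);	/* later, drive the line low again */
	gpio_free(17);
	return 0;
}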
+
+/*
+ * Enable an IRQ; edge selects rising edge (nonzero) or falling edge (zero).
+ */
+void enable_irq_on_gpio_pin(unsigned gpio, int edge)
+{
+ u32 val;
+ unsigned long flags;
+ local_irq_save(flags);
+
+ val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ val |= (1 << (gpio & 0x07));
+ writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ val = readl(virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ if (edge)
+ val |= (1 << (gpio & 0x07));
+ else
+ val &= ~(1 << (gpio & 0x07));
+ writel(val, virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(enable_irq_on_gpio_pin);
+
+void disable_irq_on_gpio_pin(unsigned gpio)
+{
+ u32 val;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ val &= ~(1 << (gpio & 0x07));
+ writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(disable_irq_on_gpio_pin);
+
+/* Enable (value == 0) or disable (value == 1) internal pullup */
+void gpio_pullup(unsigned gpio, int value)
+{
+ u32 val;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (value) {
+ val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ writel(val | (1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
+ } else {
+ val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ writel(val & ~(1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(gpio_pullup);
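Another illustrative sketch (not part of the patch), combining the IRQ and pull-up helpers above as their comments document them: value 0 enables the pull-up, edge 0 selects the falling edge. The pin number comes from the caller and a callback is assumed to be registered already.

static void example_arm_falling_edge_irq(unsigned gpio)
{
	gpio_pullup(gpio, 0);			/* 0 enables the internal pull-up */
	enable_irq_on_gpio_pin(gpio, 0);	/* 0 selects falling-edge trigger */
}

static void example_disarm_irq(unsigned gpio)
{
	disable_irq_on_gpio_pin(gpio);
	gpio_pullup(gpio, 1);			/* 1 disables the pull-up again */
}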
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ struct u300_gpio_port *port = dev_id;
+ u32 val;
+ int pin;
+
+ /* Read event register */
+ val = readl(virtbase + U300_GPIO_PXIEV + port->number *
+ U300_GPIO_PORTX_SPACING);
+ /* Mask with enable register */
+ val &= readl(virtbase + U300_GPIO_PXIEN + port->number *
+ U300_GPIO_PORTX_SPACING);
+ /* Mask relevant bits */
+ val &= U300_GPIO_PXIEV_ALL_IRQ_EVENT_MASK;
+ /* ACK IRQ (clear event) */
+ writel(val, virtbase + U300_GPIO_PXIEV + port->number *
+ U300_GPIO_PORTX_SPACING);
+ /* Dispatch the registered callback for each pending pin */
+ while (val != 0) {
+ unsigned gpio;
+
+ pin = __ffs(val);
+ /* mask off this pin */
+ val &= ~(1 << pin);
+ gpio = (port->number << 3) + pin;
+
+ if (gpio_pin[gpio].callback)
+ (void)gpio_pin[gpio].callback(gpio_pin[gpio].data);
+ else
+ dev_dbg(gpiodev, "stray GPIO IRQ on line %d\n",
+ gpio);
+ }
+ return IRQ_HANDLED;
+}
+
+static void gpio_set_initial_values(void)
+{
+#ifdef U300_COH901571_3
+ int i, j;
+ unsigned long flags;
+ u32 val;
+
+ /* Write default values to all pins */
+ for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
+ val = 0;
+ for (j = 0; j < 8; j++)
+ val |= (u32) (u300_gpio_config[i][j].default_output_value != DEFAULT_OUTPUT_LOW) << j;
+ local_irq_save(flags);
+ writel(val, virtbase + U300_GPIO_PXPDOR + i * U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+ }
+
+ /*
+ * For each port, configure pins marked 'GPIO_OUT' or 'GPIO_NOT_USED'
+ * as outputs and pins marked 'GPIO_IN' as inputs, and initialize the
+ * default value on the outputs.
+ */
+ for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
+ for (j = 0; j < U300_GPIO_PINS_PER_PORT; j++) {
+ local_irq_save(flags);
+ val = readl(virtbase + U300_GPIO_PXPCR +
+ i * U300_GPIO_PORTX_SPACING);
+ /* Mask out this pin */
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << (j << 1));
+
+ if (u300_gpio_config[i][j].pin_usage != GPIO_IN)
+ val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL << (j << 1));
+ writel(val, virtbase + U300_GPIO_PXPCR +
+ i * U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+ }
+ }
+
+ /* Enable or disable the internal pull-ups in the GPIO ASIC block */
+ for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
+ val = 0;
+ for (j = 0; j < 8; j++)
+ val |= (u32)((u300_gpio_config[i][j].pull_up == DISABLE_PULL_UP) << j);
+ local_irq_save(flags);
+ writel(val, virtbase + U300_GPIO_PXPER + i * U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+ }
+#endif
+}
+
+static int __init gpio_probe(struct platform_device *pdev)
+{
+ u32 val;
+ int err = 0;
+ int i;
+ int num_irqs;
+
+ gpiodev = &pdev->dev;
+ memset(gpio_pin, 0, sizeof(gpio_pin));
+
+ /* Get GPIO clock */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ dev_err(gpiodev, "could not get GPIO clock\n");
+ goto err_no_clk;
+ }
+ err = clk_enable(clk);
+ if (err) {
+ dev_err(gpiodev, "could not enable GPIO clock\n");
+ goto err_no_clk_enable;
+ }
+
+ memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!memres)
+ goto err_no_resource;
+
+ if (request_mem_region(memres->start, resource_size(memres),
+ "GPIO Controller") == NULL) {
+ err = -ENODEV;
+ goto err_no_ioregion;
+ }
+
+ virtbase = ioremap(memres->start, resource_size(memres));
+ if (!virtbase) {
+ err = -ENOMEM;
+ goto err_no_ioremap;
+ }
+ dev_info(gpiodev, "remapped 0x%08x to %p\n",
+ memres->start, virtbase);
+
+#ifdef U300_COH901335
+ dev_info(gpiodev, "initializing GPIO Controller COH 901 335\n");
+ /* Turn on the GPIO block */
+ writel(U300_GPIO_CR_BLOCK_CLOCK_ENABLE, virtbase + U300_GPIO_CR);
+#endif
+
+#ifdef U300_COH901571_3
+ dev_info(gpiodev, "initializing GPIO Controller COH 901 571/3\n");
+ val = readl(virtbase + U300_GPIO_CR);
+ dev_info(gpiodev, "COH901571/3 block version: %d, " \
+ "number of cores: %d\n",
+ ((val & 0x0000FE00) >> 9),
+ ((val & 0x000001FC) >> 2));
+ writel(U300_GPIO_CR_BLOCK_CLKRQ_ENABLE, virtbase + U300_GPIO_CR);
+#endif
+
+ gpio_set_initial_values();
+
+ for (num_irqs = 0 ; num_irqs < U300_GPIO_NUM_PORTS; num_irqs++) {
+
+ gpio_ports[num_irqs].irq =
+ platform_get_irq_byname(pdev,
+ gpio_ports[num_irqs].name);
+
+ err = request_irq(gpio_ports[num_irqs].irq,
+ gpio_irq_handler, IRQF_DISABLED,
+ gpio_ports[num_irqs].name,
+ &gpio_ports[num_irqs]);
+ if (err) {
+ dev_err(gpiodev, "cannot allocate IRQ for %s!\n",
+ gpio_ports[num_irqs].name);
+ goto err_no_irq;
+ }
+ /* Turns off PortX_irq_force */
+ writel(0x0, virtbase + U300_GPIO_PXIFR +
+ num_irqs * U300_GPIO_PORTX_SPACING);
+ }
+
+ return 0;
+
+ err_no_irq:
+ for (i = 0; i < num_irqs; i++)
+ free_irq(gpio_ports[i].irq, &gpio_ports[i]);
+ iounmap(virtbase);
+ err_no_ioremap:
+ release_mem_region(memres->start, resource_size(memres));
+ err_no_ioregion:
+ err_no_resource:
+ clk_disable(clk);
+ err_no_clk_enable:
+ clk_put(clk);
+ err_no_clk:
+ dev_info(gpiodev, "module ERROR:%d\n", err);
+ return err;
+}
+
+static int __exit gpio_remove(struct platform_device *pdev)
+{
+ int i;
+
+ /* Turn off the GPIO block */
+ writel(0x00000000U, virtbase + U300_GPIO_CR);
+ for (i = 0 ; i < U300_GPIO_NUM_PORTS; i++)
+ free_irq(gpio_ports[i].irq, &gpio_ports[i]);
+ iounmap(virtbase);
+ release_mem_region(memres->start, resource_size(memres));
+ clk_disable(clk);
+ clk_put(clk);
+ return 0;
+}
+
+static struct platform_driver gpio_driver = {
+ .driver = {
+ .name = "u300-gpio",
+ },
+ .remove = __exit_p(gpio_remove),
+};
+
+
+static int __init u300_gpio_init(void)
+{
+ return platform_driver_probe(&gpio_driver, gpio_probe);
+}
+
+static void __exit u300_gpio_exit(void)
+{
+ platform_driver_unregister(&gpio_driver);
+}
+
+arch_initcall(u300_gpio_init);
+module_exit(u300_gpio_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+
+#ifdef U300_COH901571_3
+MODULE_DESCRIPTION("ST-Ericsson AB COH 901 571/3 GPIO driver");
+#endif
+
+#ifdef U300_COH901335
+MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335 GPIO driver");
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 137a8ca67822..a971e3d043ba 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1296,7 +1296,7 @@ EXPORT_SYMBOL_GPL(gpio_request_one);
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
-int gpio_request_array(struct gpio *array, size_t num)
+int gpio_request_array(const struct gpio *array, size_t num)
{
int i, err;
@@ -1319,7 +1319,7 @@ EXPORT_SYMBOL_GPL(gpio_request_array);
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
-void gpio_free_array(struct gpio *array, size_t num)
+void gpio_free_array(const struct gpio *array, size_t num)
{
while (num--)
gpio_free((array++)->gpio);
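The const qualification added above mainly benefits table-driven board code; a hedged sketch of such a caller follows (the table contents are invented, only the two gpiolib calls are from the patch).

#include <linux/gpio.h>
#include <linux/kernel.h>

static const struct gpio example_board_gpios[] = {
	{ 10, GPIOF_OUT_INIT_LOW, "example-led"    },
	{ 11, GPIOF_IN,           "example-button" },
};

static int example_board_claim(void)
{
	/* legal on a const table once gpio_request_array() takes const */
	return gpio_request_array(example_board_gpios,
				  ARRAY_SIZE(example_board_gpios));
}

static void example_board_release(void)
{
	gpio_free_array(example_board_gpios, ARRAY_SIZE(example_board_gpios));
}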
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 1b06f67e1f69..bd6571e0097a 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
/*
* Langwell chip has 64 pins and thus there are 2 32bit registers to control
@@ -63,6 +64,7 @@ struct lnw_gpio {
void *reg_base;
spinlock_t lock;
unsigned irq_base;
+ struct pci_dev *pdev;
};
static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
@@ -104,11 +106,18 @@ static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u32 value;
unsigned long flags;
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value &= ~BIT(offset % 32);
writel(value, gpdr);
spin_unlock_irqrestore(&lnw->lock, flags);
+
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -120,11 +129,19 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
unsigned long flags;
lnw_gpio_set(chip, offset, value);
+
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value |= BIT(offset % 32);
writel(value, gpdr);
spin_unlock_irqrestore(&lnw->lock, flags);
+
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -145,6 +162,10 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
if (gpio >= lnw->chip.ngpio)
return -EINVAL;
+
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
if (type & IRQ_TYPE_EDGE_RISING)
value = readl(grer) | BIT(gpio % 32);
@@ -159,6 +180,9 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
writel(value, gfer);
spin_unlock_irqrestore(&lnw->lock, flags);
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -211,6 +235,39 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
chip->irq_eoi(data);
}
+#ifdef CONFIG_PM
+static int lnw_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int lnw_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int lnw_gpio_runtime_idle(struct device *dev)
+{
+ int err = pm_schedule_suspend(dev, 500);
+
+ if (!err)
+ return 0;
+
+ return -EBUSY;
+}
+
+#else
+#define lnw_gpio_runtime_suspend NULL
+#define lnw_gpio_runtime_resume NULL
+#define lnw_gpio_runtime_idle NULL
+#endif
+
+static const struct dev_pm_ops lnw_gpio_pm_ops = {
+ .runtime_suspend = lnw_gpio_runtime_suspend,
+ .runtime_resume = lnw_gpio_runtime_resume,
+ .runtime_idle = lnw_gpio_runtime_idle,
+};
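As a sketch of the bracketing pattern the hunks above add around register access (the function below is hypothetical and not part of the patch; struct lnw_gpio and its pdev member are from this driver):

static void example_touch_registers(struct lnw_gpio *lnw)
{
	if (lnw->pdev)
		pm_runtime_get(&lnw->pdev->dev);	/* take an async PM reference */

	/* ... read/modify/write the GPIO registers here ... */

	if (lnw->pdev)
		pm_runtime_put(&lnw->pdev->dev);	/* drop it, allowing runtime suspend */
}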
+
static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -270,6 +327,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->chip.base = gpio_base;
lnw->chip.ngpio = id->driver_data;
lnw->chip.can_sleep = 0;
+ lnw->pdev = pdev;
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
if (retval) {
@@ -285,6 +343,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
}
spin_lock_init(&lnw->lock);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
goto done;
err5:
kfree(lnw);
@@ -302,6 +364,9 @@ static struct pci_driver lnw_gpio_driver = {
.name = "langwell_gpio",
.id_table = lnw_gpio_ids,
.probe = lnw_gpio_probe,
+ .driver = {
+ .pm = &lnw_gpio_pm_ops,
+ },
};
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 78a843947d82..0451d7ac94ac 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -24,33 +24,46 @@
#include <linux/of_gpio.h>
#endif
-#define PCA953X_INPUT 0
-#define PCA953X_OUTPUT 1
-#define PCA953X_INVERT 2
-#define PCA953X_DIRECTION 3
-
-#define PCA953X_GPIOS 0x00FF
-#define PCA953X_INT 0x0100
+#define PCA953X_INPUT 0
+#define PCA953X_OUTPUT 1
+#define PCA953X_INVERT 2
+#define PCA953X_DIRECTION 3
+
+#define PCA957X_IN 0
+#define PCA957X_INVRT 1
+#define PCA957X_BKEN 2
+#define PCA957X_PUPD 3
+#define PCA957X_CFG 4
+#define PCA957X_OUT 5
+#define PCA957X_MSK 6
+#define PCA957X_INTS 7
+
+#define PCA_GPIO_MASK 0x00FF
+#define PCA_INT 0x0100
+#define PCA953X_TYPE 0x1000
+#define PCA957X_TYPE 0x2000
static const struct i2c_device_id pca953x_id[] = {
- { "pca9534", 8 | PCA953X_INT, },
- { "pca9535", 16 | PCA953X_INT, },
- { "pca9536", 4, },
- { "pca9537", 4 | PCA953X_INT, },
- { "pca9538", 8 | PCA953X_INT, },
- { "pca9539", 16 | PCA953X_INT, },
- { "pca9554", 8 | PCA953X_INT, },
- { "pca9555", 16 | PCA953X_INT, },
- { "pca9556", 8, },
- { "pca9557", 8, },
-
- { "max7310", 8, },
- { "max7312", 16 | PCA953X_INT, },
- { "max7313", 16 | PCA953X_INT, },
- { "max7315", 8 | PCA953X_INT, },
- { "pca6107", 8 | PCA953X_INT, },
- { "tca6408", 8 | PCA953X_INT, },
- { "tca6416", 16 | PCA953X_INT, },
+ { "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9536", 4 | PCA953X_TYPE, },
+ { "pca9537", 4 | PCA953X_TYPE | PCA_INT, },
+ { "pca9538", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9539", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9554", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9555", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9556", 8 | PCA953X_TYPE, },
+ { "pca9557", 8 | PCA953X_TYPE, },
+ { "pca9574", 8 | PCA957X_TYPE | PCA_INT, },
+ { "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
+
+ { "max7310", 8 | PCA953X_TYPE, },
+ { "max7312", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7313", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7315", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
/* NYET: { "tca6424", 24, }, */
{ }
};
@@ -75,16 +88,32 @@ struct pca953x_chip {
struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
const char *const *names;
+ int chip_type;
};
static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
{
- int ret;
+ int ret = 0;
if (chip->gpio_chip.ngpio <= 8)
ret = i2c_smbus_write_byte_data(chip->client, reg, val);
- else
- ret = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+ else {
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ ret = i2c_smbus_write_word_data(chip->client,
+ reg << 1, val);
+ break;
+ case PCA957X_TYPE:
+ ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
+ val & 0xff);
+ if (ret < 0)
+ break;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ (reg << 1) + 1,
+ (val & 0xff00) >> 8);
+ break;
+ }
+ }
if (ret < 0) {
dev_err(&chip->client->dev, "failed writing register\n");
@@ -116,13 +145,22 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
reg_val = chip->reg_direction | (1u << off);
- ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_DIRECTION;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_CFG;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -138,7 +176,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -149,7 +187,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
else
reg_val = chip->reg_output & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_OUTPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_OUT;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -157,7 +203,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
/* then direction */
reg_val = chip->reg_direction & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_DIRECTION;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_CFG;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -172,12 +226,20 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
- ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &reg_val);
mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
@@ -194,7 +256,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -204,7 +266,15 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
else
reg_val = chip->reg_output & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_OUTPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_OUT;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -322,9 +392,17 @@ static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
uint16_t old_stat;
uint16_t pending;
uint16_t trigger;
- int ret;
-
- ret = pca953x_read_reg(chip, PCA953X_INPUT, &cur_stat);
+ int ret, offset = 0;
+
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &cur_stat);
if (ret)
return 0;
@@ -372,14 +450,21 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- int ret;
+ int ret, offset = 0;
if (pdata->irq_base != -1
- && (id->driver_data & PCA953X_INT)) {
+ && (id->driver_data & PCA_INT)) {
int lvl;
- ret = pca953x_read_reg(chip, PCA953X_INPUT,
- &chip->irq_stat);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &chip->irq_stat);
if (ret)
goto out_failed;
@@ -439,7 +524,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT))
+ if (pdata->irq_base != -1 && (id->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
@@ -499,12 +584,65 @@ pca953x_get_alt_pdata(struct i2c_client *client)
}
#endif
+static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
+{
+ int ret;
+
+ ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
+ if (ret)
+ goto out;
+
+ ret = pca953x_read_reg(chip, PCA953X_DIRECTION,
+ &chip->reg_direction);
+ if (ret)
+ goto out;
+
+ /* set platform specific polarity inversion */
+ ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
+ if (ret)
+ goto out;
+ return 0;
+out:
+ return ret;
+}
+
+static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert)
+{
+ int ret;
+ uint16_t val = 0;
+
+ /* Put every port into a proper state; this can save power */
+ pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
+ pca953x_write_reg(chip, PCA957X_CFG, 0xffff);
+ pca953x_write_reg(chip, PCA957X_OUT, 0x0);
+
+ ret = pca953x_read_reg(chip, PCA957X_IN, &val);
+ if (ret)
+ goto out;
+ ret = pca953x_read_reg(chip, PCA957X_OUT, &chip->reg_output);
+ if (ret)
+ goto out;
+ ret = pca953x_read_reg(chip, PCA957X_CFG, &chip->reg_direction);
+ if (ret)
+ goto out;
+
+ /* set platform specific polarity inversion */
+ pca953x_write_reg(chip, PCA957X_INVRT, invert);
+
+ /* Enable registers 6 and 7 to control pull-up and pull-down */
+ pca953x_write_reg(chip, PCA957X_BKEN, 0x202);
+
+ return 0;
+out:
+ return ret;
+}
+
static int __devinit pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
- int ret;
+ int ret = 0;
chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
if (chip == NULL)
@@ -531,25 +669,20 @@ static int __devinit pca953x_probe(struct i2c_client *client,
chip->gpio_start = pdata->gpio_base;
chip->names = pdata->names;
+ chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
mutex_init(&chip->i2c_lock);
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- pca953x_setup_gpio(chip, id->driver_data & PCA953X_GPIOS);
+ pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
- ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
- if (ret)
- goto out_failed;
-
- ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &chip->reg_direction);
- if (ret)
- goto out_failed;
-
- /* set platform specific polarity inversion */
- ret = pca953x_write_reg(chip, PCA953X_INVERT, pdata->invert);
- if (ret)
+ if (chip->chip_type == PCA953X_TYPE)
+ device_pca953x_init(chip, pdata->invert);
+ else if (chip->chip_type == PCA957X_TYPE)
+ device_pca957x_init(chip, pdata->invert);
+ else
goto out_failed;
ret = pca953x_irq_setup(chip, id);
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index f970a5f3585e..36919e77c495 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -283,8 +283,10 @@ static int pch_gpio_resume(struct pci_dev *pdev)
#define pch_gpio_resume NULL
#endif
+#define PCI_VENDOR_ID_ROHM 0x10DB
static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/tps65910-gpio.c
new file mode 100644
index 000000000000..8d1ddfdd63eb
--- /dev/null
+++ b/drivers/gpio/tps65910-gpio.c
@@ -0,0 +1,100 @@
+/*
+ * tps65910-gpio.c -- TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/tps65910.h>
+
+static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ uint8_t val;
+
+ tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
+
+ if (val & GPIO_STS_MASK)
+ return 1;
+
+ return 0;
+}
+
+static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+
+ if (value)
+ tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ GPIO_SET_MASK);
+ else
+ tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ GPIO_SET_MASK);
+}
+
+static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+
+ /* Set the initial value */
+ tps65910_gpio_set(gc, offset, value);
+
+ return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ GPIO_CFG_MASK);
+}
+
+static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+
+ return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ GPIO_CFG_MASK);
+}
+
+void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
+{
+ int ret;
+
+ if (!gpio_base)
+ return;
+
+ tps65910->gpio.owner = THIS_MODULE;
+ tps65910->gpio.label = tps65910->i2c_client->name;
+ tps65910->gpio.dev = tps65910->dev;
+ tps65910->gpio.base = gpio_base;
+
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65910:
+ tps65910->gpio.ngpio = 6;
+ break;
+ case TPS65911:
+ tps65910->gpio.ngpio = 9;
+ break;
+ default:
+ return;
+ }
+ tps65910->gpio.can_sleep = 1;
+
+ tps65910->gpio.direction_input = tps65910_gpio_input;
+ tps65910->gpio.direction_output = tps65910_gpio_output;
+ tps65910->gpio.set = tps65910_gpio_set;
+ tps65910->gpio.get = tps65910_gpio_get;
+
+ ret = gpiochip_add(&tps65910->gpio);
+
+ if (ret)
+ dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
+}
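A hedged sketch of a call site: the MFD core is expected to invoke tps65910_gpio_init() during setup. The wrapper below is hypothetical; only tps65910_gpio_init() comes from the patch.

/* Hypothetical caller; gpio_base would come from board/platform data */
static void example_tps65910_gpio_setup(struct tps65910 *tps65910, int gpio_base)
{
	/* a gpio_base of 0 makes tps65910_gpio_init() bail out early */
	tps65910_gpio_init(tps65910, gpio_base);
}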
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3e257a50bf56..61e1ef90d4e5 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -46,10 +46,11 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
list_for_each_entry(entry, &dev->maplist, head) {
/*
* Because the kernel-userspace ABI is fixed at a 32-bit offset
- * while PCI resources may live above that, we ignore the map
- * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
- * It is assumed that each driver will have only one resource of
- * each type.
+ * while PCI resources may live above that, we only compare the
+ * lower 32 bits of the map offset for maps of type
+ * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
+ * It is assumed that if a driver has more than one resource
+ * of each type, the lower 32 bits are different.
*/
if (!entry->map ||
map->type != entry->map->type ||
@@ -59,9 +60,12 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
case _DRM_SHM:
if (map->flags != _DRM_CONTAINS_LOCK)
break;
+ return entry;
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
- return entry;
+ if ((entry->map->offset & 0xffffffff) ==
+ (map->offset & 0xffffffff))
+ return entry;
default: /* Make gcc happy */
;
}
@@ -183,9 +187,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return -EINVAL;
}
#endif
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
/* Some drivers preinitialize some maps, without the X Server
* needing to be aware of it. Therefore, we just return success
* when the server tries to create a duplicate map.
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 872747c5a544..21058e6ad2b8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1113,7 +1113,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (card_res->count_fbs >= fb_count) {
copied = 0;
fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
- list_for_each_entry(fb, &file_priv->fbs, head) {
+ list_for_each_entry(fb, &file_priv->fbs, filp_head) {
if (put_user(fb->base.id, fb_id + copied)) {
ret = -EFAULT;
goto out;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a9357c66ff8..09292193dafe 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -184,9 +184,9 @@ drm_edid_block_valid(u8 *raw_edid)
bad:
if (raw_edid) {
- DRM_ERROR("Raw EDID:\n");
+ printk(KERN_ERR "Raw EDID:\n");
print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
- printk("\n");
+ printk(KERN_ERR "\n");
}
return 0;
}
@@ -258,6 +258,17 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
return ret == 2 ? 0 : -1;
}
+static bool drm_edid_is_zero(u8 *in_edid, int length)
+{
+ int i;
+ u32 *raw_edid = (u32 *)in_edid;
+
+ for (i = 0; i < length / 4; i++)
+ if (*(raw_edid + i) != 0)
+ return false;
+ return true;
+}
+
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
@@ -273,6 +284,10 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
goto out;
if (drm_edid_block_valid(block))
break;
+ if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ connector->null_edid_counter++;
+ goto carp;
+ }
}
if (i == 4)
goto carp;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 74e4ff578017..4012fe423460 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
#include "drmP.h"
/** @file drm_gem.c
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d61d185cf040..4a058c7af6c0 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -28,6 +28,7 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
+#include <linux/ratelimit.h>
#include "drmP.h"
#include "drm_core.h"
@@ -253,10 +254,10 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
return -EFAULT;
m32.handle = (unsigned long)handle;
- if (m32.handle != (unsigned long)handle && printk_ratelimit())
- printk(KERN_ERR "compat_drm_addmap truncated handle"
- " %p for type %d offset %x\n",
- handle, m32.type, m32.offset);
+ if (m32.handle != (unsigned long)handle)
+ printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
+ " %p for type %d offset %x\n",
+ handle, m32.type, m32.offset);
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e1aee4f6a7c6..b6a19cb07caf 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -251,7 +251,7 @@ err:
}
-int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
@@ -292,6 +292,7 @@ static struct drm_bus drm_pci_bus = {
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
+ .irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
};
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 2c3fcbdfd8ff..5db96d45fc71 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -526,7 +526,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
- return dev->hose->dense_mem_base - dev->hose->mem_space->start;
+ return dev->hose->dense_mem_base;
#else
return 0;
#endif
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 51c2257b11e6..4d46441cbe2d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -776,7 +776,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
seq_printf(m, " seqno: 0x%08x\n", error->seqno);
- for (i = 0; i < 16; i++)
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
if (error->active_bo)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0239e9974bf2..2b79588541e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2182,9 +2182,8 @@ int i915_driver_unload(struct drm_device *dev)
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
- i915_gem_free_all_phys_object(dev);
-
mutex_lock(&dev->struct_mutex);
+ i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ee660355ae68..f63ee162f124 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -716,6 +716,7 @@ typedef struct drm_i915_private {
struct intel_fbdev *fbdev;
struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
atomic_t forcewake_count;
} drm_i915_private_t;
@@ -909,13 +910,6 @@ struct drm_i915_file_private {
} mm;
};
-enum intel_chip_family {
- CHIP_I8XX = 0x01,
- CHIP_I9XX = 0x02,
- CHIP_I915 = 0x04,
- CHIP_I965 = 0x08,
-};
-
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0b2e167d2bce..85f713746a1f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
@@ -354,13 +355,12 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_offset = offset & (PAGE_SIZE-1);
+ page_offset = offset_in_page(offset);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -453,9 +453,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
* data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_offset = offset & ~PAGE_MASK;
+ shmem_page_offset = offset_in_page(offset);
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = data_ptr & ~PAGE_MASK;
+ data_page_offset = offset_in_page(data_ptr);
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -463,10 +463,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
if (do_bit17_swizzling) {
slow_shmem_bit17_copy(page,
@@ -638,8 +639,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
- page_offset = offset & (PAGE_SIZE-1);
+ page_base = offset & PAGE_MASK;
+ page_offset = offset_in_page(offset);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
@@ -650,7 +651,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
*/
if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
page_offset, user_data, page_length))
-
return -EFAULT;
remain -= page_length;
@@ -730,9 +730,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
* page_length = bytes to copy for this page
*/
gtt_page_base = offset & PAGE_MASK;
- gtt_page_offset = offset & ~PAGE_MASK;
+ gtt_page_offset = offset_in_page(offset);
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = data_ptr & ~PAGE_MASK;
+ data_page_offset = offset_in_page(data_ptr);
page_length = remain;
if ((gtt_page_offset + page_length) > PAGE_SIZE)
@@ -791,13 +791,12 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_offset = offset & (PAGE_SIZE-1);
+ page_offset = offset_in_page(offset);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -896,9 +895,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
* data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_offset = offset & ~PAGE_MASK;
+ shmem_page_offset = offset_in_page(offset);
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = data_ptr & ~PAGE_MASK;
+ data_page_offset = offset_in_page(data_ptr);
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -906,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -1218,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+ }
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
@@ -1450,8 +1448,9 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
* edge of an even tile row (where tile rows are counted as if the bo is
* placed in a fenced gtt region).
*/
- if (IS_GEN2(dev) ||
- (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ if (IS_GEN2(dev))
+ tile_height = 16;
+ else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_height = 32;
else
tile_height = 8;
@@ -1556,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
inode = obj->base.filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
+ gfpmask |= mapping_gfp_mask(mapping);
+
for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
if (IS_ERR(page))
goto err_pages;
@@ -1699,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*. Here we mirror the actions taken
- * when by shmem_delete_inode() to release the backing store.
+ * backing pages, *now*.
*/
inode = obj->base.filp->f_path.dentry->d_inode;
- truncate_inode_pages(inode->i_mapping, 0);
- if (inode->i_op->truncate_range)
- inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+ shmem_truncate_range(inode, 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
@@ -2924,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
- i915_gem_release_mmap(obj);
-
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3565,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct address_space *mapping;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3575,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
i915_gem_info_add_obj(dev_priv, size);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3950,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- struct page *page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4012,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
struct page *page;
char *dst, *src;
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 20a4cc5b818f..4934cf84c320 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- /* blow away mappings if mapped through GTT */
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
- i915_gem_release_mmap(obj);
-
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b79619a7b788..ae2b49969b99 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -517,7 +517,7 @@ irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PIPEA_VBLANK_IVB)
drm_handle_vblank(dev, 0);
- if (de_iir & DE_PIPEB_VBLANK_IVB);
+ if (de_iir & DE_PIPEB_VBLANK_IVB)
drm_handle_vblank(dev, 1);
/* check event from PCH */
@@ -1740,6 +1740,17 @@ void ironlake_irq_preinstall(struct drm_device *dev)
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
+ if (IS_GEN6(dev)) {
+ /* Workaround stalls observed on Sandy Bridge GPUs by
+ * making the blitter command streamer generate a
+ * write to the Hardware Status Page for
+ * MI_USER_INTERRUPT. This appears to serialize the
+ * previous seqno write out before the interrupt
+ * happens.
+ */
+ I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+ I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
+ }
/* XXX hotplug from PCH */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2f967af8e62e..5d5def756c9e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -531,6 +531,7 @@
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
#define GEN6_BSD_USER_INTERRUPT (1 << 12)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a94d2b5264..e8152d23d5b6 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -678,6 +678,7 @@ void i915_save_display(struct drm_device *dev)
}
/* VGA state */
+ mutex_lock(&dev->struct_mutex);
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
@@ -687,6 +688,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
+ mutex_unlock(&dev->struct_mutex);
}
void i915_restore_display(struct drm_device *dev)
@@ -780,6 +782,8 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+
+ mutex_lock(&dev->struct_mutex);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -787,6 +791,7 @@ void i915_restore_display(struct drm_device *dev)
udelay(150);
i915_restore_vga(dev);
+ mutex_unlock(&dev->struct_mutex);
}
int i915_save_state(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e93f93cc7e78..0979d8877880 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -288,6 +288,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
* have to check the EDID input spec of the attached device.
+ *
+ * On the other hand, what should we do if it is a broken EDID?
*/
if (edid != NULL) {
is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -298,6 +300,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f553ddfdc168..aa43e7be6053 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3983,54 +3983,6 @@ static void i830_update_wm(struct drm_device *dev)
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
-static bool ironlake_compute_wm0(struct drm_device *dev,
- int pipe,
- const struct intel_watermark_params *display,
- int display_latency_ns,
- const struct intel_watermark_params *cursor,
- int cursor_latency_ns,
- int *plane_wm,
- int *cursor_wm)
-{
- struct drm_crtc *crtc;
- int htotal, hdisplay, clock, pixel_size;
- int line_time_us, line_count;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_pipe(dev, pipe);
- if (crtc->fb == NULL || !crtc->enabled)
- return false;
-
- htotal = crtc->mode.htotal;
- hdisplay = crtc->mode.hdisplay;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- /* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *plane_wm = entries + display->guard_size;
- if (*plane_wm > (int)display->max_wm)
- *plane_wm = display->max_wm;
-
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = ((htotal * 1000) / clock);
- line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * 64 * pixel_size;
- tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
- if (*cursor_wm > (int)cursor->max_wm)
- *cursor_wm = (int)cursor->max_wm;
-
- return true;
-}
-
/*
* Check the wm result.
*
@@ -4139,12 +4091,12 @@ static void ironlake_update_wm(struct drm_device *dev)
unsigned int enabled;
enabled = 0;
- if (ironlake_compute_wm0(dev, 0,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
+ if (g4x_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4153,12 +4105,12 @@ static void ironlake_update_wm(struct drm_device *dev)
enabled |= 1;
}
- if (ironlake_compute_wm0(dev, 1,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
+ if (g4x_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -4223,10 +4175,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
unsigned int enabled;
enabled = 0;
- if (ironlake_compute_wm0(dev, 0,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4235,10 +4187,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
enabled |= 1;
}
- if (ironlake_compute_wm0(dev, 1,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -4735,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
+ intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -5265,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- if (!HAS_PCH_SPLIT(dev))
- intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -7675,6 +7626,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
dev_priv->display.update_wm = g4x_update_wm;
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a4d80314e7f8..391b55f1cc74 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,8 +59,6 @@ struct intel_dp {
bool is_pch_edp;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
-
- struct drm_property *force_audio_property;
};
/**
@@ -1702,7 +1700,7 @@ intel_dp_set_property(struct drm_connector *connector,
if (ret)
return ret;
- if (property == intel_dp->force_audio_property) {
+ if (property == dev_priv->force_audio_property) {
int i = val;
bool has_audio;
@@ -1841,16 +1839,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
-
- intel_dp->force_audio_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
- if (intel_dp->force_audio_property) {
- intel_dp->force_audio_property->values[0] = -1;
- intel_dp->force_audio_property->values[1] = 1;
- drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
- }
-
+ intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 831d7a4a0d18..9ffa61eb4d7e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,6 +236,7 @@ struct intel_unpin_work {
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
+extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern void intel_crt_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f289b8642976..aa0a8e83142e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -45,7 +45,6 @@ struct intel_hdmi {
bool has_hdmi_sink;
bool has_audio;
int force_audio;
- struct drm_property *force_audio_property;
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -194,7 +193,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
- return MODE_CLOCK_HIGH;
+ return MODE_CLOCK_LOW;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -287,7 +286,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
if (ret)
return ret;
- if (property == intel_hdmi->force_audio_property) {
+ if (property == dev_priv->force_audio_property) {
int i = val;
bool has_audio;
@@ -365,16 +364,7 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
-
- intel_hdmi->force_audio_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
- if (intel_hdmi->force_audio_property) {
- intel_hdmi->force_audio_property->values[0] = -1;
- intel_hdmi->force_audio_property->values[1] = 1;
- drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
- }
-
+ intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d3b903bce7c5..d98cee60b602 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -401,8 +401,7 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = i | GMBUS_RATE_100KHZ;
/* XXX force bit banging until GMBUS is fully debugged */
- if (IS_GEN2(dev))
- bus->force_bit = intel_gpio_create(dev_priv, i);
+ bus->force_bit = intel_gpio_create(dev_priv, i);
}
intel_i2c_reset(dev_priv->dev);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 67cb076d271b..b28f7bd9f88a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -727,6 +727,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus EeeBox PC EB1007",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 9034dd8f33c7..3b26a3ba02dd 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -81,6 +81,36 @@ int intel_ddc_get_modes(struct drm_connector *connector,
return ret;
}
+static const char *force_audio_names[] = {
+ "off",
+ "auto",
+ "on",
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->force_audio_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "audio",
+ ARRAY_SIZE(force_audio_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+ drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
+ dev_priv->force_audio_property = prop;
+ }
+ drm_connector_attach_property(connector, prop, 0);
+}
+
static const char *broadcast_rgb_names[] = {
"Full",
"Limited 16:235",
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a670c006982e..56a8e2aea19c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1416,6 +1416,8 @@ void intel_setup_overlay(struct drm_device *dev)
goto out_free;
overlay->reg_bo = reg_bo;
+ mutex_lock(&dev->struct_mutex);
+
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
@@ -1440,6 +1442,8 @@ void intel_setup_overlay(struct drm_device *dev)
}
}
+ mutex_unlock(&dev->struct_mutex);
+
/* init all values */
overlay->color_key = 0x0101fe;
overlay->brightness = -19;
@@ -1464,6 +1468,7 @@ out_unpin_bo:
i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
+ mutex_unlock(&dev->struct_mutex);
out_free:
kfree(overlay);
return;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 754086f83941..30fe554d8936 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -148,8 +148,6 @@ struct intel_sdvo_connector {
int format_supported_num;
struct drm_property *tv_format;
- struct drm_property *force_audio_property;
-
/* add the property for the SDVO-TV */
struct drm_property *left;
struct drm_property *right;
@@ -1712,7 +1710,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
if (ret)
return ret;
- if (property == intel_sdvo_connector->force_audio_property) {
+ if (property == dev_priv->force_audio_property) {
int i = val;
bool has_audio;
@@ -2037,15 +2035,7 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
{
struct drm_device *dev = connector->base.base.dev;
- connector->force_audio_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
- if (connector->force_audio_property) {
- connector->force_audio_property->values[0] = -1;
- connector->force_audio_property->values[1] = 1;
- drm_connector_attach_property(&connector->base.base,
- connector->force_audio_property, 0);
- }
-
+ intel_attach_force_audio_property(&connector->base.base);
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
intel_attach_broadcast_rgb_property(&connector->base.base);
}
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 1084fa4d261b..54558a01969a 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -195,29 +195,10 @@ extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
-#if defined(__linux__) && defined(__alpha__)
-#define MGA_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
-#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
-
-#define MGA_DEREF(reg) (*(volatile u32 *)MGA_ADDR(reg))
-#define MGA_DEREF8(reg) (*(volatile u8 *)MGA_ADDR(reg))
-
-#define MGA_READ(reg) (_MGA_READ((u32 *)MGA_ADDR(reg)))
-#define MGA_READ8(reg) (_MGA_READ((u8 *)MGA_ADDR(reg)))
-#define MGA_WRITE(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0)
-#define MGA_WRITE8(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0)
-
-static inline u32 _MGA_READ(u32 *addr)
-{
- DRM_MEMORYBARRIER();
- return *(volatile u32 *)addr;
-}
-#else
#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
-#endif
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f0d459bb46e4..525744d593c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -262,7 +262,6 @@ static bool nouveau_dsm_detect(void)
vga_count++;
retval = nouveau_dsm_pci_probe(pdev);
- printk("ret val is %d\n", retval);
if (retval & NOUVEAU_DSM_HAS_MUX)
has_dsm |= 1;
if (retval & NOUVEAU_DSM_HAS_OPT)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4b9f4493c9f9..7347075ca5b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -339,11 +339,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
int ret;
if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 3);
+ ret = RING_SPACE(chan, 4);
if (ret)
return ret;
- BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
+ OUT_RING (chan, NvSema);
OUT_RING (chan, sema->mem->start);
OUT_RING (chan, 1);
} else
@@ -351,10 +352,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
- ret = RING_SPACE(chan, 5);
+ ret = RING_SPACE(chan, 7);
if (ret)
return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
@@ -394,11 +397,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
int ret;
if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 4);
+ ret = RING_SPACE(chan, 5);
if (ret)
return ret;
- BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
+ OUT_RING (chan, NvSema);
OUT_RING (chan, sema->mem->start);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
@@ -407,10 +411,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
- ret = RING_SPACE(chan, 5);
+ ret = RING_SPACE(chan, 7);
if (ret)
return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
@@ -504,22 +510,22 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
struct nouveau_gpuobj *obj = NULL;
int ret;
- if (dev_priv->card_type >= NV_C0)
- goto out_initialised;
+ if (dev_priv->card_type < NV_C0) {
+ /* Create an NV_SW object for various sync purposes */
+ ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
+ if (ret)
+ return ret;
- /* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
- if (ret)
- return ret;
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
- /* we leave subchannel empty for nvc0 */
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING (chan, NvSw);
+ FIRE_RING (chan);
+ }
- /* Create a DMA object for the shared cross-channel sync area. */
+ /* Set up area of memory shared between all channels for x-chan sync */
if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
@@ -534,23 +540,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
- OUT_RING(chan, NvSema);
- } else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram_handle); /* whole VM */
}
- FIRE_RING(chan);
-
-out_initialised:
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 053edf9d2f67..ba896e54b799 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -900,6 +900,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
}
/* NV11 and NV20 don't have this, they stop at 0x52. */
if (nv_gf4_disp_arch(dev)) {
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
@@ -1003,6 +1004,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
}
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2960f583dc38..5ee14d216ce8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
dma_bits = 40;
} else
- if (drm_pci_device_is_pcie(dev) &&
+ if (0 && drm_pci_device_is_pcie(dev) &&
dev_priv->chipset > 0x40 &&
dev_priv->chipset != 0x45) {
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -868,7 +868,9 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
nouveau_vm_unmap(&node->tmp_vma);
nouveau_vm_put(&node->tmp_vma);
}
+
mem->mm_node = NULL;
+ kfree(node);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 922fb6b664ed..ef9dec0e6f8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -182,6 +182,11 @@ nouveau_perf_init(struct drm_device *dev)
entries = perf[2];
}
+ if (entries > NOUVEAU_PM_MAX_LEVEL) {
+ NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
+ entries = NOUVEAU_PM_MAX_LEVEL;
+ }
+
entry = perf + headerlen;
for (i = 0; i < entries; i++) {
struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c77111eca6ac..82fad914e648 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -458,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_HW;
dev_priv->gart_info.func = &nv50_sgdma_backend;
} else
- if (drm_pci_device_is_pcie(dev) &&
+ if (0 && drm_pci_device_is_pcie(dev) &&
dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev)) {
dev_priv->gart_info.func = &nv44_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 38ea662568c1..144f79a350ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -371,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->vram.flags_valid = nv50_vram_flags_valid;
break;
case 0xC0:
+ case 0xD0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
@@ -563,68 +564,68 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_timer;
- switch (dev_priv->card_type) {
- case NV_04:
- nv04_graph_create(dev);
- break;
- case NV_10:
- nv10_graph_create(dev);
- break;
- case NV_20:
- case NV_30:
- nv20_graph_create(dev);
- break;
- case NV_40:
- nv40_graph_create(dev);
- break;
- case NV_50:
- nv50_graph_create(dev);
- break;
- case NV_C0:
- nvc0_graph_create(dev);
- break;
- default:
- break;
- }
-
- switch (dev_priv->chipset) {
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0xa0:
- nv84_crypt_create(dev);
- break;
- }
+ if (!nouveau_noaccel) {
+ switch (dev_priv->card_type) {
+ case NV_04:
+ nv04_graph_create(dev);
+ break;
+ case NV_10:
+ nv10_graph_create(dev);
+ break;
+ case NV_20:
+ case NV_30:
+ nv20_graph_create(dev);
+ break;
+ case NV_40:
+ nv40_graph_create(dev);
+ break;
+ case NV_50:
+ nv50_graph_create(dev);
+ break;
+ case NV_C0:
+ nvc0_graph_create(dev);
+ break;
+ default:
+ break;
+ }
- switch (dev_priv->card_type) {
- case NV_50:
switch (dev_priv->chipset) {
- case 0xa3:
- case 0xa5:
- case 0xa8:
- case 0xaf:
- nva3_copy_create(dev);
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa0:
+ nv84_crypt_create(dev);
break;
}
- break;
- case NV_C0:
- nvc0_copy_create(dev, 0);
- nvc0_copy_create(dev, 1);
- break;
- default:
- break;
- }
- if (dev_priv->card_type == NV_40)
- nv40_mpeg_create(dev);
- else
- if (dev_priv->card_type == NV_50 &&
- (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
- nv50_mpeg_create(dev);
+ switch (dev_priv->card_type) {
+ case NV_50:
+ switch (dev_priv->chipset) {
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ case 0xaf:
+ nva3_copy_create(dev);
+ break;
+ }
+ break;
+ case NV_C0:
+ nvc0_copy_create(dev, 0);
+ nvc0_copy_create(dev, 1);
+ break;
+ default:
+ break;
+ }
+
+ if (dev_priv->card_type == NV_40)
+ nv40_mpeg_create(dev);
+ else
+ if (dev_priv->card_type == NV_50 &&
+ (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
+ nv50_mpeg_create(dev);
- if (!nouveau_noaccel) {
for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
if (dev_priv->eng[e]) {
ret = dev_priv->eng[e]->init(dev, e);
@@ -880,8 +881,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
#ifdef __BIG_ENDIAN
/* Put the card in BE mode if it's not */
- if (nv_rd32(dev, NV03_PMC_BOOT_1))
- nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
+ if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
+ nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
DRM_MEMORYBARRIER();
#endif
@@ -922,6 +923,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->card_type = NV_50;
break;
case 0xc0:
+ case 0xd0:
dev_priv->card_type = NV_C0;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 0059e6f58a8b..519a6b4bba46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -58,6 +58,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
num -= len;
pte += len;
if (unlikely(end >= max)) {
+ phys += len << (bits + 12);
pde++;
pte = 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 3c78bc81357e..f1a3ae491995 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -376,7 +376,10 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
*/
/* framebuffer can be larger than crtc scanout area. */
- regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
+ XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ regp->CRTC[NV_CIO_CRE_42] =
+ XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -824,8 +827,11 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ regp->CRTC[NV_CIO_CRE_42] =
+ XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 74a3f6872701..08da478ba544 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -409,7 +409,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *evo = dispc->sync;
int ret;
- ret = RING_SPACE(evo, 24);
+ ret = RING_SPACE(evo, chan ? 25 : 27);
if (unlikely(ret))
return ret;
@@ -458,8 +458,19 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* queue the flip on the crtc's "display sync" channel */
BEGIN_RING(evo, 0, 0x0100, 1);
OUT_RING (evo, 0xfffe0000);
- BEGIN_RING(evo, 0, 0x0084, 5);
- OUT_RING (evo, chan ? 0x00000100 : 0x00000010);
+ if (chan) {
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000100);
+ } else {
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000010);
+ /* allows gamma somehow, PDISP will bitch at you if
+ * you don't wait for vblank before changing this..
+ */
+ BEGIN_RING(evo, 0, 0x00e0, 1);
+ OUT_RING (evo, 0x40000000);
+ }
+ BEGIN_RING(evo, 0, 0x0088, 4);
OUT_RING (evo, dispc->sem.offset);
OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
OUT_RING (evo, 0x74b1e000);
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index fe0f253089ac..bbfb1a68fb11 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -277,6 +277,8 @@
# define NV_CIO_CRE_EBR_VDE_11 2:2
# define NV_CIO_CRE_EBR_VRS_11 4:4
# define NV_CIO_CRE_EBR_VBS_11 6:6
+# define NV_CIO_CRE_42 0x42
+# define NV_CIO_CRE_42_OFFSET_11 6:6
# define NV_CIO_CRE_43 0x43
# define NV_CIO_CRE_44 0x44 /* head control */
# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 9746fee59f56..ea92bbe3ed37 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -28,11 +28,4 @@ config DRM_RADEON_KMS
The kernel will also perform security check on command stream
provided by the user, we want to catch and forbid any illegal use
of the GPU such as DMA into random system memory or into memory
- not owned by the process supplying the command stream. This part
- of the code is still incomplete and this why we propose that patch
- as a staging driver addition, future security might forbid current
- experimental userspace to run.
-
- This code support the following hardware : R1XX,R2XX,R3XX,R4XX,R5XX
- (radeon up to X1950). Works is underway to provide support for R6XX,
- R7XX and newer hardware (radeon from HD2XXX to HD4XXX).
+ not owned by the process supplying the command stream.
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 49611e2365d9..1b50ad8919d5 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1200,6 +1200,7 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
+#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP 0x14
// ucConfig
#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ec848787d7d9..9541995e4b21 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -671,6 +671,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
DISPPLL_CONFIG_DUAL_LINK;
}
}
+ if (radeon_encoder_is_dp_bridge(encoder)) {
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+ struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+ args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
+ } else
+ args.v3.sInput.ucExtTransmitterID = 0;
+
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
@@ -1045,7 +1052,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
- u32 tmp;
+ u32 tmp, viewport_w, viewport_h;
int r;
/* no fb bound */
@@ -1171,8 +1178,10 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
y &= ~1;
WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
(x << 16) | y);
+ viewport_w = crtc->mode.hdisplay;
+ viewport_h = (crtc->mode.vdisplay + 1) & ~1;
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
- (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+ (viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
@@ -1213,7 +1222,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
- u32 tmp;
+ u32 tmp, viewport_w, viewport_h;
int r;
/* no fb bound */
@@ -1338,8 +1347,10 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
y &= ~1;
WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
(x << 16) | y);
+ viewport_w = crtc->mode.hdisplay;
+ viewport_h = (crtc->mode.vdisplay + 1) & ~1;
WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
- (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+ (viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index e148ab04b80b..7b4eeb7b4a8c 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -39,17 +39,335 @@
const u32 cayman_default_state[] =
{
- /* XXX fill in additional blit state */
+ 0xc0066900,
+ 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_COUNT_CONTROL */
+ 0x00000000, /* DB_DEPTH_VIEW */
+ 0x0000002a, /* DB_RENDER_OVERRIDE */
+ 0x00000000, /* DB_RENDER_OVERRIDE2 */
+ 0x00000000, /* DB_HTILE_DATA_BASE */
0xc0026900,
- 0x00000316,
- 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- 0x00000010, /* */
+ 0x0000000a,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0036900,
+ 0x0000000f,
+ 0x00000000, /* DB_DEPTH_INFO */
+ 0x00000000, /* DB_Z_INFO */
+ 0x00000000, /* DB_STENCIL_INFO */
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00d6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000, /* PA_SC_CLIPRECT_0_BR */
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0226900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
0xc0026900,
0x000000d9,
0x00000000, /* CP_RINGID */
0x00000000, /* CP_VMID */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00ffffff, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000, /* CB_BLEND_GREEN */
+ 0x00000000, /* CB_BLEND_BLUE */
+ 0x00000000, /* CB_BLEND_ALPHA */
+
+ 0xc0016900,
+ 0x00000187,
+ 0x00000100, /* SPI_VS_OUT_ID_0 */
+
+ 0xc0026900,
+ 0x00000191,
+ 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+ 0x00000101, /* SPI_PS_INPUT_CNTL_1 */
+
+ 0xc0016900,
+ 0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+
+ 0xc0106900,
+ 0x000001b3,
+ 0x20000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00100000, /* SPI_BARYC_CNTL */
+ 0x00000000, /* SPI_PS_IN_CONTROL_2 */
+ 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
+ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
+ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
+ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
+ 0x00000000, /* SPI_GPR_MGMT */
+ 0x00000000, /* SPI_LDS_MGMT */
+ 0x00000000, /* SPI_STACK_MGMT */
+ 0x00000000, /* SPI_WAVE_MGMT_1 */
+ 0x00000000, /* SPI_WAVE_MGMT_2 */
+
+ 0xc0016900,
+ 0x000001e0,
+ 0x00000000, /* CB_BLEND0_CONTROL */
+
+ 0xc00e6900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+ 0x00000000, /* DB_EQAA */
+ 0x00cc0010, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CONTROL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000004, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x00000229,
+ 0x00000000, /* SQ_PGM_START_FS */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x0000023b,
+ 0x00000000, /* SQ_LDS_ALLOC_PS */
+
+ 0xc0066900,
+ 0x00000240,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0046900,
+ 0x00000247,
+ 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MODE_CNTL_0 */
+ 0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002ad,
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x000002d5,
+ 0x00000000, /* VGT_SHADER_STAGES_EN */
+
+ 0xc0016900,
+ 0x000002dc,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0066900,
+ 0x000002de,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002e5,
+ 0x00000000, /* VGT_STRMOUT_CONFIG */
+ 0x00000000,
+
+ 0xc01b6900,
+ 0x000002f5,
+ 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+ 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x00000005, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+ 0xffffffff,
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+};
+
+const u32 cayman_vs[] =
+{
+ 0x00000004,
+ 0x80400400,
+ 0x0000a03c,
+ 0x95000688,
+ 0x00004000,
+ 0x15000688,
+ 0x00000000,
+ 0x88000000,
+ 0x04000000,
+ 0x67961001,
+#ifdef __BIG_ENDIAN
+ 0x00020000,
+#else
+ 0x00000000,
+#endif
+ 0x00000000,
+ 0x04000000,
+ 0x67961000,
+#ifdef __BIG_ENDIAN
+ 0x00020008,
+#else
+ 0x00000008,
+#endif
+ 0x00000000,
+};
+
+const u32 cayman_ps[] =
+{
+ 0x00000004,
+ 0xa00c0000,
+ 0x00000008,
+ 0x80400000,
+ 0x00000000,
+ 0x95000688,
+ 0x00000000,
+ 0x88000000,
+ 0x00380400,
+ 0x00146b10,
+ 0x00380000,
+ 0x20146b10,
+ 0x00380400,
+ 0x40146b00,
+ 0x80380000,
+ 0x60146b00,
+ 0x00000010,
+ 0x000d1000,
+ 0xb0800000,
+ 0x00000000,
};
+const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
+const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
index 33b75e5d0fa4..f5d0e9a60267 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -25,8 +25,11 @@
#ifndef CAYMAN_BLIT_SHADERS_H
#define CAYMAN_BLIT_SHADERS_H
+extern const u32 cayman_ps[];
+extern const u32 cayman_vs[];
extern const u32 cayman_default_state[];
+extern const u32 cayman_ps_size, cayman_vs_size;
extern const u32 cayman_default_size;
#endif
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7c37638095f7..12d2fdc52414 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -88,21 +88,40 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
- u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
- ASIC_T_SHIFT;
- u32 actual_temp = 0;
-
- if (temp & 0x400)
- actual_temp = -256;
- else if (temp & 0x200)
- actual_temp = 255;
- else if (temp & 0x100) {
- actual_temp = temp & 0x1ff;
- actual_temp |= ~0x1ff;
- } else
- actual_temp = temp & 0xff;
+ u32 temp, toffset;
+ int actual_temp = 0;
+
+ if (rdev->family == CHIP_JUNIPER) {
+ toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
+ TOFFSET_SHIFT;
+ temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
+ TS0_ADC_DOUT_SHIFT;
+
+ if (toffset & 0x100)
+ actual_temp = temp / 2 - (0x200 - toffset);
+ else
+ actual_temp = temp / 2 + toffset;
+
+ actual_temp = actual_temp * 1000;
+
+ } else {
+ temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+
+ if (temp & 0x400)
+ actual_temp = -256;
+ else if (temp & 0x200)
+ actual_temp = 255;
+ else if (temp & 0x100) {
+ actual_temp = temp & 0x1ff;
+ actual_temp |= ~0x1ff;
+ } else
+ actual_temp = temp & 0xff;
- return (actual_temp * 1000) / 2;
+ actual_temp = (actual_temp * 1000) / 2;
+ }
+
+ return actual_temp;
}
int sumo_get_temp(struct radeon_device *rdev)
@@ -121,11 +140,17 @@ void evergreen_pm_misc(struct radeon_device *rdev)
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
if (voltage->type == VOLTAGE_SW) {
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->voltage == 0xff01)
+ return;
if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
}
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->vddci == 0xff01)
+ return;
if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
rdev->pm.current_vddci = voltage->vddci;
@@ -1415,6 +1440,8 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
force_no_swizzle = false;
@@ -1544,6 +1571,8 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev)
case CHIP_REDWOOD:
case CHIP_CEDAR:
case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
default:
@@ -1689,6 +1718,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
+ case CHIP_SUMO:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ if (rdev->pdev->device == 0x9648)
+ rdev->config.evergreen.max_simds = 3;
+ else if ((rdev->pdev->device == 0x9647) ||
+ (rdev->pdev->device == 0x964a))
+ rdev->config.evergreen.max_simds = 4;
+ else
+ rdev->config.evergreen.max_simds = 5;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_SUMO2:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
rdev->config.evergreen.max_pipes = 4;
@@ -1936,9 +2013,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |= (3 << 0);
break;
}
- /* num banks is 8 on all fusion asics */
+ /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
- rdev->config.evergreen.tile_config |= 8 << 4;
+ rdev->config.evergreen.tile_config |= 1 << 4;
else
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
@@ -2039,6 +2116,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
case CHIP_CAICOS:
/* no vertex cache */
sq_config &= ~VC_ENABLE;
@@ -2060,6 +2139,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
ps_thread_count = 96;
break;
default:
@@ -2099,6 +2180,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
case CHIP_CAICOS:
vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
break;
@@ -2618,28 +2701,25 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
int evergreen_irq_process(struct radeon_device *rdev)
{
- u32 wptr = evergreen_get_ih_wptr(rdev);
- u32 rptr = rdev->ih.rptr;
+ u32 wptr;
+ u32 rptr;
u32 src_id, src_data;
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- if (!rdev->ih.enabled)
+ if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
- spin_lock_irqsave(&rdev->ih.lock, flags);
+ wptr = evergreen_get_ih_wptr(rdev);
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ spin_lock_irqsave(&rdev->ih.lock, flags);
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
- if (rdev->shutdown) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
- return IRQ_NONE;
- }
-
restart_ih:
/* display interrupts */
evergreen_irq_ack(rdev);
@@ -2868,7 +2948,7 @@ restart_ih:
radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
- DRM_DEBUG("IH: CP EOP\n");
+ DRM_DEBUG("IH: GUI idle\n");
rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index ba06a69c6de8..57f3bc17b87e 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -31,6 +31,7 @@
#include "evergreend.h"
#include "evergreen_blit_shaders.h"
+#include "cayman_blit_shaders.h"
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
@@ -152,6 +153,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
if ((rdev->family == CHIP_CEDAR) ||
(rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_SUMO) ||
+ (rdev->family == CHIP_SUMO2) ||
(rdev->family == CHIP_CAICOS))
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, 48, gpu_addr);
@@ -199,6 +202,16 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
+ /* workaround some hw bugs */
+ if (x2 == 0)
+ x1 = 1;
+ if (y2 == 0)
+ y1 = 1;
+ if (rdev->family == CHIP_CAYMAN) {
+ if ((x2 == 1) && (y2 == 1))
+ x2 = 2;
+ }
+
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
@@ -255,238 +268,284 @@ set_default_state(struct radeon_device *rdev)
u64 gpu_addr;
int dwords;
- switch (rdev->family) {
- case CHIP_CEDAR:
- default:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 16;
- num_gs_threads = 16;
- num_es_threads = 16;
- num_hs_threads = 16;
- num_ls_threads = 16;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_REDWOOD:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_JUNIPER:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_PALM:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 16;
- num_gs_threads = 16;
- num_es_threads = 16;
- num_hs_threads = 16;
- num_ls_threads = 16;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_BARTS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_TURKS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_CAICOS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 10;
- num_gs_threads = 10;
- num_es_threads = 10;
- num_hs_threads = 10;
- num_ls_threads = 10;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- }
-
- if ((rdev->family == CHIP_CEDAR) ||
- (rdev->family == CHIP_PALM) ||
- (rdev->family == CHIP_CAICOS))
- sq_config = 0;
- else
- sq_config = VC_ENABLE;
-
- sq_config |= (EXPORT_SRC_C |
- CS_PRIO(0) |
- LS_PRIO(0) |
- HS_PRIO(0) |
- PS_PRIO(0) |
- VS_PRIO(1) |
- GS_PRIO(2) |
- ES_PRIO(3));
-
- sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
- NUM_VS_GPRS(num_vs_gprs) |
- NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
- sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
- NUM_ES_GPRS(num_es_gprs));
- sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
- NUM_LS_GPRS(num_ls_gprs));
- sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
- NUM_VS_THREADS(num_vs_threads) |
- NUM_GS_THREADS(num_gs_threads) |
- NUM_ES_THREADS(num_es_threads));
- sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
- NUM_LS_THREADS(num_ls_threads));
- sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
- NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
- sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
- NUM_ES_STACK_ENTRIES(num_es_stack_entries));
- sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
- NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
-
/* set clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
- /* disable dyn gprs */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0);
+ if (rdev->family < CHIP_CAYMAN) {
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ default:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 16;
+ num_gs_threads = 16;
+ num_es_threads = 16;
+ num_hs_threads = 16;
+ num_ls_threads = 16;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_REDWOOD:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_JUNIPER:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_PALM:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 16;
+ num_gs_threads = 16;
+ num_es_threads = 16;
+ num_hs_threads = 16;
+ num_ls_threads = 16;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_SUMO:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 25;
+ num_gs_threads = 25;
+ num_es_threads = 25;
+ num_hs_threads = 25;
+ num_ls_threads = 25;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_SUMO2:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 25;
+ num_gs_threads = 25;
+ num_es_threads = 25;
+ num_hs_threads = 25;
+ num_ls_threads = 25;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_BARTS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_TURKS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_CAICOS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 10;
+ num_gs_threads = 10;
+ num_es_threads = 10;
+ num_hs_threads = 10;
+ num_ls_threads = 10;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ }
- /* SQ config */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
- radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, sq_config);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_thread_resource_mgmt);
- radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+ if ((rdev->family == CHIP_CEDAR) ||
+ (rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_SUMO) ||
+ (rdev->family == CHIP_SUMO2) ||
+ (rdev->family == CHIP_CAICOS))
+ sq_config = 0;
+ else
+ sq_config = VC_ENABLE;
+
+ sq_config |= (EXPORT_SRC_C |
+ CS_PRIO(0) |
+ LS_PRIO(0) |
+ HS_PRIO(0) |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+ NUM_VS_GPRS(num_vs_gprs) |
+ NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+ NUM_ES_GPRS(num_es_gprs));
+ sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
+ NUM_LS_GPRS(num_ls_gprs));
+ sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+ NUM_VS_THREADS(num_vs_threads) |
+ NUM_GS_THREADS(num_gs_threads) |
+ NUM_ES_THREADS(num_es_threads));
+ sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
+ NUM_LS_THREADS(num_ls_threads));
+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+ NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+ NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+ sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
+ NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
+
+ /* disable dyn gprs */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0);
+
+ /* SQ config */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+ radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, sq_config);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_thread_resource_mgmt);
+ radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+ }
/* CONTEXT_CONTROL */
radeon_ring_write(rdev, 0xc0012800);
@@ -560,7 +619,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
- rdev->r600_blit.state_len = evergreen_default_size;
+ if (rdev->family < CHIP_CAYMAN)
+ rdev->r600_blit.state_len = evergreen_default_size;
+ else
+ rdev->r600_blit.state_len = cayman_default_size;
dwords = rdev->r600_blit.state_len;
while (dwords & 0xf) {
@@ -572,11 +634,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size = ALIGN(obj_size, 256);
rdev->r600_blit.vs_offset = obj_size;
- obj_size += evergreen_vs_size * 4;
+ if (rdev->family < CHIP_CAYMAN)
+ obj_size += evergreen_vs_size * 4;
+ else
+ obj_size += cayman_vs_size * 4;
obj_size = ALIGN(obj_size, 256);
rdev->r600_blit.ps_offset = obj_size;
- obj_size += evergreen_ps_size * 4;
+ if (rdev->family < CHIP_CAYMAN)
+ obj_size += evergreen_ps_size * 4;
+ else
+ obj_size += cayman_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
@@ -599,16 +667,29 @@ int evergreen_blit_init(struct radeon_device *rdev)
return r;
}
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- evergreen_default_state, rdev->r600_blit.state_len * 4);
-
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < evergreen_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
- for (i = 0; i < evergreen_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
+ if (rdev->family < CHIP_CAYMAN) {
+ memcpy_toio(ptr + rdev->r600_blit.state_offset,
+ evergreen_default_state, rdev->r600_blit.state_len * 4);
+
+ if (num_packet2s)
+ memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+ packet2s, num_packet2s * 4);
+ for (i = 0; i < evergreen_vs_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+ for (i = 0; i < evergreen_ps_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
+ } else {
+ memcpy_toio(ptr + rdev->r600_blit.state_offset,
+ cayman_default_state, rdev->r600_blit.state_len * 4);
+
+ if (num_packet2s)
+ memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+ packet2s, num_packet2s * 4);
+ for (i = 0; i < cayman_vs_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
+ for (i = 0; i < cayman_ps_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
+ }
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
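The SQ defaults above are emitted as PACKET3_SET_CONFIG_REG packets: a type-3 header carrying the payload count, then the dword offset of the first register from the config-register base, then the values, which land in consecutive registers. A minimal standalone sketch of that packing; the header encoding and register offsets below are illustrative, not the real hardware values from evergreend.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative encodings only -- the real ones live in evergreend.h. */
    #define PACKET3(op, cnt)        ((3u << 30) | ((op) << 8) | (cnt))
    #define PACKET3_SET_CONFIG_REG  0x68
    #define CONFIG_REG_START        0x00008000
    #define SQ_CONFIG               0x00008c00

    static unsigned emit_set_config(uint32_t *buf, uint32_t reg,
                                    const uint32_t *vals, unsigned n)
    {
        unsigned i, w = 0;

        buf[w++] = PACKET3(PACKET3_SET_CONFIG_REG, n);
        buf[w++] = (reg - CONFIG_REG_START) >> 2;  /* dword offset from base */
        for (i = 0; i < n; i++)
            buf[w++] = vals[i];                    /* consecutive registers */
        return w;
    }

    int main(void)
    {
        uint32_t ib[16];
        uint32_t sq[3] = { 0x1, 0x2, 0x3 };
        unsigned i, n = emit_set_config(ib, SQ_CONFIG, sq, 3);

        for (i = 0; i < n; i++)
            printf("0x%08x\n", (unsigned)ib[i]);
        return 0;
    }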
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index f37e91ee8a11..1636e3449825 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -168,10 +168,16 @@
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
/* evergreen */
+#define CG_THERMAL_CTRL 0x72c
+#define TOFFSET_MASK 0x00003FE0
+#define TOFFSET_SHIFT 5
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
-#define ASIC_T_MASK 0x7FF0000
+#define ASIC_T_MASK 0x07FF0000
#define ASIC_T_SHIFT 16
+#define CG_TS0_STATUS 0x760
+#define TS0_ADC_DOUT_MASK 0x000003FF
+#define TS0_ADC_DOUT_SHIFT 0
/* APU */
#define CG_THERMAL_STATUS 0x678
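CG_MULT_THERMAL_STATUS and CG_TS0_STATUS hold the temperature readings as bit fields; the mask/shift pairs added above are used to pull them out. A standalone sketch of the decode, using made-up register reads:

    #include <stdint.h>
    #include <stdio.h>

    #define ASIC_T_MASK        0x07FF0000
    #define ASIC_T_SHIFT       16
    #define TS0_ADC_DOUT_MASK  0x000003FF
    #define TS0_ADC_DOUT_SHIFT 0

    int main(void)
    {
        uint32_t mult_status = 0x00450000;  /* made-up register read */
        uint32_t ts0_status  = 0x00000123;  /* made-up register read */

        uint32_t asic_t = (mult_status & ASIC_T_MASK) >> ASIC_T_SHIFT;
        uint32_t adc    = (ts0_status & TS0_ADC_DOUT_MASK) >> TS0_ADC_DOUT_SHIFT;

        printf("ASIC_T field: %u, TS0 ADC: %u\n", (unsigned)asic_t, (unsigned)adc);
        return 0;
    }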
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b205ba1cdd8f..16caafeadf5e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1387,14 +1387,12 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
cayman_gpu_init(rdev);
-#if 0
- r = cayman_blit_init(rdev);
+ r = evergreen_blit_init(rdev);
if (r) {
- cayman_blit_fini(rdev);
+ evergreen_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
-#endif
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -1452,7 +1450,7 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
- /* int r; */
+ int r;
/* FIXME: we should wait for ring to be empty */
cayman_cp_enable(rdev, false);
@@ -1461,14 +1459,13 @@ int cayman_suspend(struct radeon_device *rdev)
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
-#if 0
/* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) {
radeon_bo_unpin(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
-#endif
+
return 0;
}
@@ -1580,7 +1577,7 @@ int cayman_init(struct radeon_device *rdev)
void cayman_fini(struct radeon_device *rdev)
{
- /* cayman_blit_fini(rdev); */
+ evergreen_blit_fini(rdev);
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 2fef9de7f363..686f9dc5d4bd 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -63,7 +63,7 @@ struct r100_cs_track {
unsigned num_arrays;
unsigned max_indx;
unsigned color_channel_mask;
- struct r100_cs_track_array arrays[11];
+ struct r100_cs_track_array arrays[16];
struct r100_cs_track_cb cb[R300_MAX_CB];
struct r100_cs_track_cb zb;
struct r100_cs_track_cb aa;
@@ -146,6 +146,12 @@ static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
ib = p->ib->ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
+ if (c > 16) {
+ DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
r = r100_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6f27593901c7..f79d2ccb6755 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -87,6 +87,10 @@ MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
+MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO_me.bin");
+MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO2_me.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -586,6 +590,9 @@ void r600_pm_misc(struct radeon_device *rdev)
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->voltage == 0xff01)
+ return;
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
@@ -2024,6 +2031,14 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "PALM";
rlc_chip_name = "SUMO";
break;
+ case CHIP_SUMO:
+ chip_name = "SUMO";
+ rlc_chip_name = "SUMO";
+ break;
+ case CHIP_SUMO2:
+ chip_name = "SUMO2";
+ rlc_chip_name = "SUMO";
+ break;
default: BUG();
}
@@ -3282,27 +3297,26 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
int r600_irq_process(struct radeon_device *rdev)
{
- u32 wptr = r600_get_ih_wptr(rdev);
- u32 rptr = rdev->ih.rptr;
+ u32 wptr;
+ u32 rptr;
u32 src_id, src_data;
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- if (!rdev->ih.enabled)
+ if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
+ wptr = r600_get_ih_wptr(rdev);
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
spin_lock_irqsave(&rdev->ih.lock, flags);
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
- if (rdev->shutdown) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
- return IRQ_NONE;
- }
restart_ih:
/* display interrupts */
@@ -3432,7 +3446,7 @@ restart_ih:
radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
- DRM_DEBUG("IH: CP EOP\n");
+ DRM_DEBUG("IH: GUI idle\n");
rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index fd18be9871ab..909bda8dd550 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -71,20 +71,21 @@ struct r600_cs_track {
u64 db_bo_mc;
};
-#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc }
-#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc }
-#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 }
-#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc }
-#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 }
-#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc }
-#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 }
-#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc }
+#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
+#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
+#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 }
+#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
+#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 }
+#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
+#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }
struct gpu_formats {
unsigned blockwidth;
unsigned blockheight;
unsigned blocksize;
unsigned valid_color;
+ enum radeon_family min_family;
};
static const struct gpu_formats color_formats_table[] = {
@@ -154,7 +155,11 @@ static const struct gpu_formats color_formats_table[] = {
[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
[V_038004_FMT_BC5] = { 4, 4, 16, 0},
+ [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
+ [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
+ /* The other Evergreen formats */
+ [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
};
static inline bool fmt_is_valid_color(u32 format)
@@ -168,11 +173,14 @@ static inline bool fmt_is_valid_color(u32 format)
return false;
}
-static inline bool fmt_is_valid_texture(u32 format)
+static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family)
{
if (format >= ARRAY_SIZE(color_formats_table))
return false;
+ if (family < color_formats_table[format].min_family)
+ return false;
+
if (color_formats_table[format].blockwidth > 0)
return true;
@@ -1325,7 +1333,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
return -EINVAL;
}
format = G_038004_DATA_FORMAT(word1);
- if (!fmt_is_valid_texture(format)) {
+ if (!fmt_is_valid_texture(format, p->family)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
__func__, __LINE__, format);
return -EINVAL;
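With the added min_family column, a texture format is accepted only if it exists in the table (non-zero block width) and the parser's chip family is at least the format's minimum, which is how BC6/BC7 stay Evergreen-only. A self-contained sketch of the same lookup pattern; the format indices and family enum are illustrative:

    #include <stdio.h>

    enum family { FAM_R600, FAM_RV770, FAM_CEDAR };

    struct fmt_info {
        unsigned blockwidth;
        unsigned blockheight;
        unsigned blocksize;
        enum family min_family;
    };

    static const struct fmt_info fmt_table[] = {
        [0x35] = { 4, 4, 16, FAM_R600  },  /* BC5: all families      */
        [0x36] = { 4, 4, 16, FAM_CEDAR },  /* BC6: Evergreen onwards */
    };

    static int fmt_is_valid_texture(unsigned fmt, enum family fam)
    {
        if (fmt >= sizeof(fmt_table) / sizeof(fmt_table[0]))
            return 0;
        if (fam < fmt_table[fmt].min_family)
            return 0;
        return fmt_table[fmt].blockwidth > 0;
    }

    int main(void)
    {
        printf("BC6 on R600:  %d\n", fmt_is_valid_texture(0x36, FAM_R600));
        printf("BC6 on CEDAR: %d\n", fmt_is_valid_texture(0x36, FAM_CEDAR));
        return 0;
    }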
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index b2b944bcd05a..f140a0d5cb54 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1309,6 +1309,9 @@
#define V_038004_FMT_BC3 0x00000033
#define V_038004_FMT_BC4 0x00000034
#define V_038004_FMT_BC5 0x00000035
+#define V_038004_FMT_BC6 0x00000036
+#define V_038004_FMT_BC7 0x00000037
+#define V_038004_FMT_32_AS_32_32_32_32 0x00000038
#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ba643b576054..ef0e0e016914 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -165,6 +165,7 @@ struct radeon_clock {
uint32_t default_sclk;
uint32_t default_dispclk;
uint32_t dp_extclk;
+ uint32_t max_pixel_clock;
};
/*
@@ -178,6 +179,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index d948265db87e..b2449629537d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -906,9 +906,9 @@ static struct radeon_asic cayman_asic = {
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
- .copy_blit = NULL,
- .copy_dma = NULL,
- .copy = NULL,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
@@ -938,6 +938,13 @@ static struct radeon_asic cayman_asic = {
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
+
+ /* set the number of crtcs */
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ rdev->num_crtc = 1;
+ else
+ rdev->num_crtc = 2;
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -1017,18 +1024,32 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_JUNIPER:
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
+ /* set num crtcs */
+ if (rdev->family == CHIP_CEDAR)
+ rdev->num_crtc = 4;
+ else
+ rdev->num_crtc = 6;
rdev->asic = &evergreen_asic;
break;
case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
rdev->asic = &sumo_asic;
break;
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
+ /* set num crtcs */
+ if (rdev->family == CHIP_CAICOS)
+ rdev->num_crtc = 4;
+ else
+ rdev->num_crtc = 6;
rdev->asic = &btc_asic;
break;
case CHIP_CAYMAN:
rdev->asic = &cayman_asic;
+ /* set num crtcs */
+ rdev->num_crtc = 6;
break;
default:
/* FIXME: not supported yet */
@@ -1040,18 +1061,6 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic->set_memory_clock = NULL;
}
- /* set the number of crtcs */
- if (rdev->flags & RADEON_SINGLE_CRTC)
- rdev->num_crtc = 1;
- else {
- if (ASIC_IS_DCE41(rdev))
- rdev->num_crtc = 2;
- else if (ASIC_IS_DCE4(rdev))
- rdev->num_crtc = 6;
- else
- rdev->num_crtc = 2;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 90dfb2b8cf03..bf2b61584cdb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1246,6 +1246,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
}
*dcpll = *p1pll;
+ rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
+ if (rdev->clock.max_pixel_clock == 0)
+ rdev->clock.max_pixel_clock = 40000;
+
return true;
}
@@ -2316,6 +2320,14 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
le16_to_cpu(clock_info->r600.usVDDC);
}
+ /* patch up vddc if necessary */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
+ u16 vddc;
+
+ if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+ }
+
if (rdev->flags & RADEON_IS_IGP) {
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
@@ -2603,6 +2615,10 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage_level == 0xff01)
+ return;
+
switch (crev) {
case 1:
args.v1.ucVoltageType = voltage_type;
@@ -2622,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+int radeon_atom_get_max_vddc(struct radeon_device *rdev,
+ u16 *voltage)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+ switch (crev) {
+ case 1:
+ return -EINVAL;
+ case 2:
+ args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+ args.v2.ucVoltageMode = 0;
+ args.v2.usVoltageLevel = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
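radeon_atom_get_max_vddc() backs the 0xff01 handling elsewhere in this series: power-state entries carrying that placeholder are patched to the board's maximum VDDC before use, and the set-voltage paths refuse to program 0xff01 directly. A standalone sketch of the patch-up logic, with the AtomBIOS query stubbed out and a made-up voltage:

    #include <stdint.h>
    #include <stdio.h>

    #define VDDC_FLAG_MAX 0xff01  /* "use max vddc" marker, not a real voltage */

    /* Stub for the AtomBIOS query; the real driver asks the SetVoltage table. */
    static int get_max_vddc(uint16_t *vddc)
    {
        *vddc = 1100;  /* made-up value, millivolts */
        return 0;
    }

    static void patch_vddc(uint16_t *voltage)
    {
        uint16_t vddc;

        if (*voltage != VDDC_FLAG_MAX)
            return;
        if (get_max_vddc(&vddc) == 0)
            *voltage = vddc;
    }

    int main(void)
    {
        uint16_t v = VDDC_FLAG_MAX;

        patch_vddc(&v);
        printf("patched vddc: %u mV\n", v);
        return 0;
    }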
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 5249af8931e6..2d48e7a1474b 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -117,7 +117,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
if (p1pll->reference_div < 2)
p1pll->reference_div = 12;
- p2pll->reference_div = p1pll->reference_div;
+ p2pll->reference_div = p1pll->reference_div;
/* These aren't in the device-tree */
if (rdev->family >= CHIP_R420) {
@@ -139,6 +139,8 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
p2pll->pll_out_min = 12500;
p2pll->pll_out_max = 35000;
}
+ /* not sure what the max should be in all cases */
+ rdev->clock.max_pixel_clock = 35000;
spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
spll->reference_div = mpll->reference_div =
@@ -151,7 +153,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
else
rdev->clock.default_sclk =
radeon_legacy_get_engine_clock(rdev);
-
+
val = of_get_property(dp, "ATY,MCLK", NULL);
if (val && *val)
rdev->clock.default_mclk = (*val) / 10;
@@ -160,7 +162,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
radeon_legacy_get_memory_clock(rdev);
DRM_INFO("Using device-tree clock info\n");
-
+
return true;
}
#else
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5b991f7c6e2a..e4594676a07c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -866,6 +866,11 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
rdev->clock.default_sclk = sclk;
rdev->clock.default_mclk = mclk;
+ if (RBIOS32(pll_info + 0x16))
+ rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
+ else
+ rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */
+
return true;
}
return false;
@@ -1548,10 +1553,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
(rdev->pdev->subsystem_device == 0x4a48)) {
/* Mac X800 */
rdev->mode_info.connector_table = CT_MAC_X800;
- } else if ((rdev->pdev->device == 0x4150) &&
+ } else if ((of_machine_is_compatible("PowerMac7,2") ||
+ of_machine_is_compatible("PowerMac7,3")) &&
+ (rdev->pdev->device == 0x4150) &&
(rdev->pdev->subsystem_vendor == 0x1002) &&
(rdev->pdev->subsystem_device == 0x4150)) {
- /* Mac G5 9600 */
+ /* Mac G5 tower 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
} else
#endif /* CONFIG_PPC_PMAC */
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ee1dccb3fec9..cbfca3a24fdf 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -44,6 +44,8 @@ extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
+bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
+
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -626,8 +628,14 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
static int radeon_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
/* XXX check mode bandwidth */
- /* XXX verify against max DAC output frequency */
+
+ if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
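mode->clock is in kHz while max_pixel_clock is kept in 10 kHz units as read from the firmware tables, hence the divide by ten before the comparison. A trivial standalone sketch of the check:

    #include <stdio.h>

    /* mode clock in kHz, limit in 10 kHz units (as stored by the driver) */
    static int clock_too_high(int mode_clock_khz, unsigned max_pixel_clock)
    {
        return (mode_clock_khz / 10) > (int)max_pixel_clock;
    }

    int main(void)
    {
        /* 400 MHz limit expressed as 40000 * 10 kHz */
        printf("%d\n", clock_too_high(340000, 40000));  /* 340 MHz: ok   */
        printf("%d\n", clock_too_high(450000, 40000));  /* 450 MHz: high */
        return 0;
    }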
@@ -830,6 +838,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
drm_get_connector_name(connector));
+ /* rs690 seems to have a problem with connectors not existing and always
+ * returning a block of 0's. If we see this, just stop polling on this output */
+ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+ ret = connector_status_disconnected;
+ DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
+ radeon_connector->ddc_bus = NULL;
+ }
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
@@ -1015,6 +1030,11 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
} else
return MODE_CLOCK_HIGH;
}
+
+ /* check against the max pixel clock */
+ if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -1052,10 +1072,11 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
int ret;
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- struct drm_encoder *encoder;
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_display_mode *mode;
if (!radeon_dig_connector->edp_on)
@@ -1067,7 +1088,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
ATOM_TRANSMITTER_ACTION_POWER_OFF);
if (ret > 0) {
- encoder = radeon_best_single_encoder(connector);
if (encoder) {
radeon_fixup_lvds_native_mode(encoder, connector);
/* add scaled modes */
@@ -1091,8 +1111,14 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
- } else
+ } else {
+ /* need to setup ddc on the bridge */
+ if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ if (encoder)
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+ }
ret = radeon_ddc_get_modes(radeon_connector);
+ }
return ret;
}
@@ -1176,14 +1202,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
@@ -1203,6 +1230,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
+ /* need to setup ddc on the bridge */
+ if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ if (encoder)
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+ }
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
@@ -1217,6 +1249,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
}
}
+
+ if ((ret == connector_status_disconnected) &&
+ radeon_connector->dac_load_detect) {
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ if (encoder) {
+ encoder_funcs = encoder->helper_private;
+ ret = encoder_funcs->detect(encoder, connector);
+ }
+ }
}
radeon_connector_update_scratch_regs(connector, ret);
@@ -1231,7 +1273,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
/* XXX check mode bandwidth */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
@@ -1241,7 +1284,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
- /* AVIVO hardware supports downscaling modes larger than the panel
+ /* AVIVO hardware supports downscaling modes larger than the panel
* to the panel size, but I'm not sure this is desirable.
*/
if ((mode->hdisplay > native_mode->hdisplay) ||
@@ -1390,6 +1433,10 @@ radeon_add_atom_connector(struct drm_device *dev,
default:
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1411,6 +1458,12 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ }
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 8c1916941871..fae00c0d75aa 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -228,6 +228,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
parser.filp = filp;
parser.rdev = rdev;
parser.dev = rdev->dev;
+ parser.family = rdev->family;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5b61364e31f4..7cfaa7e2f3b5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -82,6 +82,8 @@ static const char radeon_family_name[][16] = {
"CYPRESS",
"HEMLOCK",
"PALM",
+ "SUMO",
+ "SUMO2",
"BARTS",
"TURKS",
"CAICOS",
@@ -213,6 +215,8 @@ int radeon_wb_init(struct radeon_device *rdev)
return r;
}
+ /* clear wb memory */
+ memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
@@ -752,6 +756,7 @@ int radeon_device_init(struct radeon_device *rdev,
dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
+ rdev->need_dma32 = true;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ae247eec87c0..292f73f0ddbd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -264,6 +264,8 @@ static void radeon_unpin_work_func(struct work_struct *__work)
radeon_bo_unreserve(work->old_rbo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
+
+ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
kfree(work);
}
@@ -371,6 +373,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
+ /* take a reference to the old object */
+ drm_gem_object_reference(obj);
rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
INIT_WORK(&work->work, radeon_unpin_work_func);
@@ -378,12 +382,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (radeon_crtc->unpin_work) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(work);
- radeon_fence_unref(&fence);
-
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- return -EBUSY;
+ r = -EBUSY;
+ goto unlock_free;
}
radeon_crtc->unpin_work = work;
radeon_crtc->deferred_flip_completion = 0;
@@ -497,6 +498,8 @@ pflip_cleanup1:
pflip_cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
radeon_crtc->unpin_work = NULL;
+unlock_free:
+ drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
spin_unlock_irqrestore(&dev->event_lock, flags);
radeon_fence_unref(&fence);
kfree(work);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1d330606292f..73dfbe8e5f9e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -113,7 +113,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_audio = 1;
+int radeon_audio = 0;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
@@ -151,7 +151,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
-MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
+MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
module_param_named(audio, radeon_audio, int, 0444);
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 1b557554696e..b293487e5aa3 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -367,7 +367,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
}
if (ASIC_IS_DCE3(rdev) &&
- (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
+ ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ radeon_encoder_is_dp_bridge(encoder))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
@@ -660,21 +661,16 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if (radeon_encoder_is_dp_bridge(encoder))
return ATOM_ENCODER_MODE_DP;
+ /* DVO is always DVO */
+ if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
+ return ATOM_ENCODER_MODE_DVO;
+
connector = radeon_get_connector_for_encoder(encoder);
- if (!connector) {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- return ATOM_ENCODER_MODE_DVI;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- default:
- return ATOM_ENCODER_MODE_CRT;
- }
- }
+ /* if we don't have an active device yet, just use one of
+ * the connectors tied to the encoder.
+ */
+ if (!connector)
+ connector = radeon_get_connector_for_encoder_init(encoder);
radeon_connector = to_radeon_connector(connector);
switch (connector->connector_type) {
@@ -954,10 +950,15 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
int dp_lane_count = 0;
int connector_object_id = 0;
int igp_lane_info = 0;
+ int dig_encoder = dig->dig_encoder;
- if (action == ATOM_TRANSMITTER_ACTION_INIT)
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
connector = radeon_get_connector_for_encoder_init(encoder);
- else
+ /* just needed to avoid bailing in the encoder check. the encoder
+ * isn't used for init
+ */
+ dig_encoder = 0;
+ } else
connector = radeon_get_connector_for_encoder(encoder);
if (connector) {
@@ -973,7 +974,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
/* no dig encoder assigned */
- if (dig->dig_encoder == -1)
+ if (dig_encoder == -1)
return;
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
@@ -1023,7 +1024,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if (dig->linkb)
args.v3.acConfig.ucLinkSel = 1;
- if (dig->dig_encoder & 1)
+ if (dig_encoder & 1)
args.v3.acConfig.ucEncoderSel = 1;
/* Select the PLL for the PHY
@@ -1073,7 +1074,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v3.acConfig.fDualLinkConnector = 1;
}
} else if (ASIC_IS_DCE32(rdev)) {
- args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
+ args.v2.acConfig.ucEncoderSel = dig_encoder;
if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;
@@ -1089,9 +1090,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
break;
}
- if (is_dp)
+ if (is_dp) {
args.v2.acConfig.fCoherentMode = 1;
- else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ args.v2.acConfig.fDPConnector = 1;
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
if (radeon_encoder->pixel_clock > 165000)
@@ -1100,7 +1102,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
} else {
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
- if (dig->dig_encoder)
+ if (dig_encoder)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
@@ -1430,7 +1432,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ else
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -1521,26 +1527,29 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
}
if (ext_encoder) {
- int action;
-
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
- if (ASIC_IS_DCE41(rdev))
- action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
- else
- action = ATOM_ENABLE;
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev))
- action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
- else
- action = ATOM_DISABLE;
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
break;
}
- atombios_external_encoder_setup(encoder, ext_encoder, action);
}
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
@@ -1999,6 +2008,65 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
return connector_status_disconnected;
}
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+ u32 bios_0_scratch;
+
+ if (!ASIC_IS_DCE4(rdev))
+ return connector_status_unknown;
+
+ if (!ext_encoder)
+ return connector_status_unknown;
+
+ if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+ return connector_status_unknown;
+
+ /* load detect on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+ return connector_status_connected; /* CTV */
+ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+ return connector_status_connected; /* STV */
+ }
+ return connector_status_disconnected;
+}
+
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+
+ if (ext_encoder)
+ /* ddc_setup on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
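radeon_atom_dig_detect() leans on the external encoder latching its DAC load-detect result into BIOS scratch register 0; the driver then only tests the bits for the devices the connector claims to support. A self-contained sketch of that bit-testing pattern, with illustrative mask values rather than the real ATOM_S0_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative scratch-register bit assignments, not the real ATOM ones. */
    #define S0_CRT1_MASK  (1u << 0)
    #define S0_CRT2_MASK  (1u << 1)
    #define S0_TV1_MASK   (1u << 2)

    #define DEV_CRT1  (1u << 0)
    #define DEV_CRT2  (1u << 1)
    #define DEV_TV1   (1u << 2)

    static int detect_connected(uint32_t devices, uint32_t scratch0)
    {
        if ((devices & DEV_CRT1) && (scratch0 & S0_CRT1_MASK))
            return 1;
        if ((devices & DEV_CRT2) && (scratch0 & S0_CRT2_MASK))
            return 1;
        if ((devices & DEV_TV1) && (scratch0 & S0_TV1_MASK))
            return 1;
        return 0;
    }

    int main(void)
    {
        printf("CRT1 load detected: %d\n", detect_connected(DEV_CRT1, S0_CRT1_MASK));
        printf("nothing detected:   %d\n", detect_connected(DEV_CRT1, 0));
        return 0;
    }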
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2162,7 +2230,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
.mode_set = radeon_atom_encoder_mode_set,
.commit = radeon_atom_encoder_commit,
.disable = radeon_atom_encoder_disable,
- /* no detect for TMDS/LVDS yet */
+ .detect = radeon_atom_dig_detect,
};
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 6f1d9e563e77..ec2f1ea84f81 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -81,6 +81,8 @@ enum radeon_family {
CHIP_CYPRESS,
CHIP_HEMLOCK,
CHIP_PALM,
+ CHIP_SUMO,
+ CHIP_SUMO2,
CHIP_BARTS,
CHIP_TURKS,
CHIP_CAICOS,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 1f8229436570..021d2b6b556f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,6 +40,35 @@
#include "radeon.h"
#include "radeon_trace.h"
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+{
+ if (rdev->wb.enabled) {
+ u32 scratch_index;
+ if (rdev->wb.use_event)
+ scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ else
+ scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
+ } else
+ WREG32(rdev->fence_drv.scratch_reg, seq);
+}
+
+static u32 radeon_fence_read(struct radeon_device *rdev)
+{
+ u32 seq;
+
+ if (rdev->wb.enabled) {
+ u32 scratch_index;
+ if (rdev->wb.use_event)
+ scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ else
+ scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
+ } else
+ seq = RREG32(rdev->fence_drv.scratch_reg);
+ return seq;
+}
+
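The new helpers hide whether a fence sequence lives in a GPU scratch register or in the write-back page: with write-back enabled, the scratch register number is converted to a byte offset into the page and the value is stored there little-endian. A self-contained sketch of the index arithmetic, with illustrative offsets and register numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define WB_EVENT_OFFSET   3072  /* illustrative byte offsets into the */
    #define WB_SCRATCH_OFFSET    0  /* write-back page, not the real ones */

    static uint32_t wb_page[1024];  /* 4 KiB write-back page, as dwords */

    static void fence_write(int use_event, unsigned scratch_reg,
                            unsigned reg_base, uint32_t seq)
    {
        unsigned off = (use_event ? WB_EVENT_OFFSET : WB_SCRATCH_OFFSET)
                       + (scratch_reg - reg_base);

        wb_page[off / 4] = seq;  /* the kernel code also swaps via cpu_to_le32() */
    }

    int main(void)
    {
        fence_write(1, 0x8504, 0x8500, 42);
        printf("seq at event slot: %u\n",
               (unsigned)wb_page[(WB_EVENT_OFFSET + 4) / 4]);
        return 0;
    }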
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
unsigned long irq_flags;
@@ -50,12 +79,12 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
return 0;
}
fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
- if (!rdev->cp.ready) {
+ if (!rdev->cp.ready)
/* FIXME: cp is not running assume everythings is done right
* away
*/
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
- } else
+ radeon_fence_write(rdev, fence->seq);
+ else
radeon_fence_ring_emit(rdev, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
@@ -73,15 +102,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
bool wake = false;
unsigned long cjiffies;
- if (rdev->wb.enabled) {
- u32 scratch_index;
- if (rdev->wb.use_event)
- scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- else
- scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
- } else
- seq = RREG32(rdev->fence_drv.scratch_reg);
+ seq = radeon_fence_read(rdev);
if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = jiffies;
@@ -251,7 +272,7 @@ retry:
r = radeon_gpu_reset(rdev);
if (r)
return r;
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+ radeon_fence_write(rdev, fence->seq);
rdev->gpu_lockup = false;
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
@@ -351,7 +372,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return r;
}
- WREG32(rdev->fence_drv.scratch_reg, 0);
+ radeon_fence_write(rdev, 0);
atomic_set(&rdev->fence_drv.seq, 0);
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
@@ -391,7 +412,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct radeon_fence *fence;
seq_printf(m, "Last signaled fence 0x%08X\n",
- RREG32(rdev->fence_drv.scratch_reg));
+ radeon_fence_read(rdev));
if (!list_empty(&rdev->fence_drv.emited)) {
fence = list_entry(rdev->fence_drv.emited.prev,
struct radeon_fence, list);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 977a341266b6..6df4e3cec0c2 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -483,6 +483,8 @@ extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
+extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 86eda1ea94df..aaa19dc418a0 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -487,6 +487,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_RV6XX:
case THERMAL_TYPE_RV770:
case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_NI:
case THERMAL_TYPE_SUMO:
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
if (IS_ERR(rdev->pm.int_hwmon_dev)) {
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 92f1900dc7ca..ea49752ee99c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -758,6 +758,5 @@ r600 0x9400
0x00009714 VC_ENHANCE
0x00009830 DB_DEBUG
0x00009838 DB_WATERMARKS
-0x00028D28 DB_SRESULTS_COMPARE_STATE0
0x00028D44 DB_ALPHA_TO_MASK
0x00009700 VC_CNTL
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index ef8a5babe9f7..6f508ffd1035 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -105,6 +105,9 @@ void rv770_pm_misc(struct radeon_device *rdev)
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ /* 0xff01 is a flag rather then an actual voltage */
+ if (voltage->voltage == 0xff01)
+ return;
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bf5f83ea14fe..cb1ee4e0050a 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -647,9 +647,6 @@ int savage_driver_firstopen(struct drm_device *dev)
ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
&dev_priv->aperture);
- if (ret)
- return ret;
-
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 90e23e0bfadb..58c271ebc0f7 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,6 +31,7 @@
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
@@ -484,7 +485,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
- from_page = read_mapping_page(swap_space, i, NULL);
+ from_page = shmem_read_mapping_page(swap_space, i);
if (IS_ERR(from_page)) {
ret = PTR_ERR(from_page);
goto out_err;
@@ -557,7 +558,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
- to_page = read_mapping_page(swap_space, i, NULL);
+ to_page = shmem_read_mapping_page(swap_space, i);
if (unlikely(IS_ERR(to_page))) {
ret = PTR_ERR(to_page);
goto out_err;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 67d2a7585934..36ca465c00ce 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -305,6 +305,7 @@ config HID_MULTITOUCH
- 3M PCT touch screens
- ActionStar dual touch panels
- Cando dual touch panels
+ - Chunghwa panels
- CVTouch panels
- Cypress TrueTouch panels
- Elo TouchSystems IntelliTouch Plus panels
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c957c4b4fe70..6f3289a57888 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1359,6 +1359,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1422,6 +1423,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0b374a6d6db0..a756ee6c7df5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -173,6 +173,9 @@
#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
+#define USB_VENDOR_ID_CHUNGHWAT 0x2247
+#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
+
#define USB_VENDOR_ID_CIDC 0x1677
#define USB_VENDOR_ID_CMEDIA 0x0d8c
@@ -446,6 +449,7 @@
#define USB_VENDOR_ID_LUMIO 0x202e
#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
+#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
@@ -622,6 +626,7 @@
#define USB_VENDOR_ID_UCLOGIC 0x5543
#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
+#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60 0x0064
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index a5eda4c8127a..0ec91c18a421 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -501,17 +501,9 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
- /*
- * The device reponds with 'invalid report id' when feature
- * report switching it into multitouch mode is sent to it.
- *
- * This results in -EIO from the _raw low-level transport callback,
- * but there seems to be no other way of switching the mode.
- * Thus the super-ugly hacky success check below.
- */
ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
HID_FEATURE_REPORT);
- if (ret != -EIO) {
+ if (ret != sizeof(feature)) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index ecd4d2db9e80..62cac4dc3b62 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -64,6 +64,7 @@ struct mt_device {
struct mt_class *mtclass; /* our mt device class */
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
+ int last_mt_collection; /* last known mt-related collection */
__s8 inputmode; /* InputMode HID feature, -1 if non-existent */
__u8 num_received; /* how many contacts we received */
__u8 num_expected; /* expected last contact index */
@@ -225,8 +226,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
cls->sn_move);
/* touchscreen emulation */
set_abs(hi->input, ABS_X, field, cls->sn_move);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_GD_Y:
if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -237,8 +240,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
cls->sn_move);
/* touchscreen emulation */
set_abs(hi->input, ABS_Y, field, cls->sn_move);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
}
return 0;
@@ -246,31 +251,42 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_DIGITIZER:
switch (usage->hid) {
case HID_DG_INRANGE:
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_CONFIDENCE:
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_TIPSWITCH:
hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_CONTACTID:
+ if (!td->maxcontacts)
+ td->maxcontacts = MT_DEFAULT_MAXCONTACT;
input_mt_init_slots(hi->input, td->maxcontacts);
td->last_slot_field = usage->hid;
td->last_field_index = field->index;
+ td->last_mt_collection = usage->collection_index;
return 1;
case HID_DG_WIDTH:
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_TOUCH_MAJOR);
set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field,
cls->sn_width);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_HEIGHT:
hid_map_usage(hi, usage, bit, max,
@@ -279,8 +295,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
cls->sn_height);
input_set_abs_params(hi->input,
ABS_MT_ORIENTATION, 0, 1, 0, 0);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_TIPPRESSURE:
if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -292,16 +310,20 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
/* touchscreen emulation */
set_abs(hi->input, ABS_PRESSURE, field,
cls->sn_pressure);
- td->last_slot_field = usage->hid;
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index) {
+ td->last_slot_field = usage->hid;
+ td->last_field_index = field->index;
+ }
return 1;
case HID_DG_CONTACTCOUNT:
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index)
+ td->last_field_index = field->index;
return 1;
case HID_DG_CONTACTMAX:
/* we don't set td->last_slot_field as contactcount and
* contact max are global to the report */
- td->last_field_index = field->index;
+ if (td->last_mt_collection == usage->collection_index)
+ td->last_field_index = field->index;
return -1;
}
/* let hid-input decide for the others */
@@ -516,6 +538,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
td->mtclass = mtclass;
td->inputmode = -1;
+ td->last_mt_collection = -1;
hid_set_drvdata(hdev, td);
ret = hid_parse(hdev);
@@ -526,9 +549,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto fail;
- if (!td->maxcontacts)
- td->maxcontacts = MT_DEFAULT_MAXCONTACT;
-
td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
GFP_KERNEL);
if (!td->slots) {
@@ -593,6 +613,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
+ /* Chunghwa Telecom touch panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
+ USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+
/* CVTouch panels */
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
@@ -651,6 +676,9 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
USB_DEVICE_ID_CRYSTALTOUCH) },
+ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
+ USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -681,10 +709,10 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
USB_DEVICE_ID_MTP)},
{ .driver_data = MT_CLS_CONFIDENCE,
- HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
{ .driver_data = MT_CLS_CONFIDENCE,
- HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
USB_DEVICE_ID_MTP_SITRONIX)},
/* Touch International panels */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0e30b140edca..621959d5cc42 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index ff3c644888b1..7c1188b53c3e 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -248,12 +248,15 @@ static int hiddev_release(struct inode * inode, struct file * file)
usbhid_close(list->hiddev->hid);
usbhid_put_power(list->hiddev->hid);
} else {
+ mutex_unlock(&list->hiddev->existancelock);
kfree(list->hiddev);
+ kfree(list);
+ return 0;
}
}
- kfree(list);
mutex_unlock(&list->hiddev->existancelock);
+ kfree(list);
return 0;
}
@@ -923,10 +926,11 @@ void hiddev_disconnect(struct hid_device *hid)
usb_deregister_dev(usbhid->intf, &hiddev_class);
if (hiddev->open) {
+ mutex_unlock(&hiddev->existancelock);
usbhid_close(hiddev->hid);
wake_up_interruptible(&hiddev->wait);
} else {
+ mutex_unlock(&hiddev->existancelock);
kfree(hiddev);
}
- mutex_unlock(&hiddev->existancelock);
}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index b5e892017e0c..dcb78a7a8047 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -268,6 +268,7 @@ static struct device_attribute atk_name_attr =
static void atk_init_attribute(struct device_attribute *attr, char *name,
sysfs_show_func show)
{
+ sysfs_attr_init(&attr->attr);
attr->attr.name = name;
attr->attr.mode = 0444;
attr->show = show;
@@ -1188,19 +1189,15 @@ static int atk_create_files(struct atk_data *data)
int err;
list_for_each_entry(s, &data->sensor_list, list) {
- sysfs_attr_init(&s->input_attr.attr);
err = device_create_file(data->hwmon_dev, &s->input_attr);
if (err)
return err;
- sysfs_attr_init(&s->label_attr.attr);
err = device_create_file(data->hwmon_dev, &s->label_attr);
if (err)
return err;
- sysfs_attr_init(&s->limit1_attr.attr);
err = device_create_file(data->hwmon_dev, &s->limit1_attr);
if (err)
return err;
- sysfs_attr_init(&s->limit2_attr.attr);
err = device_create_file(data->hwmon_dev, &s->limit2_attr);
if (err)
return err;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 9577c432e77f..0070d5476dd0 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -97,9 +97,7 @@ struct platform_data {
struct pdev_entry {
struct list_head list;
struct platform_device *pdev;
- unsigned int cpu;
u16 phys_proc_id;
- u16 cpu_core_id;
};
static LIST_HEAD(pdev_list);
@@ -296,7 +294,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
* If the TjMax is not plausible, an assumption
* will be used
*/
- if (val > 80 && val < 120) {
+ if (val) {
dev_info(dev, "TjMax is %d C.\n", val);
return val * 1000;
}
@@ -304,24 +302,9 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
/*
* An assumption is made for early CPUs and unreadable MSR.
- * NOTE: the given value may not be correct.
+ * NOTE: the calculated value may not be correct.
*/
-
- switch (c->x86_model) {
- case 0xe:
- case 0xf:
- case 0x16:
- case 0x1a:
- dev_warn(dev, "TjMax is assumed as 100 C!\n");
- return 100000;
- case 0x17:
- case 0x1c: /* Atom CPUs */
- return adjust_tjmax(c, id, dev);
- default:
- dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
- " using default TjMax of 100C.\n", c->x86_model);
- return 100000;
- }
+ return adjust_tjmax(c, id, dev);
}
static void __devinit get_ucode_rev_on_cpu(void *edx)
@@ -341,7 +324,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (!err) {
val = (eax >> 16) & 0xff;
- if (val > 80 && val < 120)
+ if (val)
return val * 1000;
}
dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
@@ -350,6 +333,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
+ sysfs_attr_init(&pdata->name_attr.attr);
pdata->name_attr.attr.name = "name";
pdata->name_attr.attr.mode = S_IRUGO;
pdata->name_attr.show = show_name;
@@ -372,6 +356,7 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
for (i = 0; i < MAX_ATTRS; i++) {
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
attr_no);
+ sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
@@ -422,7 +407,7 @@ static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
}
}
-static int chk_ucode_version(struct platform_device *pdev)
+static int __devinit chk_ucode_version(struct platform_device *pdev)
{
struct cpuinfo_x86 *c = &cpu_data(pdev->id);
int err;
@@ -509,8 +494,8 @@ static int create_core_data(struct platform_data *pdata,
/*
* Provide a single set of attributes for all HT siblings of a core
* to avoid duplicate sensors (the processor ID and core ID of all
- * HT siblings of a core is the same).
- * Skip if a HT sibling of this core is already online.
+ * HT siblings of a core are the same).
+ * Skip if a HT sibling of this core is already registered.
* This is not an error.
*/
if (pdata->core_data[attr_no] != NULL)
@@ -666,9 +651,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
}
pdev_entry->pdev = pdev;
- pdev_entry->cpu = cpu;
pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
- pdev_entry->cpu_core_id = TO_CORE_ID(cpu);
list_add_tail(&pdev_entry->list, &pdev_list);
mutex_unlock(&pdev_list_mutex);
@@ -770,10 +753,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
coretemp_remove_core(pdata, &pdev->dev, indx);
/*
- * If a core is taken offline, but a HT sibling of the same core is
- * still online, register the alternate sibling. This ensures that
- * exactly one set of attributes is provided as long as at least one
- * HT sibling of a core is online.
+ * If a HT sibling of a core is taken offline, but another HT sibling
+ * of the same core is still online, register the alternate sibling.
+ * This ensures that exactly one set of attributes is provided as long
+ * as at least one HT sibling of a core is online.
*/
for_each_sibling(i, cpu) {
if (i != cpu) {
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 537409d07ee7..1a409c5bc9bc 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -947,6 +947,7 @@ static int aem_register_sensors(struct aem_data *data,
/* Set up read-only sensors */
while (ro->label) {
+ sysfs_attr_init(&sensors->dev_attr.attr);
sensors->dev_attr.attr.name = ro->label;
sensors->dev_attr.attr.mode = S_IRUGO;
sensors->dev_attr.show = ro->show;
@@ -963,6 +964,7 @@ static int aem_register_sensors(struct aem_data *data,
/* Set up read-write sensors */
while (rw->label) {
+ sysfs_attr_init(&sensors->dev_attr.attr);
sensors->dev_attr.attr.name = rw->label;
sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
sensors->dev_attr.show = rw->show;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 06d4eafcf76b..41dbf8161ed7 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -358,6 +358,7 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type,
else if (type == POWER_SENSOR)
sprintf(n, power_sensor_name_templates[func], "power", counter);
+ sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr);
data->sensors[sensor].attr[func].dev_attr.attr.name = n;
data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO;
data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor;
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 0f9fc40379cd..e855d3b0bd1f 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -136,15 +136,29 @@ static int max6642_detect(struct i2c_client *client,
if (man_id != 0x4D)
return -ENODEV;
+ /* sanity check */
+ if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D
+ || i2c_smbus_read_byte_data(client, 0x06) != 0x4D
+ || i2c_smbus_read_byte_data(client, 0xff) != 0x4D)
+ return -ENODEV;
+
/*
* We read the config and status register, the 4 lower bits in the
* config register should be zero and bit 5, 3, 1 and 0 should be
* zero in the status register.
*/
reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG);
+ if ((reg_config & 0x0f) != 0x00)
+ return -ENODEV;
+
+ /* in between, another round of sanity checks */
+ if (i2c_smbus_read_byte_data(client, 0x04) != reg_config
+ || i2c_smbus_read_byte_data(client, 0x06) != reg_config
+ || i2c_smbus_read_byte_data(client, 0xff) != reg_config)
+ return -ENODEV;
+
reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS);
- if (((reg_config & 0x0f) != 0x00) ||
- ((reg_status & 0x2b) != 0x00))
+ if ((reg_status & 0x2b) != 0x00)
return -ENODEV;
strlcpy(info->type, "max6642", I2C_NAME_SIZE);
@@ -246,7 +260,7 @@ static SENSOR_DEVICE_ATTR_2(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH);
-static SENSOR_DEVICE_ATTR(temp_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
@@ -256,7 +270,7 @@ static struct attribute *max6642_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
NULL
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 98799bab69ce..354770ed3186 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -707,6 +707,7 @@ do { \
struct sensor_device_attribute *a \
= &data->_type##s[data->num_##_type##s].attribute; \
BUG_ON(data->num_attributes >= data->max_attributes); \
+ sysfs_attr_init(&a->dev_attr.attr); \
a->dev_attr.attr.name = _name; \
a->dev_attr.attr.mode = _mode; \
a->dev_attr.show = _show; \
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 92b42db43bcf..b39f52e2752a 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -232,6 +232,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
attr = &attrs->in;
attr->index = channel;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attrs->in_name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.show = s3c_hwmon_ch_show;
@@ -249,6 +250,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
attr = &attrs->label;
attr->index = channel;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attrs->label_name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.show = s3c_hwmon_label_show;
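
The hwmon hunks above (asus_atk0110, coretemp, ibmaem, ibmpex, pmbus_core, s3c-hwmon) all apply the same fix: a dynamically created sysfs attribute must be passed through sysfs_attr_init() before it is registered, so lockdep has a valid key for it. A minimal sketch of that idiom follows; the function and attribute names here are hypothetical and not part of the patch.

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "42\n");
	}

	static int example_register(struct device *dev,
				    struct device_attribute *attr)
	{
		/* must be done before device_create_file() on dynamic attrs */
		sysfs_attr_init(&attr->attr);
		attr->attr.name = "example";
		attr->attr.mode = S_IRUGO;
		attr->show = example_show;
		attr->store = NULL;

		return device_create_file(dev, attr);
	}
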
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e5123b1d341..04b09564bfa9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -778,7 +778,8 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
sector_t block)
{
struct ide_cmd cmd;
- int uptodate = 0, nsectors;
+ int uptodate = 0;
+ unsigned int nsectors;
ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu",
rq->cmd[0], (unsigned long long)block);
@@ -1782,7 +1783,6 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f660cd04ec2f..31fb44085c9b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1463,9 +1463,9 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int disconnect = 1;
int release = 0;
- int abort = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(hdr);
+ int ret;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1501,10 +1501,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
start_ep_timer(ep);
__state_set(&ep->com, CLOSING);
attrs.next_state = C4IW_QP_STATE_CLOSING;
- abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
- peer_close_upcall(ep);
- disconnect = 1;
+ if (ret != -ECONNRESET) {
+ peer_close_upcall(ep);
+ disconnect = 1;
+ }
break;
case ABORTING:
disconnect = 0;
@@ -2109,15 +2111,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
break;
}
- mutex_unlock(&ep->com.mutex);
if (close) {
- if (abrupt)
- ret = abort_connection(ep, NULL, gfp);
- else
+ if (abrupt) {
+ close_complete_upcall(ep);
+ ret = send_abort(ep, NULL, gfp);
+ } else
ret = send_halfclose(ep, gfp);
if (ret)
fatal = 1;
}
+ mutex_unlock(&ep->com.mutex);
if (fatal)
release_ep_resources(ep);
return ret;
@@ -2301,6 +2304,31 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+
+ ep = lookup_tid(t, tid);
+ if (is_neg_adv_abort(req->status)) {
+ PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+ ep->hwtid);
+ kfree_skb(skb);
+ return 0;
+ }
+ PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+
+ /*
+ * Wake up any threads in rdma_init() or rdma_fini().
+ */
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ sched(dev, skb);
+ return 0;
+}
+
/*
* Most upcalls from the T4 Core go to sched() to
* schedule the processing on a work queue.
@@ -2317,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_PASS_ESTABLISH] = sched,
[CPL_PEER_CLOSE] = sched,
[CPL_CLOSE_CON_RPL] = sched,
- [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_ABORT_REQ_RSS] = peer_abort_intr,
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 8d8f8add6fcd..1720dc790d13 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
if (ucontext) {
memsize = roundup(memsize, PAGE_SIZE);
hwentries = memsize / sizeof *chp->cq.queue;
+ while (hwentries > T4_MAX_IQ_SIZE) {
+ memsize -= PAGE_SIZE;
+ hwentries = memsize / sizeof *chp->cq.queue;
+ }
}
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 273ffe49525a..0347eed4a167 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -625,7 +625,7 @@ pbl_done:
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
mhp->attr.va_fbo = virt;
mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) length;
+ mhp->attr.len = length;
err = register_mem(rhp, php, mhp, shift);
if (err)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 3b773b05a898..a41578e48c7b 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1207,11 +1207,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
c4iw_get_ep(&qhp->ep->com);
}
ret = rdma_fini(rhp, qhp, ep);
- if (ret) {
- if (internal)
- c4iw_get_ep(&qhp->ep->com);
+ if (ret)
goto err;
- }
break;
case C4IW_QP_STATE_TERMINATE:
set_state(qhp, C4IW_QP_STATE_TERMINATE);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 9f53e68a096a..8ec5237031a0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -469,6 +469,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
#define IB_7322_LT_STATE_CFGENH 0x10
#define IB_7322_LT_STATE_CFGTEST 0x11
+#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
+#define IB_7322_LT_STATE_CFGWAITENH 0x13
/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN 0x0
@@ -498,8 +500,10 @@ static const u8 qib_7322_physportstate[0x20] = {
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+ [IB_7322_LT_STATE_CFGWAITRMTTEST] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGWAITENH] =
+ IB_PHYSPORTSTATE_CFG_WAIT_ENH,
[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -1692,7 +1696,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
break;
}
- if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+ if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
+ ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
+ ibclt == IB_7322_LT_STATE_LINKUP) &&
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
force_h1(ppd);
ppd->cpspec->qdr_reforce = 1;
@@ -7301,12 +7307,17 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
- printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
- ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
- if (enable)
+ u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
+
+ if (enable && !state) {
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+ ppd->dd->unit, ppd->port);
data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- else
+ } else if (!enable && state) {
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+ ppd->dd->unit, ppd->port);
data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+ }
qib_write_kreg_port(ppd, krp_serdesctrl, data);
}
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index a693c56ec8a6..6ae57d23004a 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
* states, or if it transitions from any of the up (INIT or better)
* states into any of the down states (except link recovery), then
* call the chip-specific code to take appropriate actions.
+ *
+ * ppd->lflags could be 0 if this is the first time the interrupt
+ * handler has been called but the link is already up.
*/
- if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+ if (lstate >= IB_PORT_INIT &&
+ (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
ltstate == IB_PHYSPORTSTATE_LINKUP) {
/* transitioned to UP */
if (dd->f_ib_updown(ppd, 1, ibcs))
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index be0921ef6b52..4cf25347b015 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -111,7 +111,8 @@ static void evdev_event(struct input_handle *handle,
rcu_read_unlock();
- wake_up_interruptible(&evdev->wait);
+ if (type == EV_SYN && code == SYN_REPORT)
+ wake_up_interruptible(&evdev->wait);
}
static int evdev_fasync(int fd, struct file *file, int on)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 75e11c7b70fd..da38d97a51b1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1756,7 +1756,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
- clamp(mt_slots, 2, 32);
+ mt_slots = clamp(mt_slots, 2, 32);
} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
mt_slots = 2;
} else {
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index f23a743817db..33d0bdc837c0 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -209,6 +209,7 @@ static void omap_kp_tasklet(unsigned long data)
#endif
}
}
+ input_sync(omap_kp_data->input);
memcpy(keypad_state, new_state, sizeof(keypad_state));
if (key_down) {
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 834cf98e7efb..6876700a4469 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -32,7 +32,7 @@ static const struct {
[SH_KEYSC_MODE_3] = { 2, 4, 7 },
[SH_KEYSC_MODE_4] = { 3, 6, 6 },
[SH_KEYSC_MODE_5] = { 4, 6, 7 },
- [SH_KEYSC_MODE_6] = { 5, 7, 7 },
+ [SH_KEYSC_MODE_6] = { 5, 8, 8 },
};
struct sh_keysc_priv {
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 257e033986e4..0110b5a3a167 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -187,7 +187,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
if (size == 0)
size = xres ? : 1;
- clamp(value, min, max);
+ value = clamp(value, min, max);
mousedev->packet.x = ((value - min) * xres) / size;
mousedev->packet.abs_event = 1;
@@ -201,7 +201,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
if (size == 0)
size = yres ? : 1;
- clamp(value, min, max);
+ value = clamp(value, min, max);
mousedev->packet.y = yres - ((value - min) * yres) / size;
mousedev->packet.abs_event = 1;
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index f3698967edf6..8755f5f3ad37 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -120,21 +120,17 @@ static void serport_ldisc_close(struct tty_struct *tty)
* 'interrupt' routine.
*/
-static unsigned int serport_ldisc_receive(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
{
struct serport *serport = (struct serport*) tty->disc_data;
unsigned long flags;
unsigned int ch_flags;
- int ret = 0;
int i;
spin_lock_irqsave(&serport->lock, flags);
- if (!test_bit(SERPORT_ACTIVE, &serport->flags)) {
- ret = -EINVAL;
+ if (!test_bit(SERPORT_ACTIVE, &serport->flags))
goto out;
- }
for (i = 0; i < count; i++) {
switch (fp[i]) {
@@ -156,8 +152,6 @@ static unsigned int serport_ldisc_receive(struct tty_struct *tty,
out:
spin_unlock_irqrestore(&serport->lock, flags);
-
- return ret == 0 ? count : ret;
}
/*
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 59de638225fe..e35058bcd7b9 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -156,8 +156,10 @@ static int if_open(struct tty_struct *tty, struct file *filp)
if (!cs || !try_module_get(cs->driver->owner))
return -ENODEV;
- if (mutex_lock_interruptible(&cs->mutex))
+ if (mutex_lock_interruptible(&cs->mutex)) {
+ module_put(cs->driver->owner);
return -ERESTARTSYS;
+ }
tty->driver_data = cs;
++cs->open_count;
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 1d44d470897c..86a5c4f7775e 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -674,7 +674,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
* cflags buffer containing error flags for received characters (ignored)
* count number of received characters
*/
-static unsigned int
+static void
gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -683,12 +683,12 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
struct inbuf_t *inbuf;
if (!cs)
- return -ENODEV;
+ return;
inbuf = cs->inbuf;
if (!inbuf) {
dev_err(cs->dev, "%s: no inbuf\n", __func__);
cs_put(cs);
- return -EINVAL;
+ return;
}
tail = inbuf->tail;
@@ -725,8 +725,6 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
gigaset_schedule_event(cs);
cs_put(cs);
-
- return count;
}
/*
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 3ccbff13eaf2..71a8eb6ef71e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -283,6 +283,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
sizeof(struct ph_info_dch) + dch->dev.nrbchan *
sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
+ kfree(phi);
}
/*
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 23f0d5e99f35..713d43b4e563 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -1,3 +1,10 @@
+config LEDS_GPIO_REGISTER
+ bool
+ help
+ This option provides the function gpio_led_register_device.
+ As this function is used by arch code it must not be compiled as a
+ module.
+
menuconfig NEW_LEDS
bool "LED Support"
help
@@ -7,22 +14,14 @@ menuconfig NEW_LEDS
This is not related to standard keyboard LEDs which are controlled
via the input system.
+if NEW_LEDS
+
config LEDS_CLASS
bool "LED Class Support"
- depends on NEW_LEDS
help
This option enables the led sysfs class in /sys/class/leds. You'll
need this to do anything useful with LEDs. If unsure, say N.
-config LEDS_GPIO_REGISTER
- bool
- help
- This option provides the function gpio_led_register_device.
- As this function is used by arch code it must not be compiled as a
- module.
-
-if NEW_LEDS
-
comment "LED drivers"
config LEDS_88PM860X
@@ -391,6 +390,7 @@ config LEDS_NETXBIG
config LEDS_ASIC3
bool "LED support for the HTC ASIC3"
+ depends on LEDS_CLASS
depends on MFD_ASIC3
default y
help
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index c0cff64a1ae6..cc1dc4817fac 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -593,7 +593,7 @@ static void lp5521_unregister_sysfs(struct i2c_client *client)
&lp5521_led_attribute_group);
}
-static int __init lp5521_init_led(struct lp5521_led *led,
+static int __devinit lp5521_init_led(struct lp5521_led *led,
struct i2c_client *client,
int chan, struct lp5521_platform_data *pdata)
{
@@ -637,7 +637,7 @@ static int __init lp5521_init_led(struct lp5521_led *led,
return 0;
}
-static int lp5521_probe(struct i2c_client *client,
+static int __devinit lp5521_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lp5521_chip *chip;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index e19fed25f137..5971e309b234 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -826,7 +826,7 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
return 0;
}
-static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
int chan, struct lp5523_platform_data *pdata)
{
char name[32];
@@ -872,7 +872,7 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
static struct i2c_driver lp5523_driver;
-static int lp5523_probe(struct i2c_client *client,
+static int __devinit lp5523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lp5523_chip *chip;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 70bd738b8b99..574b09afedd3 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -534,6 +534,82 @@ void bitmap_print_sb(struct bitmap *bitmap)
kunmap_atomic(sb, KM_USER0);
}
+/*
+ * bitmap_new_disk_sb
+ * @bitmap
+ *
+ * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb
+ * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
+ * This function verifies 'bitmap_info' and populates the on-disk bitmap
+ * structure, which is to be written to disk.
+ *
+ * Returns: 0 on success, -Exxx on error
+ */
+static int bitmap_new_disk_sb(struct bitmap *bitmap)
+{
+ bitmap_super_t *sb;
+ unsigned long chunksize, daemon_sleep, write_behind;
+ int err = -EINVAL;
+
+ bitmap->sb_page = alloc_page(GFP_KERNEL);
+ if (IS_ERR(bitmap->sb_page)) {
+ err = PTR_ERR(bitmap->sb_page);
+ bitmap->sb_page = NULL;
+ return err;
+ }
+ bitmap->sb_page->index = 0;
+
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+
+ sb->magic = cpu_to_le32(BITMAP_MAGIC);
+ sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
+
+ chunksize = bitmap->mddev->bitmap_info.chunksize;
+ BUG_ON(!chunksize);
+ if (!is_power_of_2(chunksize)) {
+ kunmap_atomic(sb, KM_USER0);
+ printk(KERN_ERR "bitmap chunksize not a power of 2\n");
+ return -EINVAL;
+ }
+ sb->chunksize = cpu_to_le32(chunksize);
+
+ daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
+ if (!daemon_sleep ||
+ (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
+ printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
+ daemon_sleep = 5 * HZ;
+ }
+ sb->daemon_sleep = cpu_to_le32(daemon_sleep);
+ bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+
+ /*
+ * FIXME: write_behind for RAID1. If not specified, what
+ * is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
+ */
+ write_behind = bitmap->mddev->bitmap_info.max_write_behind;
+ if (write_behind > COUNTER_MAX)
+ write_behind = COUNTER_MAX / 2;
+ sb->write_behind = cpu_to_le32(write_behind);
+ bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+
+ /* keep the array size field of the bitmap superblock up to date */
+ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
+
+ memcpy(sb->uuid, bitmap->mddev->uuid, 16);
+
+ bitmap->flags |= BITMAP_STALE;
+ sb->state |= cpu_to_le32(BITMAP_STALE);
+ bitmap->events_cleared = bitmap->mddev->events;
+ sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+
+ bitmap->flags |= BITMAP_HOSTENDIAN;
+ sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
+
+ kunmap_atomic(sb, KM_USER0);
+
+ return 0;
+}
+
/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
@@ -575,7 +651,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
reason = "unrecognized superblock version";
else if (chunksize < 512)
reason = "bitmap chunksize too small";
- else if ((1 << ffz(~chunksize)) != chunksize)
+ else if (!is_power_of_2(chunksize))
reason = "bitmap chunksize not a power of 2";
else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
reason = "daemon sleep period out of range";
@@ -1076,8 +1152,8 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
}
printk(KERN_INFO "%s: bitmap initialized from disk: "
- "read %lu/%lu pages, set %lu bits\n",
- bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
+ "read %lu/%lu pages, set %lu of %lu bits\n",
+ bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);
return 0;
@@ -1332,7 +1408,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
return 0;
}
- if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
+ if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
DEFINE_WAIT(__wait);
/* note that it is safe to do the prepare_to_wait
* after the test as long as we do it before dropping
@@ -1404,10 +1480,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
}
- if (!success && ! (*bmc & NEEDED_MASK))
+ if (!success && !NEEDED(*bmc))
*bmc |= NEEDED_MASK;
- if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
+ if (COUNTER(*bmc) == COUNTER_MAX)
wake_up(&bitmap->overflow_wait);
(*bmc)--;
@@ -1728,9 +1804,16 @@ int bitmap_create(mddev_t *mddev)
vfs_fsync(file, 1);
}
/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
- if (!mddev->bitmap_info.external)
- err = bitmap_read_sb(bitmap);
- else {
+ if (!mddev->bitmap_info.external) {
+ /*
+ * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
+ * instructing us to create a new on-disk bitmap instance.
+ */
+ if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
+ err = bitmap_new_disk_sb(bitmap);
+ else
+ err = bitmap_read_sb(bitmap);
+ } else {
err = 0;
if (mddev->bitmap_info.chunksize == 0 ||
mddev->bitmap_info.daemon_sleep == 0)
@@ -1754,9 +1837,6 @@ int bitmap_create(mddev_t *mddev)
bitmap->chunks = chunks;
bitmap->pages = pages;
bitmap->missing_pages = pages;
- bitmap->counter_bits = COUNTER_BITS;
-
- bitmap->syncchunk = ~0UL;
#ifdef INJECT_FATAL_FAULT_1
bitmap->bp = NULL;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index d0aeaf46d932..b2a127e891ac 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -85,7 +85,6 @@
typedef __u16 bitmap_counter_t;
#define COUNTER_BITS 16
#define COUNTER_BIT_SHIFT 4
-#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
@@ -196,19 +195,10 @@ struct bitmap {
mddev_t *mddev; /* the md device that the bitmap is for */
- int counter_bits; /* how many bits per block counter */
-
/* bitmap chunksize -- how much data does each bit represent? */
unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
unsigned long chunks; /* total number of data chunks for the array */
- /* We hold a count on the chunk currently being synced, and drop
- * it when the last block is started. If the resync is aborted
- * midway, we need to be able to drop that count, so we remember
- * the counted chunk..
- */
- unsigned long syncchunk;
-
__u64 events_cleared;
int need_sync;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 76a5af00a26b..2067288f61f9 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,6 +19,8 @@
#define DM_MSG_PREFIX "io"
#define DM_IO_MAX_REGIONS BITS_PER_LONG
+#define MIN_IOS 16
+#define MIN_BIOS 16
struct dm_io_client {
mempool_t *pool;
@@ -41,33 +43,21 @@ struct io {
static struct kmem_cache *_dm_io_cache;
/*
- * io contexts are only dynamically allocated for asynchronous
- * io. Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as bios! (FIXME: must reduce this).
- */
-
-static unsigned int pages_to_ios(unsigned int pages)
-{
- return 4 * pages; /* too many ? */
-}
-
-/*
* Create a client with mempool and bioset.
*/
-struct dm_io_client *dm_io_client_create(unsigned num_pages)
+struct dm_io_client *dm_io_client_create(void)
{
- unsigned ios = pages_to_ios(num_pages);
struct dm_io_client *client;
client = kmalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
- client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
+ client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
if (!client->pool)
goto bad;
- client->bios = bioset_create(16, 0);
+ client->bios = bioset_create(MIN_BIOS, 0);
if (!client->bios)
goto bad;
@@ -81,13 +71,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
}
EXPORT_SYMBOL(dm_io_client_create);
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
-{
- return mempool_resize(client->pool, pages_to_ios(num_pages),
- GFP_KERNEL);
-}
-EXPORT_SYMBOL(dm_io_client_resize);
-
void dm_io_client_destroy(struct dm_io_client *client)
{
mempool_destroy(client->pool);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1bb73a13ca40..819e37eaaeba 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -27,15 +27,19 @@
#include "dm.h"
+#define SUB_JOB_SIZE 128
+#define SPLIT_COUNT 8
+#define MIN_JOBS 8
+#define RESERVE_PAGES (DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
+
/*-----------------------------------------------------------------
* Each kcopyd client has its own little pool of preallocated
* pages for kcopyd io.
*---------------------------------------------------------------*/
struct dm_kcopyd_client {
- spinlock_t lock;
struct page_list *pages;
- unsigned int nr_pages;
- unsigned int nr_free_pages;
+ unsigned nr_reserved_pages;
+ unsigned nr_free_pages;
struct dm_io_client *io_client;
@@ -67,15 +71,18 @@ static void wake(struct dm_kcopyd_client *kc)
queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}
-static struct page_list *alloc_pl(void)
+/*
+ * Obtain one page for the use of kcopyd.
+ */
+static struct page_list *alloc_pl(gfp_t gfp)
{
struct page_list *pl;
- pl = kmalloc(sizeof(*pl), GFP_KERNEL);
+ pl = kmalloc(sizeof(*pl), gfp);
if (!pl)
return NULL;
- pl->page = alloc_page(GFP_KERNEL);
+ pl->page = alloc_page(gfp);
if (!pl->page) {
kfree(pl);
return NULL;
@@ -90,41 +97,56 @@ static void free_pl(struct page_list *pl)
kfree(pl);
}
-static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
- unsigned int nr, struct page_list **pages)
+/*
+ * Add the provided pages to a client's free page list, releasing
+ * back to the system any beyond the reserved_pages limit.
+ */
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
- struct page_list *pl;
-
- spin_lock(&kc->lock);
- if (kc->nr_free_pages < nr) {
- spin_unlock(&kc->lock);
- return -ENOMEM;
- }
-
- kc->nr_free_pages -= nr;
- for (*pages = pl = kc->pages; --nr; pl = pl->next)
- ;
+ struct page_list *next;
- kc->pages = pl->next;
- pl->next = NULL;
+ do {
+ next = pl->next;
- spin_unlock(&kc->lock);
+ if (kc->nr_free_pages >= kc->nr_reserved_pages)
+ free_pl(pl);
+ else {
+ pl->next = kc->pages;
+ kc->pages = pl;
+ kc->nr_free_pages++;
+ }
- return 0;
+ pl = next;
+ } while (pl);
}
-static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
+ unsigned int nr, struct page_list **pages)
{
- struct page_list *cursor;
+ struct page_list *pl;
+
+ *pages = NULL;
+
+ do {
+ pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
+ if (unlikely(!pl)) {
+ /* Use reserved pages */
+ pl = kc->pages;
+ if (unlikely(!pl))
+ goto out_of_memory;
+ kc->pages = pl->next;
+ kc->nr_free_pages--;
+ }
+ pl->next = *pages;
+ *pages = pl;
+ } while (--nr);
- spin_lock(&kc->lock);
- for (cursor = pl; cursor->next; cursor = cursor->next)
- kc->nr_free_pages++;
+ return 0;
- kc->nr_free_pages++;
- cursor->next = kc->pages;
- kc->pages = pl;
- spin_unlock(&kc->lock);
+out_of_memory:
+ if (*pages)
+ kcopyd_put_pages(kc, *pages);
+ return -ENOMEM;
}
/*
@@ -141,13 +163,16 @@ static void drop_pages(struct page_list *pl)
}
}
-static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
+/*
+ * Allocate and reserve nr_pages for the use of a specific client.
+ */
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
- unsigned int i;
+ unsigned i;
struct page_list *pl = NULL, *next;
- for (i = 0; i < nr; i++) {
- next = alloc_pl();
+ for (i = 0; i < nr_pages; i++) {
+ next = alloc_pl(GFP_KERNEL);
if (!next) {
if (pl)
drop_pages(pl);
@@ -157,17 +182,18 @@ static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
pl = next;
}
+ kc->nr_reserved_pages += nr_pages;
kcopyd_put_pages(kc, pl);
- kc->nr_pages += nr;
+
return 0;
}
static void client_free_pages(struct dm_kcopyd_client *kc)
{
- BUG_ON(kc->nr_free_pages != kc->nr_pages);
+ BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
drop_pages(kc->pages);
kc->pages = NULL;
- kc->nr_free_pages = kc->nr_pages = 0;
+ kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
/*-----------------------------------------------------------------
@@ -216,16 +242,17 @@ struct kcopyd_job {
struct mutex lock;
atomic_t sub_jobs;
sector_t progress;
-};
-/* FIXME: this should scale with the number of pages */
-#define MIN_JOBS 512
+ struct kcopyd_job *master_job;
+};
static struct kmem_cache *_job_cache;
int __init dm_kcopyd_init(void)
{
- _job_cache = KMEM_CACHE(kcopyd_job, 0);
+ _job_cache = kmem_cache_create("kcopyd_job",
+ sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
+ __alignof__(struct kcopyd_job), 0, NULL);
if (!_job_cache)
return -ENOMEM;
@@ -299,7 +326,12 @@ static int run_complete_job(struct kcopyd_job *job)
if (job->pages)
kcopyd_put_pages(kc, job->pages);
- mempool_free(job, kc->job_pool);
+ /*
+ * If this is the master job, the sub jobs have already
+ * completed so we can free everything.
+ */
+ if (job->master_job == job)
+ mempool_free(job, kc->job_pool);
fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
@@ -460,14 +492,14 @@ static void dispatch_job(struct kcopyd_job *job)
wake(kc);
}
-#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
void *context)
{
/* FIXME: tidy this function */
sector_t progress = 0;
sector_t count = 0;
- struct kcopyd_job *job = (struct kcopyd_job *) context;
+ struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
+ struct kcopyd_job *job = sub_job->master_job;
struct dm_kcopyd_client *kc = job->kc;
mutex_lock(&job->lock);
@@ -498,8 +530,6 @@ static void segment_complete(int read_err, unsigned long write_err,
if (count) {
int i;
- struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
- GFP_NOIO);
*sub_job = *job;
sub_job->source.sector += progress;
@@ -511,7 +541,7 @@ static void segment_complete(int read_err, unsigned long write_err,
}
sub_job->fn = segment_complete;
- sub_job->context = job;
+ sub_job->context = sub_job;
dispatch_job(sub_job);
} else if (atomic_dec_and_test(&job->sub_jobs)) {
@@ -531,19 +561,19 @@ static void segment_complete(int read_err, unsigned long write_err,
}
/*
- * Create some little jobs that will do the move between
- * them.
+ * Create some sub jobs to share the work between them.
*/
-#define SPLIT_COUNT 8
-static void split_job(struct kcopyd_job *job)
+static void split_job(struct kcopyd_job *master_job)
{
int i;
- atomic_inc(&job->kc->nr_jobs);
+ atomic_inc(&master_job->kc->nr_jobs);
- atomic_set(&job->sub_jobs, SPLIT_COUNT);
- for (i = 0; i < SPLIT_COUNT; i++)
- segment_complete(0, 0u, job);
+ atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
+ for (i = 0; i < SPLIT_COUNT; i++) {
+ master_job[i + 1].master_job = master_job;
+ segment_complete(0, 0u, &master_job[i + 1]);
+ }
}
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
@@ -553,7 +583,8 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
struct kcopyd_job *job;
/*
- * Allocate a new job.
+ * Allocate an array of jobs consisting of one master job
+ * followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
@@ -577,10 +608,10 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->fn = fn;
job->context = context;
+ job->master_job = job;
- if (job->source.count < SUB_JOB_SIZE)
+ if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
-
else {
mutex_init(&job->lock);
job->progress = 0;
@@ -606,17 +637,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
/*-----------------------------------------------------------------
* Client setup
*---------------------------------------------------------------*/
-int dm_kcopyd_client_create(unsigned int nr_pages,
- struct dm_kcopyd_client **result)
+struct dm_kcopyd_client *dm_kcopyd_client_create(void)
{
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
kc = kmalloc(sizeof(*kc), GFP_KERNEL);
if (!kc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- spin_lock_init(&kc->lock);
spin_lock_init(&kc->job_lock);
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
@@ -633,12 +662,12 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
goto bad_workqueue;
kc->pages = NULL;
- kc->nr_pages = kc->nr_free_pages = 0;
- r = client_alloc_pages(kc, nr_pages);
+ kc->nr_reserved_pages = kc->nr_free_pages = 0;
+ r = client_reserve_pages(kc, RESERVE_PAGES);
if (r)
goto bad_client_pages;
- kc->io_client = dm_io_client_create(nr_pages);
+ kc->io_client = dm_io_client_create();
if (IS_ERR(kc->io_client)) {
r = PTR_ERR(kc->io_client);
goto bad_io_client;
@@ -647,8 +676,7 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
init_waitqueue_head(&kc->destroyq);
atomic_set(&kc->nr_jobs, 0);
- *result = kc;
- return 0;
+ return kc;
bad_io_client:
client_free_pages(kc);
@@ -659,7 +687,7 @@ bad_workqueue:
bad_slab:
kfree(kc);
- return r;
+ return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);
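
The dm-kcopyd and dm-io hunks above change both constructors to drop the page-count hint and to return a pointer (or ERR_PTR() on failure) instead of filling in a result argument. A minimal caller-side sketch of the new convention, matching what the dm-raid1 and dm-snap hunks below do; the surrounding context structure is hypothetical.

	#include <linux/dm-kcopyd.h>
	#include <linux/dm-io.h>
	#include <linux/err.h>

	/* Hypothetical target setup showing the post-patch calling convention. */
	static int example_ctr(struct example_ctx *ctx)
	{
		ctx->kcopyd_client = dm_kcopyd_client_create();
		if (IS_ERR(ctx->kcopyd_client))
			return PTR_ERR(ctx->kcopyd_client);

		ctx->io_client = dm_io_client_create();
		if (IS_ERR(ctx->io_client)) {
			dm_kcopyd_client_destroy(ctx->kcopyd_client);
			return PTR_ERR(ctx->io_client);
		}

		return 0;
	}
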
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a1f321889676..948e3f4925bf 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -449,8 +449,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
lc->io_req.mem.type = DM_IO_VMA;
lc->io_req.notify.fn = NULL;
- lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
- PAGE_SIZE));
+ lc->io_req.client = dm_io_client_create();
if (IS_ERR(lc->io_req.client)) {
r = PTR_ERR(lc->io_req.client);
DMWARN("couldn't allocate disk io client");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index a550a057d991..aa4e570c2cb5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (error == -EOPNOTSUPP || error == -EREMOTEIO)
+ if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
return error;
if (mpio->pgpath)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 976ad4688afc..9bfd057be686 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,8 +22,6 @@
#define DM_MSG_PREFIX "raid1"
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
-#define DM_IO_PAGES 64
-#define DM_KCOPYD_PAGES 64
#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -887,7 +885,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
return NULL;
}
- ms->io_client = dm_io_client_create(DM_IO_PAGES);
+ ms->io_client = dm_io_client_create();
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
mempool_destroy(ms->read_record_pool);
@@ -1117,9 +1115,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_destroy_wq;
}
- r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
- if (r)
+ ms->kcopyd_client = dm_kcopyd_client_create();
+ if (IS_ERR(ms->kcopyd_client)) {
+ r = PTR_ERR(ms->kcopyd_client);
goto err_destroy_wq;
+ }
wakeup_mirrord(ms);
return 0;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 95891dfcbca0..135c2f1fdbfc 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -154,11 +154,6 @@ struct pstore {
struct workqueue_struct *metadata_wq;
};
-static unsigned sectors_to_pages(unsigned sectors)
-{
- return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
-}
-
static int alloc_area(struct pstore *ps)
{
int r = -ENOMEM;
@@ -318,8 +313,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
chunk_size_supplied = 0;
}
- ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
- chunk_size));
+ ps->io_client = dm_io_client_create();
if (IS_ERR(ps->io_client))
return PTR_ERR(ps->io_client);
@@ -368,11 +362,6 @@ static int read_header(struct pstore *ps, int *new_snapshot)
return r;
}
- r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
- ps->io_client);
- if (r)
- return r;
-
r = alloc_area(ps);
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2d330942cb2..9ecff5f3023a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,11 +40,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
#define SNAPSHOT_COPY_PRIORITY 2
/*
- * Reserve 1MB for each snapshot initially (with minimum of 1 page).
- */
-#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
-
-/*
* The size of the mempool used to track chunks in use.
*/
#define MIN_IOS 256
@@ -1116,8 +1111,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
- if (r) {
+ s->kcopyd_client = dm_kcopyd_client_create();
+ if (IS_ERR(s->kcopyd_client)) {
+ r = PTR_ERR(s->kcopyd_client);
ti->error = "Could not create kcopyd client";
goto bad_kcopyd;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cb8380c9767f..451c3bb176d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
+ struct request_queue *q;
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
sector_t dev_size =
@@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
+ /*
+ * Some devices exist without request functions,
+ * such as loop devices not yet bound to backing files.
+ * Forbid the use of such devices.
+ */
+ q = bdev_get_queue(bdev);
+ if (!q || !q->make_request_fn) {
+ DMWARN("%s: %s is not yet initialised: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+ (unsigned long long)start,
+ (unsigned long long)len,
+ (unsigned long long)dev_size);
+ return 1;
+ }
+
if (!dev_size)
return 0;
@@ -1346,7 +1363,8 @@ bool dm_table_supports_discards(struct dm_table *t)
return 0;
/*
- * Ensure that at least one underlying device supports discards.
+ * Unless any target used by the table set discards_supported,
+ * require at least one underlying device to support discards.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
* supporting discard must provide.
@@ -1354,6 +1372,9 @@ bool dm_table_supports_discards(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
+ if (ti->discards_supported)
+ return 1;
+
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_discard_capable, NULL))
return 1;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aa640a85bb21..4332fc2f25d4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -351,6 +351,9 @@ void mddev_resume(mddev_t *mddev)
mddev->suspended = 0;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
+
+ md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
@@ -1750,6 +1753,18 @@ static struct super_type super_types[] = {
},
};
+static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ if (mddev->sync_super) {
+ mddev->sync_super(mddev, rdev);
+ return;
+ }
+
+ BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
+
+ super_types[mddev->major_version].sync_super(mddev, rdev);
+}
+
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
mdk_rdev_t *rdev, *rdev2;
@@ -1781,8 +1796,8 @@ int md_integrity_register(mddev_t *mddev)
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
- if (blk_get_integrity(mddev->gendisk))
- return 0; /* already registered */
+ if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+ return 0; /* shouldn't register, or already is */
list_for_each_entry(rdev, &mddev->disks, same_set) {
/* skip spares and non-functional disks */
if (test_bit(Faulty, &rdev->flags))
@@ -2168,8 +2183,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
/* Don't update this superblock */
rdev->sb_loaded = 2;
} else {
- super_types[mddev->major_version].
- sync_super(mddev, rdev);
+ sync_super(mddev, rdev);
rdev->sb_loaded = 1;
}
}
@@ -2462,7 +2476,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
- if (rdev->mddev->pers->hot_add_disk == NULL)
+ if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
err = rdev->mddev->pers->
hot_remove_disk(rdev->mddev, rdev->raid_disk);
@@ -4619,9 +4633,6 @@ int md_run(mddev_t *mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- md_wakeup_thread(mddev->thread);
- md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
-
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
@@ -4642,6 +4653,10 @@ static int do_md_run(mddev_t *mddev)
bitmap_destroy(mddev);
goto out;
}
+
+ md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
+
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
mddev->changed = 1;
@@ -5259,6 +5274,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ if (!err)
+ md_new_event(mddev);
md_wakeup_thread(mddev->thread);
return err;
}
@@ -6866,8 +6883,8 @@ void md_do_sync(mddev_t *mddev)
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
- printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
- window/2,(unsigned long long) max_sectors/2);
+ printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
+ window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
@@ -7045,7 +7062,6 @@ void md_do_sync(mddev_t *mddev)
}
EXPORT_SYMBOL_GPL(md_do_sync);
-
static int remove_and_add_spares(mddev_t *mddev)
{
mdk_rdev_t *rdev;
@@ -7157,6 +7173,9 @@ static void reap_sync_thread(mddev_t *mddev)
*/
void md_check_recovery(mddev_t *mddev)
{
+ if (mddev->suspended)
+ return;
+
if (mddev->bitmap)
bitmap_daemon_work(mddev);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 0b1fd3f1d85b..1c26c7a08ae6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -124,6 +124,7 @@ struct mddev_s
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
+#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
int suspended;
atomic_t active_io;
@@ -330,6 +331,7 @@ struct mddev_s
atomic_t flush_pending;
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
+ void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5d096096f958..f7431b6d8447 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -497,21 +497,19 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
return best_disk;
}
-static int raid1_congested(void *data, int bits)
+int md_raid1_congested(mddev_t *mddev, int bits)
{
- mddev_t *mddev = data;
conf_t *conf = mddev->private;
int i, ret = 0;
- if (mddev_congested(mddev, bits))
- return 1;
-
rcu_read_lock();
for (i = 0; i < mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
+ BUG_ON(!q);
+
/* Note the '|| 1' - when read_balance prefers
* non-congested targets, it can be removed
*/
@@ -524,7 +522,15 @@ static int raid1_congested(void *data, int bits)
rcu_read_unlock();
return ret;
}
+EXPORT_SYMBOL_GPL(md_raid1_congested);
+static int raid1_congested(void *data, int bits)
+{
+ mddev_t *mddev = data;
+
+ return mddev_congested(mddev, bits) ||
+ md_raid1_congested(mddev, bits);
+}
static void flush_pending_writes(conf_t *conf)
{
@@ -1972,6 +1978,8 @@ static int run(mddev_t *mddev)
return PTR_ERR(conf);
list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (!mddev->gendisk)
+ continue;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
@@ -2013,8 +2021,10 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
- mddev->queue->backing_dev_info.congested_fn = raid1_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
+ if (mddev->queue) {
+ mddev->queue->backing_dev_info.congested_fn = raid1_congested;
+ mddev->queue->backing_dev_info.congested_data = mddev;
+ }
return md_integrity_register(mddev);
}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5fc4ca1af863..e743a64fac4f 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -126,4 +126,6 @@ struct r1bio_s {
*/
#define R1BIO_Returned 6
+extern int md_raid1_congested(mddev_t *mddev, int bits);
+
#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 346e69bfdab3..b72edf35ec54 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -129,7 +129,7 @@ static inline int raid5_dec_bi_hw_segments(struct bio *bio)
static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
- bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
+ bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
/* Find first data disk in a raid6 stripe */
@@ -514,7 +514,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi = &sh->dev[i].req;
bi->bi_rw = rw;
- if (rw == WRITE)
+ if (rw & WRITE)
bi->bi_end_io = raid5_end_write_request;
else
bi->bi_end_io = raid5_end_read_request;
@@ -548,13 +548,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
- if (rw == WRITE &&
+ if ((rw & WRITE) &&
test_bit(R5_ReWrite, &sh->dev[i].flags))
atomic_add(STRIPE_SECTORS,
&rdev->corrected_errors);
generic_make_request(bi);
} else {
- if (rw == WRITE)
+ if (rw & WRITE)
set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %ld on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector);
@@ -585,7 +585,7 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
bio_for_each_segment(bvl, bio, i) {
- int len = bio_iovec_idx(bio, i)->bv_len;
+ int len = bvl->bv_len;
int clen;
int b_offset = 0;
@@ -601,8 +601,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
clen = len;
if (clen > 0) {
- b_offset += bio_iovec_idx(bio, i)->bv_offset;
- bio_page = bio_iovec_idx(bio, i)->bv_page;
+ b_offset += bvl->bv_offset;
+ bio_page = bvl->bv_page;
if (frombio)
tx = async_memcpy(page, bio_page, page_offset,
b_offset, clen, &submit);
@@ -4858,7 +4858,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
printk(KERN_INFO "md/raid:%s: device %s operational as raid"
" disk %d\n",
mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
- } else
+ } else if (rdev->saved_raid_disk != raid_disk)
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
}
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 2d8b4044be36..b2b0c45f32a9 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -20,6 +20,7 @@
*/
#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -49,11 +50,12 @@
#define UNSET (-1U)
-#define DM1105_BOARD_NOAUTO UNSET
-#define DM1105_BOARD_UNKNOWN 0
-#define DM1105_BOARD_DVBWORLD_2002 1
-#define DM1105_BOARD_DVBWORLD_2004 2
-#define DM1105_BOARD_AXESS_DM05 3
+#define DM1105_BOARD_NOAUTO UNSET
+#define DM1105_BOARD_UNKNOWN 0
+#define DM1105_BOARD_DVBWORLD_2002 1
+#define DM1105_BOARD_DVBWORLD_2004 2
+#define DM1105_BOARD_AXESS_DM05 3
+#define DM1105_BOARD_UNBRANDED_I2C_ON_GPIO 4
/* ----------------------------------------------- */
/*
@@ -157,22 +159,38 @@
#define DM1105_MAX 0x04
#define DRIVER_NAME "dm1105"
+#define DM1105_I2C_GPIO_NAME "dm1105-gpio"
#define DM1105_DMA_PACKETS 47
#define DM1105_DMA_PACKET_LENGTH (128*4)
#define DM1105_DMA_BYTES (128 * 4 * DM1105_DMA_PACKETS)
+/* GPIO bit definitions */
+#define GPIO08 (1 << 8)
+#define GPIO13 (1 << 13)
+#define GPIO14 (1 << 14)
+#define GPIO15 (1 << 15)
+#define GPIO16 (1 << 16)
+#define GPIO17 (1 << 17)
+#define GPIO_ALL 0x03ffff
+
/* GPIO's for LNB power control */
-#define DM1105_LNB_MASK 0x00000000
-#define DM1105_LNB_OFF 0x00020000
-#define DM1105_LNB_13V 0x00010100
-#define DM1105_LNB_18V 0x00000100
+#define DM1105_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13))
+#define DM1105_LNB_OFF GPIO17
+#define DM1105_LNB_13V (GPIO16 | GPIO08)
+#define DM1105_LNB_18V GPIO08
/* GPIO's for LNB power control for Axess DM05 */
-#define DM05_LNB_MASK 0x00000000
-#define DM05_LNB_OFF 0x00020000/* actually 13v */
-#define DM05_LNB_13V 0x00020000
-#define DM05_LNB_18V 0x00030000
+#define DM05_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13))
+#define DM05_LNB_OFF GPIO17 /* actually 13v */
+#define DM05_LNB_13V GPIO17
+#define DM05_LNB_18V (GPIO17 | GPIO16)
+
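For reference (an observation, not text from the patch): the new symbolic LNB values reproduce the old magic numbers, e.g. GPIO16 | GPIO08 = 0x10000 | 0x00100 = 0x10100, the former DM1105_LNB_13V, and GPIO17 = 0x20000, the former DM05_LNB_OFF/13V. Only the DM1105/DM05 LNB_MASK values genuinely change: they now cover all GPIOs except GPIO13/GPIO14, presumably so the bit-banged I2C lines added by this patch are left untouched when LNB power is switched.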
+/* GPIOs for LNB power control for the unbranded card with I2C on GPIO */
+#define UNBR_LNB_MASK (GPIO17 | GPIO16)
+#define UNBR_LNB_OFF 0
+#define UNBR_LNB_13V GPIO17
+#define UNBR_LNB_18V (GPIO17 | GPIO16)
static unsigned int card[] = {[0 ... 3] = UNSET };
module_param_array(card, int, NULL, 0444);
@@ -187,7 +205,11 @@ static unsigned int dm1105_devcount;
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct dm1105_board {
- char *name;
+ char *name;
+ struct {
+ u32 mask, off, v13, v18;
+ } lnb;
+ u32 gpio_scl, gpio_sda;
};
struct dm1105_subid {
@@ -199,15 +221,50 @@ struct dm1105_subid {
static const struct dm1105_board dm1105_boards[] = {
[DM1105_BOARD_UNKNOWN] = {
.name = "UNKNOWN/GENERIC",
+ .lnb = {
+ .mask = DM1105_LNB_MASK,
+ .off = DM1105_LNB_OFF,
+ .v13 = DM1105_LNB_13V,
+ .v18 = DM1105_LNB_18V,
+ },
},
[DM1105_BOARD_DVBWORLD_2002] = {
.name = "DVBWorld PCI 2002",
+ .lnb = {
+ .mask = DM1105_LNB_MASK,
+ .off = DM1105_LNB_OFF,
+ .v13 = DM1105_LNB_13V,
+ .v18 = DM1105_LNB_18V,
+ },
},
[DM1105_BOARD_DVBWORLD_2004] = {
.name = "DVBWorld PCI 2004",
+ .lnb = {
+ .mask = DM1105_LNB_MASK,
+ .off = DM1105_LNB_OFF,
+ .v13 = DM1105_LNB_13V,
+ .v18 = DM1105_LNB_18V,
+ },
},
[DM1105_BOARD_AXESS_DM05] = {
.name = "Axess/EasyTv DM05",
+ .lnb = {
+ .mask = DM05_LNB_MASK,
+ .off = DM05_LNB_OFF,
+ .v13 = DM05_LNB_13V,
+ .v18 = DM05_LNB_18V,
+ },
+ },
+ [DM1105_BOARD_UNBRANDED_I2C_ON_GPIO] = {
+ .name = "Unbranded DM1105 with i2c on GPIOs",
+ .lnb = {
+ .mask = UNBR_LNB_MASK,
+ .off = UNBR_LNB_OFF,
+ .v13 = UNBR_LNB_13V,
+ .v18 = UNBR_LNB_18V,
+ },
+ .gpio_scl = GPIO14,
+ .gpio_sda = GPIO13,
},
};
@@ -293,6 +350,8 @@ struct dm1105_dev {
/* i2c */
struct i2c_adapter i2c_adap;
+ struct i2c_adapter i2c_bb_adap;
+ struct i2c_algo_bit_data i2c_bit;
/* irq */
struct work_struct work;
@@ -328,6 +387,103 @@ struct dm1105_dev {
#define dm_setl(reg, bit) dm_andorl((reg), (bit), (bit))
#define dm_clearl(reg, bit) dm_andorl((reg), (bit), 0)
+/* The chip has 18 GPIOs. In HOST mode the GPIOs are used as 15-bit address lines,
+ so we can use only the 3 GPIOs from GPIO15 to GPIO17.
+ Here we don't check whether HOST mode is enabled, as it is not implemented yet.
+ */
+static void dm1105_gpio_set(struct dm1105_dev *dev, u32 mask)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if (mask & 0x0003ffff)
+ dm_setl(DM1105_GPIOVAL, mask & 0x0003ffff);
+
+}
+
+static void dm1105_gpio_clear(struct dm1105_dev *dev, u32 mask)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if (mask & 0x0003ffff)
+ dm_clearl(DM1105_GPIOVAL, mask & 0x0003ffff);
+
+}
+
+static void dm1105_gpio_andor(struct dm1105_dev *dev, u32 mask, u32 val)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if (mask & 0x0003ffff)
+ dm_andorl(DM1105_GPIOVAL, mask & 0x0003ffff, val);
+
+}
+
+static u32 dm1105_gpio_get(struct dm1105_dev *dev, u32 mask)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if (mask & 0x0003ffff)
+ return dm_readl(DM1105_GPIOVAL) & mask & 0x0003ffff;
+
+ return 0;
+}
+
+static void dm1105_gpio_enable(struct dm1105_dev *dev, u32 mask, int asoutput)
+{
+ if (mask & 0xfffc0000)
+ printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__);
+
+ if ((mask & 0x0003ffff) && asoutput)
+ dm_clearl(DM1105_GPIOCTR, mask & 0x0003ffff);
+ else if ((mask & 0x0003ffff) && !asoutput)
+ dm_setl(DM1105_GPIOCTR, mask & 0x0003ffff);
+
+}
+
+static void dm1105_setline(struct dm1105_dev *dev, u32 line, int state)
+{
+ if (state)
+ dm1105_gpio_enable(dev, line, 0);
+ else {
+ dm1105_gpio_enable(dev, line, 1);
+ dm1105_gpio_clear(dev, line);
+ }
+}
+
+static void dm1105_setsda(void *data, int state)
+{
+ struct dm1105_dev *dev = data;
+
+ dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_sda, state);
+}
+
+static void dm1105_setscl(void *data, int state)
+{
+ struct dm1105_dev *dev = data;
+
+ dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_scl, state);
+}
+
+static int dm1105_getsda(void *data)
+{
+ struct dm1105_dev *dev = data;
+
+ return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_sda)
+ ? 1 : 0;
+}
+
+static int dm1105_getscl(void *data)
+{
+ struct dm1105_dev *dev = data;
+
+ return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_scl)
+ ? 1 : 0;
+}
+
static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
@@ -436,31 +592,20 @@ static inline struct dm1105_dev *frontend_to_dm1105_dev(struct dvb_frontend *fe)
static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct dm1105_dev *dev = frontend_to_dm1105_dev(fe);
- u32 lnb_mask, lnb_13v, lnb_18v, lnb_off;
- switch (dev->boardnr) {
- case DM1105_BOARD_AXESS_DM05:
- lnb_mask = DM05_LNB_MASK;
- lnb_off = DM05_LNB_OFF;
- lnb_13v = DM05_LNB_13V;
- lnb_18v = DM05_LNB_18V;
- break;
- case DM1105_BOARD_DVBWORLD_2002:
- case DM1105_BOARD_DVBWORLD_2004:
- default:
- lnb_mask = DM1105_LNB_MASK;
- lnb_off = DM1105_LNB_OFF;
- lnb_13v = DM1105_LNB_13V;
- lnb_18v = DM1105_LNB_18V;
- }
-
- dm_writel(DM1105_GPIOCTR, lnb_mask);
+ dm1105_gpio_enable(dev, dm1105_boards[dev->boardnr].lnb.mask, 1);
if (voltage == SEC_VOLTAGE_18)
- dm_writel(DM1105_GPIOVAL, lnb_18v);
+ dm1105_gpio_andor(dev,
+ dm1105_boards[dev->boardnr].lnb.mask,
+ dm1105_boards[dev->boardnr].lnb.v18);
else if (voltage == SEC_VOLTAGE_13)
- dm_writel(DM1105_GPIOVAL, lnb_13v);
+ dm1105_gpio_andor(dev,
+ dm1105_boards[dev->boardnr].lnb.mask,
+ dm1105_boards[dev->boardnr].lnb.v13);
else
- dm_writel(DM1105_GPIOVAL, lnb_off);
+ dm1105_gpio_andor(dev,
+ dm1105_boards[dev->boardnr].lnb.mask,
+ dm1105_boards[dev->boardnr].lnb.off);
return 0;
}
@@ -708,6 +853,38 @@ static int __devinit frontend_init(struct dm1105_dev *dev)
int ret;
switch (dev->boardnr) {
+ case DM1105_BOARD_UNBRANDED_I2C_ON_GPIO:
+ dm1105_gpio_enable(dev, GPIO15, 1);
+ dm1105_gpio_clear(dev, GPIO15);
+ msleep(100);
+ dm1105_gpio_set(dev, GPIO15);
+ msleep(200);
+ dev->fe = dvb_attach(
+ stv0299_attach, &sharp_z0194a_config,
+ &dev->i2c_bb_adap);
+ if (dev->fe) {
+ dev->fe->ops.set_voltage = dm1105_set_voltage;
+ dvb_attach(dvb_pll_attach, dev->fe, 0x60,
+ &dev->i2c_bb_adap, DVB_PLL_OPERA1);
+ break;
+ }
+
+ dev->fe = dvb_attach(
+ stv0288_attach, &earda_config,
+ &dev->i2c_bb_adap);
+ if (dev->fe) {
+ dev->fe->ops.set_voltage = dm1105_set_voltage;
+ dvb_attach(stb6000_attach, dev->fe, 0x61,
+ &dev->i2c_bb_adap);
+ break;
+ }
+
+ dev->fe = dvb_attach(
+ si21xx_attach, &serit_config,
+ &dev->i2c_bb_adap);
+ if (dev->fe)
+ dev->fe->ops.set_voltage = dm1105_set_voltage;
+ break;
case DM1105_BOARD_DVBWORLD_2004:
dev->fe = dvb_attach(
cx24116_attach, &serit_sp2633_config,
@@ -870,11 +1047,32 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
if (ret < 0)
goto err_dm1105_hw_exit;
+ i2c_set_adapdata(&dev->i2c_bb_adap, dev);
+ strcpy(dev->i2c_bb_adap.name, DM1105_I2C_GPIO_NAME);
+ dev->i2c_bb_adap.owner = THIS_MODULE;
+ dev->i2c_bb_adap.dev.parent = &pdev->dev;
+ dev->i2c_bb_adap.algo_data = &dev->i2c_bit;
+ dev->i2c_bit.data = dev;
+ dev->i2c_bit.setsda = dm1105_setsda;
+ dev->i2c_bit.setscl = dm1105_setscl;
+ dev->i2c_bit.getsda = dm1105_getsda;
+ dev->i2c_bit.getscl = dm1105_getscl;
+ dev->i2c_bit.udelay = 10;
+ dev->i2c_bit.timeout = 10;
+
+ /* Raise SCL and SDA */
+ dm1105_setsda(dev, 1);
+ dm1105_setscl(dev, 1);
+
+ ret = i2c_bit_add_bus(&dev->i2c_bb_adap);
+ if (ret < 0)
+ goto err_i2c_del_adapter;
+
/* dvb */
ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME,
THIS_MODULE, &pdev->dev, adapter_nr);
if (ret < 0)
- goto err_i2c_del_adapter;
+ goto err_i2c_del_adapters;
dvb_adapter = &dev->dvb_adapter;
@@ -952,6 +1150,8 @@ err_dvb_dmx_release:
dvb_dmx_release(dvbdemux);
err_dvb_unregister_adapter:
dvb_unregister_adapter(dvb_adapter);
+err_i2c_del_adapters:
+ i2c_del_adapter(&dev->i2c_bb_adap);
err_i2c_del_adapter:
i2c_del_adapter(&dev->i2c_adap);
err_dm1105_hw_exit:
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 4dc1ca333236..7c327b54308e 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -60,8 +60,6 @@ static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen,
int act_len, ret;
u8 buf[64];
- if (slen > sizeof(buf))
- slen = sizeof(buf);
memcpy(&buf[0], sbuf, slen);
buf[60] = state->seq++;
@@ -180,30 +178,37 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int ret = 0, inc, i = 0;
+ u8 buf[52]; /* 4 + 48 (I2C WR USB command header + I2C WR max) */
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
while (i < num) {
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
- u8 buf[6];
+ if (msg[i].len > 2 || msg[i+1].len > 60) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
buf[0] = CMD_I2C_READ;
buf[1] = (msg[i].addr << 1) | 0x01;
buf[2] = msg[i].buf[0];
buf[3] = msg[i].buf[1];
buf[4] = msg[i].len-1;
buf[5] = msg[i+1].len;
- ret = anysee_ctrl_msg(d, buf, sizeof(buf), msg[i+1].buf,
+ ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf,
msg[i+1].len);
inc = 2;
} else {
- u8 buf[4+msg[i].len];
+ if (msg[i].len > 48) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
buf[0] = CMD_I2C_WRITE;
buf[1] = (msg[i].addr << 1);
buf[2] = msg[i].len;
buf[3] = 0x01;
memcpy(&buf[4], msg[i].buf, msg[i].len);
- ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
+ ret = anysee_ctrl_msg(d, buf, 4 + msg[i].len, NULL, 0);
inc = 1;
}
if (ret)
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index f36f471deae2..37b146961ae2 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -207,17 +207,6 @@ static int lme2510_stream_restart(struct dvb_usb_device *d)
rbuff, sizeof(rbuff));
return ret;
}
-static int lme2510_remote_keypress(struct dvb_usb_adapter *adap, u32 keypress)
-{
- struct dvb_usb_device *d = adap->dev;
-
- deb_info(1, "INT Key Keypress =%04x", keypress);
-
- if (keypress > 0)
- rc_keydown(d->rc_dev, keypress, 0);
-
- return 0;
-}
static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out)
{
@@ -256,6 +245,7 @@ static void lme2510_int_response(struct urb *lme_urb)
struct lme2510_state *st = adap->dev->priv;
static u8 *ibuf, *rbuf;
int i = 0, offset;
+ u32 key;
switch (lme_urb->status) {
case 0:
@@ -282,10 +272,16 @@ static void lme2510_int_response(struct urb *lme_urb)
switch (ibuf[0]) {
case 0xaa:
- debug_data_snipet(1, "INT Remote data snipet in", ibuf);
- lme2510_remote_keypress(adap,
- (u32)(ibuf[2] << 24) + (ibuf[3] << 16) +
- (ibuf[4] << 8) + ibuf[5]);
+ debug_data_snipet(1, "INT Remote data snipet", ibuf);
+ if ((ibuf[4] + ibuf[5]) == 0xff) {
+ key = ibuf[5];
+ key += (ibuf[3] > 0)
+ ? (ibuf[3] ^ 0xff) << 8 : 0;
+ key += (ibuf[2] ^ 0xff) << 16;
+ deb_info(1, "INT Key =%08x", key);
+ if (adap->dev->rc_dev != NULL)
+ rc_keydown(adap->dev->rc_dev, key, 0);
+ }
break;
case 0xbb:
switch (st->tuner_config) {
@@ -691,45 +687,6 @@ static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return (ret < 0) ? -ENODEV : 0;
}
-static int lme2510_int_service(struct dvb_usb_adapter *adap)
-{
- struct dvb_usb_device *d = adap->dev;
- struct rc_dev *rc;
- int ret;
-
- info("STA Configuring Remote");
-
- rc = rc_allocate_device();
- if (!rc)
- return -ENOMEM;
-
- usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys));
- strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys));
-
- rc->input_name = "LME2510 Remote Control";
- rc->input_phys = d->rc_phys;
- rc->map_name = RC_MAP_LME2510;
- rc->driver_name = "LME 2510";
- usb_to_input_id(d->udev, &rc->input_id);
-
- ret = rc_register_device(rc);
- if (ret) {
- rc_free_device(rc);
- return ret;
- }
- d->rc_dev = rc;
-
- /* Start the Interrupt */
- ret = lme2510_int_read(adap);
- if (ret < 0) {
- rc_unregister_device(rc);
- info("INT Unable to start Interrupt Service");
- return -ENODEV;
- }
-
- return 0;
-}
-
static u8 check_sum(u8 *p, u8 len)
{
u8 sum = 0;
@@ -831,7 +788,7 @@ static int lme_firmware_switch(struct usb_device *udev, int cold)
cold_fw = !cold;
- if (udev->descriptor.idProduct == 0x1122) {
+ if (le16_to_cpu(udev->descriptor.idProduct) == 0x1122) {
switch (dvb_usb_lme2510_firmware) {
default:
dvb_usb_lme2510_firmware = TUNER_S0194;
@@ -1053,8 +1010,11 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
end: if (ret) {
- kfree(adap->fe);
- adap->fe = NULL;
+ if (adap->fe) {
+ dvb_frontend_detach(adap->fe);
+ adap->fe = NULL;
+ }
+ adap->dev->props.rc.core.rc_codes = NULL;
return -ENODEV;
}
@@ -1097,8 +1057,12 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
return -ENODEV;
}
- /* Start the Interrupt & Remote*/
- ret = lme2510_int_service(adap);
+ /* Start the Interrupt*/
+ ret = lme2510_int_read(adap);
+ if (ret < 0) {
+ info("INT Unable to start Interrupt Service");
+ return -ENODEV;
+ }
return ret;
}
@@ -1204,6 +1168,12 @@ static struct dvb_usb_device_properties lme2510_properties = {
}
}
},
+ .rc.core = {
+ .protocol = RC_TYPE_NEC,
+ .module_name = "LME2510 Remote Control",
+ .allowed_protos = RC_TYPE_NEC,
+ .rc_codes = RC_MAP_LME2510,
+ },
.power_ctrl = lme2510_powerup,
.identify_state = lme2510_identify_state,
.i2c_algo = &lme2510_i2c_algo,
@@ -1246,6 +1216,12 @@ static struct dvb_usb_device_properties lme2510c_properties = {
}
}
},
+ .rc.core = {
+ .protocol = RC_TYPE_NEC,
+ .module_name = "LME2510 Remote Control",
+ .allowed_protos = RC_TYPE_NEC,
+ .rc_codes = RC_MAP_LME2510,
+ },
.power_ctrl = lme2510_powerup,
.identify_state = lme2510_identify_state,
.i2c_algo = &lme2510_i2c_algo,
@@ -1269,19 +1245,21 @@ static void *lme2510_exit_int(struct dvb_usb_device *d)
adap->feedcount = 0;
}
- if (st->lme_urb != NULL) {
+ if (st->usb_buffer != NULL) {
st->i2c_talk_onoff = 1;
st->signal_lock = 0;
st->signal_level = 0;
st->signal_sn = 0;
buffer = st->usb_buffer;
+ }
+
+ if (st->lme_urb != NULL) {
usb_kill_urb(st->lme_urb);
usb_free_coherent(d->udev, 5000, st->buffer,
st->lme_urb->transfer_dma);
info("Interrupt Service Stopped");
- rc_unregister_device(d->rc_dev);
- info("Remote Stopped");
}
+
return buffer;
}
@@ -1293,7 +1271,8 @@ static void lme2510_exit(struct usb_interface *intf)
if (d != NULL) {
usb_buffer = lme2510_exit_int(d);
dvb_usb_device_exit(intf);
- kfree(usb_buffer);
+ if (usb_buffer != NULL)
+ kfree(usb_buffer);
}
}
@@ -1327,5 +1306,5 @@ module_exit(lme2510_module_exit);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.86");
+MODULE_VERSION("1.88");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/stb0899_algo.c b/drivers/media/dvb/frontends/stb0899_algo.c
index 2da55ec20392..d70eee00f33a 100644
--- a/drivers/media/dvb/frontends/stb0899_algo.c
+++ b/drivers/media/dvb/frontends/stb0899_algo.c
@@ -23,7 +23,7 @@
#include "stb0899_priv.h"
#include "stb0899_reg.h"
-inline u32 stb0899_do_div(u64 n, u32 d)
+static inline u32 stb0899_do_div(u64 n, u32 d)
{
/* wrap do_div() for ease of use */
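For context on the hunk above: the kernel's do_div() macro divides a 64-bit value in place (the dividend becomes the quotient) and evaluates to the remainder, so a small wrapper is the usual way to get just the 32-bit quotient. The function body is cut off by the hunk; a minimal sketch of what such a wrapper typically looks like, under that assumption:

static inline u32 stb0899_do_div(u64 n, u32 d)
{
	/* wrap do_div() for ease of use */
	do_div(n, d);	/* n now holds the quotient; the macro returned the remainder */
	return n;
}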
diff --git a/drivers/media/dvb/frontends/tda8261.c b/drivers/media/dvb/frontends/tda8261.c
index 1742056a34e8..53c7d8f1df28 100644
--- a/drivers/media/dvb/frontends/tda8261.c
+++ b/drivers/media/dvb/frontends/tda8261.c
@@ -224,7 +224,6 @@ exit:
}
EXPORT_SYMBOL(tda8261_attach);
-MODULE_PARM_DESC(verbose, "Set verbosity level");
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index af5263c6625a..7b42ace419d9 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -213,14 +213,14 @@ int __must_check media_devnode_register(struct media_devnode *mdev)
/* Part 1: Find a free minor number */
mutex_lock(&media_devnode_lock);
- minor = find_next_zero_bit(media_devnode_nums, 0, MEDIA_NUM_DEVICES);
+ minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
if (minor == MEDIA_NUM_DEVICES) {
mutex_unlock(&media_devnode_lock);
printk(KERN_ERR "could not get a free minor\n");
return -ENFILE;
}
- set_bit(mdev->minor, media_devnode_nums);
+ set_bit(minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
mdev->minor = minor;
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 46cacf845049..459f7272d326 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1382,7 +1382,7 @@ static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->val = wl1273_fm_get_tx_ctune(radio);
+ ctrl->cur.val = wl1273_fm_get_tx_ctune(radio);
break;
default:
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index d50e5ac75ab6..87010724f914 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -191,7 +191,7 @@ static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->val = fm_tx_get_tune_cap_val(fmdev);
+ ctrl->cur.val = fm_tx_get_tune_cap_val(fmdev);
break;
default:
fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 154c337f00fd..7d4bbc226d06 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -148,6 +148,18 @@ config IR_ITE_CIR
To compile this driver as a module, choose M here: the
module will be called ite-cir.
+config IR_FINTEK
+ tristate "Fintek Consumer Infrared Transceiver"
+ depends on PNP
+ depends on RC_CORE
+ ---help---
+ Say Y here to enable support for the integrated infrared
+ receiver/transceiver made by Fintek. This chip is found on
+ assorted Jetway motherboards (and possibly others).
+
+ To compile this driver as a module, choose M here: the
+ module will be called fintek-cir.
+
config IR_NUVOTON
tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
depends on PNP
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 1f90a219a162..52830e5f4eaa 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
obj-$(CONFIG_IR_IMON) += imon.o
obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
obj-$(CONFIG_IR_MCEUSB) += mceusb.o
+obj-$(CONFIG_IR_FINTEK) += fintek-cir.o
obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
obj-$(CONFIG_IR_ENE) += ene_ir.o
obj-$(CONFIG_IR_REDRAT3) += redrat3.o
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
new file mode 100644
index 000000000000..8fa539dde1b4
--- /dev/null
+++ b/drivers/media/rc/fintek-cir.c
@@ -0,0 +1,684 @@
+/*
+ * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
+ *
+ * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
+ *
+ * Special thanks to Fintek for providing hardware and spec sheets.
+ * This driver is based upon the nuvoton, ite and ene drivers for
+ * similar hardware.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pnp.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <media/rc-core.h>
+#include <linux/pci_ids.h>
+
+#include "fintek-cir.h"
+
+/* write val to config reg */
+static inline void fintek_cr_write(struct fintek_dev *fintek, u8 val, u8 reg)
+{
+ fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
+ __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
+ outb(reg, fintek->cr_ip);
+ outb(val, fintek->cr_dp);
+}
+
+/* read val from config reg */
+static inline u8 fintek_cr_read(struct fintek_dev *fintek, u8 reg)
+{
+ u8 val;
+
+ outb(reg, fintek->cr_ip);
+ val = inb(fintek->cr_dp);
+
+ fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
+ __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
+ return val;
+}
+
+/* update config register bit without changing other bits */
+static inline void fintek_set_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
+{
+ u8 tmp = fintek_cr_read(fintek, reg) | val;
+ fintek_cr_write(fintek, tmp, reg);
+}
+
+/* clear config register bit without changing other bits */
+static inline void fintek_clear_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
+{
+ u8 tmp = fintek_cr_read(fintek, reg) & ~val;
+ fintek_cr_write(fintek, tmp, reg);
+}
+
+/* enter config mode */
+static inline void fintek_config_mode_enable(struct fintek_dev *fintek)
+{
+ /* Enabling Config Mode explicitly requires writing 2x */
+ outb(CONFIG_REG_ENABLE, fintek->cr_ip);
+ outb(CONFIG_REG_ENABLE, fintek->cr_ip);
+}
+
+/* exit config mode */
+static inline void fintek_config_mode_disable(struct fintek_dev *fintek)
+{
+ outb(CONFIG_REG_DISABLE, fintek->cr_ip);
+}
+
+/*
+ * When you want to address a specific logical device, write its logical
+ * device number to GCR_LOGICAL_DEV_NO
+ */
+static inline void fintek_select_logical_dev(struct fintek_dev *fintek, u8 ldev)
+{
+ fintek_cr_write(fintek, ldev, GCR_LOGICAL_DEV_NO);
+}
+
+/* write val to cir config register */
+static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 offset)
+{
+ outb(val, fintek->cir_addr + offset);
+}
+
+/* read val from cir config register */
+static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
+{
+ u8 val;
+
+ val = inb(fintek->cir_addr + offset);
+
+ return val;
+}
+
+#define pr_reg(text, ...) \
+ printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+/* dump current cir register contents */
+static void cir_dump_regs(struct fintek_dev *fintek)
+{
+ fintek_config_mode_enable(fintek);
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+
+ pr_reg("%s: Dump CIR logical device registers:\n", FINTEK_DRIVER_NAME);
+ pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
+ (fintek_cr_read(fintek, CIR_CR_BASE_ADDR_HI) << 8) |
+ fintek_cr_read(fintek, CIR_CR_BASE_ADDR_LO));
+ pr_reg(" * CR CIR IRQ NUM: 0x%x\n",
+ fintek_cr_read(fintek, CIR_CR_IRQ_SEL));
+
+ fintek_config_mode_disable(fintek);
+
+ pr_reg("%s: Dump CIR registers:\n", FINTEK_DRIVER_NAME);
+ pr_reg(" * STATUS: 0x%x\n", fintek_cir_reg_read(fintek, CIR_STATUS));
+ pr_reg(" * CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_CONTROL));
+ pr_reg(" * RX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_RX_DATA));
+ pr_reg(" * TX_CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_CONTROL));
+ pr_reg(" * TX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_DATA));
+}
+
+/* detect hardware features */
+static int fintek_hw_detect(struct fintek_dev *fintek)
+{
+ unsigned long flags;
+ u8 chip_major, chip_minor;
+ u8 vendor_major, vendor_minor;
+ u8 portsel, ir_class;
+ u16 vendor;
+ int ret = 0;
+
+ fintek_config_mode_enable(fintek);
+
+ /* Check if we're using config port 0x4e or 0x2e */
+ portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
+ if (portsel == 0xff) {
+ fit_pr(KERN_INFO, "first portsel read was bunk, trying alt");
+ fintek_config_mode_disable(fintek);
+ fintek->cr_ip = CR_INDEX_PORT2;
+ fintek->cr_dp = CR_DATA_PORT2;
+ fintek_config_mode_enable(fintek);
+ portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
+ }
+ fit_dbg("portsel reg: 0x%02x", portsel);
+
+ ir_class = fintek_cir_reg_read(fintek, CIR_CR_CLASS);
+ fit_dbg("ir_class reg: 0x%02x", ir_class);
+
+ switch (ir_class) {
+ case CLASS_RX_2TX:
+ case CLASS_RX_1TX:
+ fintek->hw_tx_capable = true;
+ break;
+ case CLASS_RX_ONLY:
+ default:
+ fintek->hw_tx_capable = false;
+ break;
+ }
+
+ chip_major = fintek_cr_read(fintek, GCR_CHIP_ID_HI);
+ chip_minor = fintek_cr_read(fintek, GCR_CHIP_ID_LO);
+
+ vendor_major = fintek_cr_read(fintek, GCR_VENDOR_ID_HI);
+ vendor_minor = fintek_cr_read(fintek, GCR_VENDOR_ID_LO);
+ vendor = vendor_major << 8 | vendor_minor;
+
+ if (vendor != VENDOR_ID_FINTEK)
+ fit_pr(KERN_WARNING, "Unknown vendor ID: 0x%04x", vendor);
+ else
+ fit_dbg("Read Fintek vendor ID from chip");
+
+ fintek_config_mode_disable(fintek);
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+ fintek->chip_major = chip_major;
+ fintek->chip_minor = chip_minor;
+ fintek->chip_vendor = vendor;
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
+ return ret;
+}
+
+static void fintek_cir_ldev_init(struct fintek_dev *fintek)
+{
+ /* Select CIR logical device and enable */
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
+
+ /* Write allocated CIR address and IRQ information to hardware */
+ fintek_cr_write(fintek, fintek->cir_addr >> 8, CIR_CR_BASE_ADDR_HI);
+ fintek_cr_write(fintek, fintek->cir_addr & 0xff, CIR_CR_BASE_ADDR_LO);
+
+ fintek_cr_write(fintek, fintek->cir_irq, CIR_CR_IRQ_SEL);
+
+ fit_dbg("CIR initialized, base io address: 0x%lx, irq: %d (len: %d)",
+ fintek->cir_addr, fintek->cir_irq, fintek->cir_port_len);
+}
+
+/* enable CIR interrupts */
+static void fintek_enable_cir_irq(struct fintek_dev *fintek)
+{
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
+}
+
+static void fintek_cir_regs_init(struct fintek_dev *fintek)
+{
+ /* clear any and all stray interrupts */
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+
+ /* and finally, enable interrupts */
+ fintek_enable_cir_irq(fintek);
+}
+
+static void fintek_enable_wake(struct fintek_dev *fintek)
+{
+ fintek_config_mode_enable(fintek);
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_ACPI);
+
+ /* Allow CIR PMEs to wake system */
+ fintek_set_reg_bit(fintek, ACPI_WAKE_EN_CIR_BIT, LDEV_ACPI_WAKE_EN_REG);
+ /* Enable CIR PMEs */
+ fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_EN_REG);
+ /* Clear CIR PME status register */
+ fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_CLR_REG);
+ /* Save state */
+ fintek_set_reg_bit(fintek, ACPI_STATE_CIR_BIT, LDEV_ACPI_STATE_REG);
+
+ fintek_config_mode_disable(fintek);
+}
+
+static int fintek_cmdsize(u8 cmd, u8 subcmd)
+{
+ int datasize = 0;
+
+ switch (cmd) {
+ case BUF_COMMAND_NULL:
+ if (subcmd == BUF_HW_CMD_HEADER)
+ datasize = 1;
+ break;
+ case BUF_HW_CMD_HEADER:
+ if (subcmd == BUF_CMD_G_REVISION)
+ datasize = 2;
+ break;
+ case BUF_COMMAND_HEADER:
+ switch (subcmd) {
+ case BUF_CMD_S_CARRIER:
+ case BUF_CMD_S_TIMEOUT:
+ case BUF_RSP_PULSE_COUNT:
+ datasize = 2;
+ break;
+ case BUF_CMD_SIG_END:
+ case BUF_CMD_S_TXMASK:
+ case BUF_CMD_S_RXSENSOR:
+ datasize = 1;
+ break;
+ }
+ }
+
+ return datasize;
+}
+
+/* process ir data stored in driver buffer */
+static void fintek_process_rx_ir_data(struct fintek_dev *fintek)
+{
+ DEFINE_IR_RAW_EVENT(rawir);
+ u8 sample;
+ int i;
+
+ for (i = 0; i < fintek->pkts; i++) {
+ sample = fintek->buf[i];
+ switch (fintek->parser_state) {
+ case CMD_HEADER:
+ fintek->cmd = sample;
+ if ((fintek->cmd == BUF_COMMAND_HEADER) ||
+ ((fintek->cmd & BUF_COMMAND_MASK) !=
+ BUF_PULSE_BIT)) {
+ fintek->parser_state = SUBCMD;
+ continue;
+ }
+ fintek->rem = (fintek->cmd & BUF_LEN_MASK);
+ fit_dbg("%s: rem: 0x%02x", __func__, fintek->rem);
+ if (fintek->rem)
+ fintek->parser_state = PARSE_IRDATA;
+ else
+ ir_raw_event_reset(fintek->rdev);
+ break;
+ case SUBCMD:
+ fintek->rem = fintek_cmdsize(fintek->cmd, sample);
+ fintek->parser_state = CMD_DATA;
+ break;
+ case CMD_DATA:
+ fintek->rem--;
+ break;
+ case PARSE_IRDATA:
+ fintek->rem--;
+ init_ir_raw_event(&rawir);
+ rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
+ rawir.duration = US_TO_NS((sample & BUF_SAMPLE_MASK)
+ * CIR_SAMPLE_PERIOD);
+
+ fit_dbg("Storing %s with duration %d",
+ rawir.pulse ? "pulse" : "space",
+ rawir.duration);
+ ir_raw_event_store_with_filter(fintek->rdev, &rawir);
+ break;
+ }
+
+ if ((fintek->parser_state != CMD_HEADER) && !fintek->rem)
+ fintek->parser_state = CMD_HEADER;
+ }
+
+ fintek->pkts = 0;
+
+ fit_dbg("Calling ir_raw_event_handle");
+ ir_raw_event_handle(fintek->rdev);
+}
+
+/* copy data from hardware rx register into driver buffer */
+static void fintek_get_rx_ir_data(struct fintek_dev *fintek, u8 rx_irqs)
+{
+ unsigned long flags;
+ u8 sample, status;
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+
+ /*
+ * We must read data from CIR_RX_DATA until the hardware IR buffer
+ * is empty and the hardware clears the RX_TIMEOUT and/or RX_RECEIVE
+ * flags in the CIR_STATUS register.
+ */
+ do {
+ sample = fintek_cir_reg_read(fintek, CIR_RX_DATA);
+ fit_dbg("%s: sample: 0x%02x", __func__, sample);
+
+ fintek->buf[fintek->pkts] = sample;
+ fintek->pkts++;
+
+ status = fintek_cir_reg_read(fintek, CIR_STATUS);
+ if (!(status & CIR_STATUS_IRQ_EN))
+ break;
+ } while (status & rx_irqs);
+
+ fintek_process_rx_ir_data(fintek);
+
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+}
+
+static void fintek_cir_log_irqs(u8 status)
+{
+ fit_pr(KERN_INFO, "IRQ 0x%02x:%s%s%s%s%s", status,
+ status & CIR_STATUS_IRQ_EN ? " IRQEN" : "",
+ status & CIR_STATUS_TX_FINISH ? " TXF" : "",
+ status & CIR_STATUS_TX_UNDERRUN ? " TXU" : "",
+ status & CIR_STATUS_RX_TIMEOUT ? " RXTO" : "",
+ status & CIR_STATUS_RX_RECEIVE ? " RXOK" : "");
+}
+
+/* interrupt service routine for incoming and outgoing CIR data */
+static irqreturn_t fintek_cir_isr(int irq, void *data)
+{
+ struct fintek_dev *fintek = data;
+ u8 status, rx_irqs;
+
+ fit_dbg_verbose("%s firing", __func__);
+
+ fintek_config_mode_enable(fintek);
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_config_mode_disable(fintek);
+
+ /*
+ * Get IR Status register contents. Write 1 to ack/clear
+ *
+ * bit: reg name - description
+ * 3: TX_FINISH - TX is finished
+ * 2: TX_UNDERRUN - TX underrun
+ * 1: RX_TIMEOUT - RX data timeout
+ * 0: RX_RECEIVE - RX data received
+ */
+ status = fintek_cir_reg_read(fintek, CIR_STATUS);
+ if (!(status & CIR_STATUS_IRQ_MASK) || status == 0xff) {
+ fit_dbg_verbose("%s exiting, IRSTS 0x%02x", __func__, status);
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+ return IRQ_RETVAL(IRQ_NONE);
+ }
+
+ if (debug)
+ fintek_cir_log_irqs(status);
+
+ rx_irqs = status & (CIR_STATUS_RX_RECEIVE | CIR_STATUS_RX_TIMEOUT);
+ if (rx_irqs)
+ fintek_get_rx_ir_data(fintek, rx_irqs);
+
+ /* ack/clear all irq flags we've got */
+ fintek_cir_reg_write(fintek, status, CIR_STATUS);
+
+ fit_dbg_verbose("%s done", __func__);
+ return IRQ_RETVAL(IRQ_HANDLED);
+}
+
+static void fintek_enable_cir(struct fintek_dev *fintek)
+{
+ /* set IRQ enabled */
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
+
+ fintek_config_mode_enable(fintek);
+
+ /* enable the CIR logical device */
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
+
+ fintek_config_mode_disable(fintek);
+
+ /* clear all pending interrupts */
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+
+ /* enable interrupts */
+ fintek_enable_cir_irq(fintek);
+}
+
+static void fintek_disable_cir(struct fintek_dev *fintek)
+{
+ fintek_config_mode_enable(fintek);
+
+ /* disable the CIR logical device */
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
+
+ fintek_config_mode_disable(fintek);
+}
+
+static int fintek_open(struct rc_dev *dev)
+{
+ struct fintek_dev *fintek = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+ fintek_enable_cir(fintek);
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
+ return 0;
+}
+
+static void fintek_close(struct rc_dev *dev)
+{
+ struct fintek_dev *fintek = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+ fintek_disable_cir(fintek);
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+}
+
+/* Allocate memory, probe hardware, and initialize everything */
+static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
+{
+ struct fintek_dev *fintek;
+ struct rc_dev *rdev;
+ int ret = -ENOMEM;
+
+ fintek = kzalloc(sizeof(struct fintek_dev), GFP_KERNEL);
+ if (!fintek)
+ return ret;
+
+ /* input device for IR remote (and tx) */
+ rdev = rc_allocate_device();
+ if (!rdev)
+ goto failure;
+
+ ret = -ENODEV;
+ /* validate pnp resources */
+ if (!pnp_port_valid(pdev, 0)) {
+ dev_err(&pdev->dev, "IR PNP Port not valid!\n");
+ goto failure;
+ }
+
+ if (!pnp_irq_valid(pdev, 0)) {
+ dev_err(&pdev->dev, "IR PNP IRQ not valid!\n");
+ goto failure;
+ }
+
+ fintek->cir_addr = pnp_port_start(pdev, 0);
+ fintek->cir_irq = pnp_irq(pdev, 0);
+ fintek->cir_port_len = pnp_port_len(pdev, 0);
+
+ fintek->cr_ip = CR_INDEX_PORT;
+ fintek->cr_dp = CR_DATA_PORT;
+
+ spin_lock_init(&fintek->fintek_lock);
+
+ ret = -EBUSY;
+ /* now claim resources */
+ if (!request_region(fintek->cir_addr,
+ fintek->cir_port_len, FINTEK_DRIVER_NAME))
+ goto failure;
+
+ if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
+ FINTEK_DRIVER_NAME, (void *)fintek))
+ goto failure;
+
+ pnp_set_drvdata(pdev, fintek);
+ fintek->pdev = pdev;
+
+ ret = fintek_hw_detect(fintek);
+ if (ret)
+ goto failure;
+
+ /* Initialize CIR & CIR Wake Logical Devices */
+ fintek_config_mode_enable(fintek);
+ fintek_cir_ldev_init(fintek);
+ fintek_config_mode_disable(fintek);
+
+ /* Initialize CIR & CIR Wake Config Registers */
+ fintek_cir_regs_init(fintek);
+
+ /* Set up the rc device */
+ rdev->priv = fintek;
+ rdev->driver_type = RC_DRIVER_IR_RAW;
+ rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->open = fintek_open;
+ rdev->close = fintek_close;
+ rdev->input_name = FINTEK_DESCRIPTION;
+ rdev->input_phys = "fintek/cir0";
+ rdev->input_id.bustype = BUS_HOST;
+ rdev->input_id.vendor = VENDOR_ID_FINTEK;
+ rdev->input_id.product = fintek->chip_major;
+ rdev->input_id.version = fintek->chip_minor;
+ rdev->dev.parent = &pdev->dev;
+ rdev->driver_name = FINTEK_DRIVER_NAME;
+ rdev->map_name = RC_MAP_RC6_MCE;
+ rdev->timeout = US_TO_NS(1000);
+ /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
+ rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
+
+ ret = rc_register_device(rdev);
+ if (ret)
+ goto failure;
+
+ device_init_wakeup(&pdev->dev, true);
+ fintek->rdev = rdev;
+ fit_pr(KERN_NOTICE, "driver has been successfully loaded\n");
+ if (debug)
+ cir_dump_regs(fintek);
+
+ return 0;
+
+failure:
+ if (fintek->cir_irq)
+ free_irq(fintek->cir_irq, fintek);
+ if (fintek->cir_addr)
+ release_region(fintek->cir_addr, fintek->cir_port_len);
+
+ rc_free_device(rdev);
+ kfree(fintek);
+
+ return ret;
+}
+
+static void __devexit fintek_remove(struct pnp_dev *pdev)
+{
+ struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+ /* disable CIR */
+ fintek_disable_cir(fintek);
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+ /* enable CIR Wake (for IR power-on) */
+ fintek_enable_wake(fintek);
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
+ /* free resources */
+ free_irq(fintek->cir_irq, fintek);
+ release_region(fintek->cir_addr, fintek->cir_port_len);
+
+ rc_unregister_device(fintek->rdev);
+
+ kfree(fintek);
+}
+
+static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
+{
+ struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+
+ fit_dbg("%s called", __func__);
+
+ /* disable all CIR interrupts */
+ fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+
+ fintek_config_mode_enable(fintek);
+
+ /* disable cir logical dev */
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
+
+ fintek_config_mode_disable(fintek);
+
+ /* make sure wake is enabled */
+ fintek_enable_wake(fintek);
+
+ return 0;
+}
+
+static int fintek_resume(struct pnp_dev *pdev)
+{
+ int ret = 0;
+ struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+
+ fit_dbg("%s called", __func__);
+
+ /* open interrupt */
+ fintek_enable_cir_irq(fintek);
+
+ /* Enable CIR logical device */
+ fintek_config_mode_enable(fintek);
+ fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+ fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
+
+ fintek_config_mode_disable(fintek);
+
+ fintek_cir_regs_init(fintek);
+
+ return ret;
+}
+
+static void fintek_shutdown(struct pnp_dev *pdev)
+{
+ struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+ fintek_enable_wake(fintek);
+}
+
+static const struct pnp_device_id fintek_ids[] = {
+ { "FIT0002", 0 }, /* CIR */
+ { "", 0 },
+};
+
+static struct pnp_driver fintek_driver = {
+ .name = FINTEK_DRIVER_NAME,
+ .id_table = fintek_ids,
+ .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
+ .probe = fintek_probe,
+ .remove = __devexit_p(fintek_remove),
+ .suspend = fintek_suspend,
+ .resume = fintek_resume,
+ .shutdown = fintek_shutdown,
+};
+
+int fintek_init(void)
+{
+ return pnp_register_driver(&fintek_driver);
+}
+
+void fintek_exit(void)
+{
+ pnp_unregister_driver(&fintek_driver);
+}
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging output");
+
+MODULE_DEVICE_TABLE(pnp, fintek_ids);
+MODULE_DESCRIPTION(FINTEK_DESCRIPTION " driver");
+
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_LICENSE("GPL");
+
+module_init(fintek_init);
+module_exit(fintek_exit);
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h
new file mode 100644
index 000000000000..1b10b2011f5e
--- /dev/null
+++ b/drivers/media/rc/fintek-cir.h
@@ -0,0 +1,243 @@
+/*
+ * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
+ *
+ * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
+ *
+ * Special thanks to Fintek for providing hardware and spec sheets.
+ * This driver is based upon the nuvoton, ite and ene drivers for
+ * similar hardware.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/spinlock.h>
+#include <linux/ioctl.h>
+
+/* platform driver name to register */
+#define FINTEK_DRIVER_NAME "fintek-cir"
+#define FINTEK_DESCRIPTION "Fintek LPC SuperIO Consumer IR Transceiver"
+#define VENDOR_ID_FINTEK 0x1934
+
+
+/* debugging module parameter */
+static int debug;
+
+#define fit_pr(level, text, ...) \
+ printk(level KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+#define fit_dbg(text, ...) \
+ if (debug) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define fit_dbg_verbose(text, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define fit_dbg_wake(text, ...) \
+ if (debug > 2) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+
+#define TX_BUF_LEN 256
+#define RX_BUF_LEN 32
+
+struct fintek_dev {
+ struct pnp_dev *pdev;
+ struct rc_dev *rdev;
+
+ spinlock_t fintek_lock;
+
+ /* for rx */
+ u8 buf[RX_BUF_LEN];
+ unsigned int pkts;
+
+ struct {
+ spinlock_t lock;
+ u8 buf[TX_BUF_LEN];
+ unsigned int buf_count;
+ unsigned int cur_buf_num;
+ wait_queue_head_t queue;
+ } tx;
+
+ /* Config register index/data port pair */
+ u8 cr_ip;
+ u8 cr_dp;
+
+ /* hardware I/O settings */
+ unsigned long cir_addr;
+ int cir_irq;
+ int cir_port_len;
+
+ /* hardware id */
+ u8 chip_major;
+ u8 chip_minor;
+ u16 chip_vendor;
+
+ /* hardware features */
+ bool hw_learning_capable;
+ bool hw_tx_capable;
+
+ /* rx settings */
+ bool learning_enabled;
+ bool carrier_detect_enabled;
+
+ enum {
+ CMD_HEADER = 0,
+ SUBCMD,
+ CMD_DATA,
+ PARSE_IRDATA,
+ } parser_state;
+
+ u8 cmd, rem;
+
+ /* carrier period = 1 / frequency */
+ u32 carrier;
+};
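As a worked example of the "carrier period = 1 / frequency" relation noted on the carrier field above, a hypothetical helper converting a carrier frequency in Hz to its period in nanoseconds; the helper name and its use are illustrative only, not code from the driver:

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST */

static inline u32 carrier_hz_to_period_ns(u32 carrier_hz)
{
	/* e.g. a 38 kHz IR carrier gives a period of roughly 26316 ns */
	return DIV_ROUND_CLOSEST(1000000000U, carrier_hz);
}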
+
+/* buffer packet constants, largely identical to mceusb.c */
+#define BUF_PULSE_BIT 0x80
+#define BUF_LEN_MASK 0x1f
+#define BUF_SAMPLE_MASK 0x7f
+
+#define BUF_COMMAND_HEADER 0x9f
+#define BUF_COMMAND_MASK 0xe0
+#define BUF_COMMAND_NULL 0x00
+#define BUF_HW_CMD_HEADER 0xff
+#define BUF_CMD_G_REVISION 0x0b
+#define BUF_CMD_S_CARRIER 0x06
+#define BUF_CMD_S_TIMEOUT 0x0c
+#define BUF_CMD_SIG_END 0x01
+#define BUF_CMD_S_TXMASK 0x08
+#define BUF_CMD_S_RXSENSOR 0x14
+#define BUF_RSP_PULSE_COUNT 0x15
+
+#define CIR_SAMPLE_PERIOD 50
+
+/*
+ * Configuration Register:
+ * Index Port
+ * Data Port
+ */
+#define CR_INDEX_PORT 0x2e
+#define CR_DATA_PORT 0x2f
+
+/* Possible alternate values, depends on how the chip is wired */
+#define CR_INDEX_PORT2 0x4e
+#define CR_DATA_PORT2 0x4f
+
+/*
+ * GCR_CONFIG_PORT_SEL bit 4 specifies which Index Port value is
+ * active. 1 = 0x4e, 0 = 0x2e
+ */
+#define PORT_SEL_PORT_4E_EN 0x10
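A minimal sketch of how the bit-4 convention described above could be queried, using the fintek_cr_read() helper from fintek-cir.c; the function name fintek_uses_port_4e is hypothetical and not part of the driver:

static bool fintek_uses_port_4e(struct fintek_dev *fintek)
{
	u8 portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);

	/* bit 4 set: index/data ports at 0x4e/0x4f, clear: 0x2e/0x2f */
	return (portsel & PORT_SEL_PORT_4E_EN) != 0;
}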
+
+/* Extended Function Mode enable/disable magic values */
+#define CONFIG_REG_ENABLE 0x87
+#define CONFIG_REG_DISABLE 0xaa
+
+/* Chip IDs found in CR_CHIP_ID_{HI,LO} */
+#define CHIP_ID_HIGH_F71809U 0x04
+#define CHIP_ID_LOW_F71809U 0x08
+
+/*
+ * Global control regs we need to care about:
+ * Global Control def.
+ * Register name addr val. */
+#define GCR_SOFTWARE_RESET 0x02 /* 0x00 */
+#define GCR_LOGICAL_DEV_NO 0x07 /* 0x00 */
+#define GCR_CHIP_ID_HI 0x20 /* 0x04 */
+#define GCR_CHIP_ID_LO 0x21 /* 0x08 */
+#define GCR_VENDOR_ID_HI 0x23 /* 0x19 */
+#define GCR_VENDOR_ID_LO 0x24 /* 0x34 */
+#define GCR_CONFIG_PORT_SEL 0x25 /* 0x01 */
+#define GCR_KBMOUSE_WAKEUP 0x27
+
+#define LOGICAL_DEV_DISABLE 0x00
+#define LOGICAL_DEV_ENABLE 0x01
+
+/* Logical device number of the CIR function */
+#define LOGICAL_DEV_CIR 0x05
+
+/* CIR Logical Device (LDN 0x08) config registers */
+#define CIR_CR_COMMAND_INDEX 0x04
+#define CIR_CR_IRCS 0x05 /* Before the host writes a command to IR, it
+ must set this to 1. When the host finishes
+ writing the command, it must clear it to 0. */
+#define CIR_CR_COMMAND_DATA 0x06 /* Host reads or writes command data */
+#define CIR_CR_CLASS 0x07 /* 0xff = rx-only, 0x66 = rx + 2 tx,
+ 0x33 = rx + 1 tx */
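A sketch of the handshake the CIR_CR_IRCS comment above describes, written with the fintek_cr_write() helper from fintek-cir.c; the function name and the exact sequencing are assumptions for illustration, not code from the patch:

static void fintek_send_cir_cmd(struct fintek_dev *fintek, u8 index, u8 data)
{
	fintek_cr_write(fintek, 0x01, CIR_CR_IRCS);	/* host takes the interface */
	fintek_cr_write(fintek, index, CIR_CR_COMMAND_INDEX);
	fintek_cr_write(fintek, data, CIR_CR_COMMAND_DATA);
	fintek_cr_write(fintek, 0x00, CIR_CR_IRCS);	/* host releases it again */
}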
+#define CIR_CR_DEV_EN 0x30 /* bit0 = 1 enables CIR */
+#define CIR_CR_BASE_ADDR_HI 0x60 /* MSB of CIR IO base addr */
+#define CIR_CR_BASE_ADDR_LO 0x61 /* LSB of CIR IO base addr */
+#define CIR_CR_IRQ_SEL 0x70 /* bits3-0 store CIR IRQ */
+#define CIR_CR_PSOUT_STATUS 0xf1
+#define CIR_CR_WAKE_KEY3_ADDR 0xf8
+#define CIR_CR_WAKE_KEY3_CODE 0xf9
+#define CIR_CR_WAKE_KEY3_DC 0xfa
+#define CIR_CR_WAKE_CONTROL 0xfb
+#define CIR_CR_WAKE_KEY12_ADDR 0xfc
+#define CIR_CR_WAKE_KEY4_ADDR 0xfd
+#define CIR_CR_WAKE_KEY5_ADDR 0xfe
+
+#define CLASS_RX_ONLY 0xff
+#define CLASS_RX_2TX 0x66
+#define CLASS_RX_1TX 0x33
+
+/* CIR device registers */
+#define CIR_STATUS 0x00
+#define CIR_RX_DATA 0x01
+#define CIR_TX_CONTROL 0x02
+#define CIR_TX_DATA 0x03
+#define CIR_CONTROL 0x04
+
+/* Bits to enable CIR wake */
+#define LOGICAL_DEV_ACPI 0x01
+#define LDEV_ACPI_WAKE_EN_REG 0xe8
+#define ACPI_WAKE_EN_CIR_BIT 0x04
+
+#define LDEV_ACPI_PME_EN_REG 0xf0
+#define LDEV_ACPI_PME_CLR_REG 0xf1
+#define ACPI_PME_CIR_BIT 0x02
+
+#define LDEV_ACPI_STATE_REG 0xf4
+#define ACPI_STATE_CIR_BIT 0x20
+
+/*
+ * CIR status register (0x00):
+ * 7 - CIR_IRQ_EN (1 = enable CIR IRQ, 0 = disable)
+ * 3 - TX_FINISH (1 when TX finished, write 1 to clear)
+ * 2 - TX_UNDERRUN (1 on TX underrun, write 1 to clear)
+ * 1 - RX_TIMEOUT (1 on RX timeout, write 1 to clear)
+ * 0 - RX_RECEIVE (1 on RX receive, write 1 to clear)
+ */
+#define CIR_STATUS_IRQ_EN 0x80
+#define CIR_STATUS_TX_FINISH 0x08
+#define CIR_STATUS_TX_UNDERRUN 0x04
+#define CIR_STATUS_RX_TIMEOUT 0x02
+#define CIR_STATUS_RX_RECEIVE 0x01
+#define CIR_STATUS_IRQ_MASK 0x0f
+
+/*
+ * CIR TX control register (0x02):
+ * 7 - TX_START (1 to indicate TX start, auto-cleared when done)
+ * 6 - TX_END (1 to indicate TX data written to TX fifo)
+ */
+#define CIR_TX_CONTROL_TX_START 0x80
+#define CIR_TX_CONTROL_TX_END 0x40
+
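Transmit support is not wired up in fintek-cir.c by this patch; purely as an illustration of the two TX bits documented above, a hypothetical transmit kick-off might look like the sketch below (the register ordering is an assumption):

static void fintek_tx_kickoff(struct fintek_dev *fintek,
			      const u8 *buf, unsigned int len)
{
	unsigned int i;

	/* load the TX fifo byte by byte */
	for (i = 0; i < len; i++)
		fintek_cir_reg_write(fintek, buf[i], CIR_TX_DATA);

	/* flag the data as complete and start the transmission */
	fintek_cir_reg_write(fintek,
			     CIR_TX_CONTROL_TX_END | CIR_TX_CONTROL_TX_START,
			     CIR_TX_CONTROL);
}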
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c
index afae14fd152e..129d3f9a461d 100644
--- a/drivers/media/rc/keymaps/rc-lme2510.c
+++ b/drivers/media/rc/keymaps/rc-lme2510.c
@@ -14,81 +14,81 @@
static struct rc_map_table lme2510_rc[] = {
/* Type 1 - 26 buttons */
- { 0xef12ba45, KEY_0 },
- { 0xef12a05f, KEY_1 },
- { 0xef12af50, KEY_2 },
- { 0xef12a25d, KEY_3 },
- { 0xef12be41, KEY_4 },
- { 0xef12f50a, KEY_5 },
- { 0xef12bd42, KEY_6 },
- { 0xef12b847, KEY_7 },
- { 0xef12b649, KEY_8 },
- { 0xef12fa05, KEY_9 },
- { 0xef12bc43, KEY_POWER },
- { 0xef12b946, KEY_SUBTITLE },
- { 0xef12f906, KEY_PAUSE },
- { 0xef12fc03, KEY_MEDIA_REPEAT},
- { 0xef12fd02, KEY_PAUSE },
- { 0xef12a15e, KEY_VOLUMEUP },
- { 0xef12a35c, KEY_VOLUMEDOWN },
- { 0xef12f609, KEY_CHANNELUP },
- { 0xef12e51a, KEY_CHANNELDOWN },
- { 0xef12e11e, KEY_PLAY },
- { 0xef12e41b, KEY_ZOOM },
- { 0xef12a659, KEY_MUTE },
- { 0xef12a55a, KEY_TV },
- { 0xef12e718, KEY_RECORD },
- { 0xef12f807, KEY_EPG },
- { 0xef12fe01, KEY_STOP },
+ { 0x10ed45, KEY_0 },
+ { 0x10ed5f, KEY_1 },
+ { 0x10ed50, KEY_2 },
+ { 0x10ed5d, KEY_3 },
+ { 0x10ed41, KEY_4 },
+ { 0x10ed0a, KEY_5 },
+ { 0x10ed42, KEY_6 },
+ { 0x10ed47, KEY_7 },
+ { 0x10ed49, KEY_8 },
+ { 0x10ed05, KEY_9 },
+ { 0x10ed43, KEY_POWER },
+ { 0x10ed46, KEY_SUBTITLE },
+ { 0x10ed06, KEY_PAUSE },
+ { 0x10ed03, KEY_MEDIA_REPEAT},
+ { 0x10ed02, KEY_PAUSE },
+ { 0x10ed5e, KEY_VOLUMEUP },
+ { 0x10ed5c, KEY_VOLUMEDOWN },
+ { 0x10ed09, KEY_CHANNELUP },
+ { 0x10ed1a, KEY_CHANNELDOWN },
+ { 0x10ed1e, KEY_PLAY },
+ { 0x10ed1b, KEY_ZOOM },
+ { 0x10ed59, KEY_MUTE },
+ { 0x10ed5a, KEY_TV },
+ { 0x10ed18, KEY_RECORD },
+ { 0x10ed07, KEY_EPG },
+ { 0x10ed01, KEY_STOP },
/* Type 2 - 20 buttons */
- { 0xff40ea15, KEY_0 },
- { 0xff40f708, KEY_1 },
- { 0xff40f609, KEY_2 },
- { 0xff40f50a, KEY_3 },
- { 0xff40f30c, KEY_4 },
- { 0xff40f20d, KEY_5 },
- { 0xff40f10e, KEY_6 },
- { 0xff40ef10, KEY_7 },
- { 0xff40ee11, KEY_8 },
- { 0xff40ed12, KEY_9 },
- { 0xff40ff00, KEY_POWER },
- { 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */
- { 0xff40e51a, KEY_PAUSE }, /* Timeshift */
- { 0xff40fd02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
- { 0xff40f906, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
- { 0xff40fe01, KEY_CHANNELUP },
- { 0xff40fa05, KEY_CHANNELDOWN },
- { 0xff40eb14, KEY_ZOOM },
- { 0xff40e718, KEY_RECORD },
- { 0xff40e916, KEY_STOP },
+ { 0xbf15, KEY_0 },
+ { 0xbf08, KEY_1 },
+ { 0xbf09, KEY_2 },
+ { 0xbf0a, KEY_3 },
+ { 0xbf0c, KEY_4 },
+ { 0xbf0d, KEY_5 },
+ { 0xbf0e, KEY_6 },
+ { 0xbf10, KEY_7 },
+ { 0xbf11, KEY_8 },
+ { 0xbf12, KEY_9 },
+ { 0xbf00, KEY_POWER },
+ { 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
+ { 0xbf1a, KEY_PAUSE }, /* Timeshift */
+ { 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+ { 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+ { 0xbf01, KEY_CHANNELUP },
+ { 0xbf05, KEY_CHANNELDOWN },
+ { 0xbf14, KEY_ZOOM },
+ { 0xbf18, KEY_RECORD },
+ { 0xbf16, KEY_STOP },
/* Type 3 - 20 buttons */
- { 0xff00e31c, KEY_0 },
- { 0xff00f807, KEY_1 },
- { 0xff00ea15, KEY_2 },
- { 0xff00f609, KEY_3 },
- { 0xff00e916, KEY_4 },
- { 0xff00e619, KEY_5 },
- { 0xff00f20d, KEY_6 },
- { 0xff00f30c, KEY_7 },
- { 0xff00e718, KEY_8 },
- { 0xff00a15e, KEY_9 },
- { 0xff00ba45, KEY_POWER },
- { 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */
- { 0xff00b54a, KEY_PAUSE }, /* Timeshift */
- { 0xff00b847, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
- { 0xff00bc43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
- { 0xff00b946, KEY_CHANNELUP },
- { 0xff00bf40, KEY_CHANNELDOWN },
- { 0xff00f708, KEY_ZOOM },
- { 0xff00bd42, KEY_RECORD },
- { 0xff00a55a, KEY_STOP },
+ { 0x1c, KEY_0 },
+ { 0x07, KEY_1 },
+ { 0x15, KEY_2 },
+ { 0x09, KEY_3 },
+ { 0x16, KEY_4 },
+ { 0x19, KEY_5 },
+ { 0x0d, KEY_6 },
+ { 0x0c, KEY_7 },
+ { 0x18, KEY_8 },
+ { 0x5e, KEY_9 },
+ { 0x45, KEY_POWER },
+ { 0x44, KEY_MEDIA_REPEAT}, /* Recall */
+ { 0x4a, KEY_PAUSE }, /* Timeshift */
+ { 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+ { 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+ { 0x46, KEY_CHANNELUP },
+ { 0x40, KEY_CHANNELDOWN },
+ { 0x08, KEY_ZOOM },
+ { 0x42, KEY_RECORD },
+ { 0x5a, KEY_STOP },
};
static struct rc_map_list lme2510_map = {
.map = {
.scan = lme2510_rc,
.size = ARRAY_SIZE(lme2510_rc),
- .rc_type = RC_TYPE_UNKNOWN,
+ .rc_type = RC_TYPE_NEC,
.name = RC_MAP_LME2510,
}
};
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 3be180b3ba27..bb53de7fe408 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -687,7 +687,7 @@ config VIDEO_HEXIUM_GEMINI
config VIDEO_TIMBERDALE
tristate "Support for timberdale Video In/LogiWIN"
- depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_V4L2 && I2C && DMADEVICES
select DMA_ENGINE
select TIMB_DMA
select VIDEO_ADV7180
@@ -757,6 +757,8 @@ config VIDEO_NOON010PC30
---help---
This driver supports NOON010PC30 CIF camera from Siliconfile
+source "drivers/media/video/m5mols/Kconfig"
+
config VIDEO_OMAP3
tristate "OMAP 3 Camera support (EXPERIMENTAL)"
select OMAP_IOMMU
@@ -952,7 +954,7 @@ config VIDEO_SAMSUNG_S5P_FIMC
config VIDEO_S5P_MIPI_CSIS
tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
- depends on VIDEO_V4L2 && PM_RUNTIME && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API
---help---
This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver.
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 9519160c2e01..f0fecd6f6a33 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
+obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 0073a8c55336..40eb6326e48a 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -438,7 +438,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
strcat(vc->card, " (676/");
break;
default:
- strcat(vc->card, " (???/");
+ strcat(vc->card, " (XXX/");
break;
}
switch (cam->params.version.sensor_flags) {
@@ -458,7 +458,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
strcat(vc->card, "500)");
break;
default:
- strcat(vc->card, "???)");
+ strcat(vc->card, "XXX)");
break;
}
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 2354336862cf..934185cca758 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -25,8 +25,8 @@
#include <linux/delay.h>
#include <media/cx25840.h>
#include <linux/firmware.h>
-#include <staging/altera.h>
+#include "../../../staging/altera-stapl/altera.h"
#include "cx23885.h"
#include "tuner-xc2028.h"
#include "netup-init.h"
diff --git a/drivers/media/video/gspca/coarse_expo_autogain.h b/drivers/media/video/gspca/coarse_expo_autogain.h
deleted file mode 100644
index 1cb9d941eaf6..000000000000
--- a/drivers/media/video/gspca/coarse_expo_autogain.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Auto gain algorithm for camera's with a coarse exposure control
- *
- * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* Autogain + exposure algorithm for cameras with a coarse exposure control
- (usually this means we can only control the clockdiv to change exposure)
- As changing the clockdiv so that the fps drops from 30 to 15 fps for
- example, will lead to a huge exposure change (it effectively doubles),
- this algorithm normally tries to only adjust the gain (between 40 and
- 80 %) and if that does not help, only then changes exposure. This leads
- to a much more stable image then using the knee algorithm which at
- certain points of the knee graph will only try to adjust exposure,
- which leads to oscilating as one exposure step is huge.
-
- Note this assumes that the sd struct for the cam in question has
- exp_too_high_cnt and exp_too_high_cnt int members for use by this function.
-
- Returns 0 if no changes were made, 1 if the gain and or exposure settings
- where changed. */
-static int gspca_coarse_grained_expo_autogain(struct gspca_dev *gspca_dev,
- int avg_lum, int desired_avg_lum, int deadzone)
-{
- int i, steps, gain, orig_gain, exposure, orig_exposure;
- int gain_low, gain_high;
- const struct ctrl *gain_ctrl = NULL;
- const struct ctrl *exposure_ctrl = NULL;
- struct sd *sd = (struct sd *) gspca_dev;
- int retval = 0;
-
- for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
- if (gspca_dev->ctrl_dis & (1 << i))
- continue;
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_GAIN)
- gain_ctrl = &gspca_dev->sd_desc->ctrls[i];
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_EXPOSURE)
- exposure_ctrl = &gspca_dev->sd_desc->ctrls[i];
- }
- if (!gain_ctrl || !exposure_ctrl) {
- PDEBUG(D_ERR, "Error: gspca_coarse_grained_expo_autogain "
- "called on cam without gain or exposure");
- return 0;
- }
-
- if (gain_ctrl->get(gspca_dev, &gain) ||
- exposure_ctrl->get(gspca_dev, &exposure))
- return 0;
-
- orig_gain = gain;
- orig_exposure = exposure;
- gain_low =
- (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 2;
- gain_low += gain_ctrl->qctrl.minimum;
- gain_high =
- (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 4;
- gain_high += gain_ctrl->qctrl.minimum;
-
- /* If we are of a multiple of deadzone, do multiple steps to reach the
- desired lumination fast (with the risc of a slight overshoot) */
- steps = (desired_avg_lum - avg_lum) / deadzone;
-
- PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
- avg_lum, desired_avg_lum, steps);
-
- if ((gain + steps) > gain_high &&
- sd->exposure < exposure_ctrl->qctrl.maximum) {
- gain = gain_high;
- sd->exp_too_low_cnt++;
- } else if ((gain + steps) < gain_low &&
- sd->exposure > exposure_ctrl->qctrl.minimum) {
- gain = gain_low;
- sd->exp_too_high_cnt++;
- } else {
- gain += steps;
- if (gain > gain_ctrl->qctrl.maximum)
- gain = gain_ctrl->qctrl.maximum;
- else if (gain < gain_ctrl->qctrl.minimum)
- gain = gain_ctrl->qctrl.minimum;
- sd->exp_too_high_cnt = 0;
- sd->exp_too_low_cnt = 0;
- }
-
- if (sd->exp_too_high_cnt > 3) {
- exposure--;
- sd->exp_too_high_cnt = 0;
- } else if (sd->exp_too_low_cnt > 3) {
- exposure++;
- sd->exp_too_low_cnt = 0;
- }
-
- if (gain != orig_gain) {
- gain_ctrl->set(gspca_dev, gain);
- retval = 1;
- }
- if (exposure != orig_exposure) {
- exposure_ctrl->set(gspca_dev, exposure);
- retval = 1;
- }
-
- return retval;
-}
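As a worked example of the step logic in the autogain algorithm removed above (values chosen purely for illustration): with deadzone = 8, avg_lum = 140 and desired_avg_lum = 180, steps = (180 - 140) / 8 = 5, so the gain is nudged by five steps in a single pass instead of oscillating one huge exposure step at a time.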
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
index 66671a4092e4..26fc206f095e 100644
--- a/drivers/media/video/gspca/kinect.c
+++ b/drivers/media/video/gspca/kinect.c
@@ -34,7 +34,7 @@ MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver");
MODULE_LICENSE("GPL");
-#ifdef DEBUG
+#ifdef GSPCA_DEBUG
int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK |
D_USBI | D_USBO | D_V4L2;
#endif
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 36a46fc78734..057e287b9152 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -609,7 +609,7 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
* buffers, there are some pretty strict real time constraints for
* isochronous transfer for larger frame sizes).
*/
-/*jfm: this value works well for 1600x1200, but not 800x600 - see isoc_init */
+/*jfm: this value does not work for 800x600 - see isoc_init */
#define OVFX2_BULK_SIZE (13 * 4096)
/* I2C registers */
@@ -3307,6 +3307,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
gspca_dev->cam.ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
+ sd->frame_rate = 15;
return 0;
}
@@ -3469,7 +3470,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
ARRAY_SIZE(init_519_ov7660));
write_i2c_regvals(sd, norm_7660, ARRAY_SIZE(norm_7660));
sd->gspca_dev.curr_mode = 1; /* 640x480 */
- sd->frame_rate = 15;
ov519_set_mode(sd);
ov519_set_fr(sd);
sd->ctrls[COLORS].max = 4; /* 0..4 */
@@ -3511,7 +3511,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
switch (sd->bridge) {
case BRIDGE_OVFX2:
- if (gspca_dev->width == 1600)
+ if (gspca_dev->width != 800)
gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
else
gspca_dev->cam.bulk_size = 7 * 4096;
@@ -4478,7 +4478,7 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
/* A short read signals EOF */
- if (len < OVFX2_BULK_SIZE) {
+ if (len < gspca_dev->cam.bulk_size) {
/* If the frame is short, and it is one of the first ones
the sensor and bridge are still syncing, so drop it. */
if (sd->first_frame) {
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 6415aff5cbd1..81b8a600783b 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -60,7 +60,7 @@ struct sd {
u32 pktsz; /* (used by pkt_scan) */
u16 npkt;
- u8 nchg;
+ s8 nchg;
s8 short_mark;
u8 quality; /* image quality */
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
index b538dce96f78..a14a84a5079b 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
@@ -125,7 +125,7 @@
#define HDCS_SLEEP_MODE (1 << 1)
#define HDCS_DEFAULT_EXPOSURE 48
-#define HDCS_DEFAULT_GAIN 128
+#define HDCS_DEFAULT_GAIN 50
static int hdcs_probe_1x00(struct sd *sd);
static int hdcs_probe_1020(struct sd *sd);
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index a4e4dfdbc2f2..0fb75524484d 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1328,6 +1328,8 @@ int ivtv_init_on_first_open(struct ivtv *itv)
if (!itv->has_cx23415)
write_reg_sync(0x03, IVTV_REG_DMACONTROL);
+ ivtv_s_std_enc(itv, &itv->tuner_std);
+
/* Default interrupts enabled. For the PVR350 this includes the
decoder VSYNC interrupt, which is always on. It is not only used
during decoding but also by the OSD.
@@ -1336,12 +1338,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC);
ivtv_set_osd_alpha(itv);
- }
- else
+ ivtv_s_std_dec(itv, &itv->tuner_std);
+ } else {
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);
-
- /* For cards with video out, this call needs interrupts enabled */
- ivtv_s_std(NULL, &fh, &itv->tuner_std);
+ }
/* Setup initial controls */
cx2341x_handler_setup(&itv->cxhdl);
diff --git a/drivers/media/video/ivtv/ivtv-firmware.c b/drivers/media/video/ivtv/ivtv-firmware.c
index 14a1cea1d70d..02c5adebf517 100644
--- a/drivers/media/video/ivtv/ivtv-firmware.c
+++ b/drivers/media/video/ivtv/ivtv-firmware.c
@@ -280,8 +280,6 @@ int ivtv_firmware_restart(struct ivtv *itv)
{
int rc = 0;
v4l2_std_id std;
- struct ivtv_open_id fh;
- fh.itv = itv;
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
/* Display test image during restart */
@@ -301,14 +299,19 @@ int ivtv_firmware_restart(struct ivtv *itv)
/* Allow settings to reload */
ivtv_mailbox_cache_invalidate(itv);
- /* Restore video standard */
+ /* Restore encoder video standard */
std = itv->std;
itv->std = 0;
- ivtv_s_std(NULL, &fh, &std);
+ ivtv_s_std_enc(itv, &std);
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
ivtv_init_mpeg_decoder(itv);
+ /* Restore decoder video standard */
+ std = itv->std_out;
+ itv->std_out = 0;
+ ivtv_s_std_dec(itv, &std);
+
/* Restore framebuffer if active */
if (itv->ivtvfb_restore)
itv->ivtvfb_restore(itv);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 1689783cd19a..f9e347dae739 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1071,28 +1071,8 @@ static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
return 0;
}
-int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
+void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std)
{
- DEFINE_WAIT(wait);
- struct ivtv *itv = fh2id(fh)->itv;
- struct yuv_playback_info *yi = &itv->yuv_info;
- int f;
-
- if ((*std & V4L2_STD_ALL) == 0)
- return -EINVAL;
-
- if (*std == itv->std)
- return 0;
-
- if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
- atomic_read(&itv->capturing) > 0 ||
- atomic_read(&itv->decoding) > 0) {
- /* Switching standard would turn off the radio or mess
- with already running streams, prevent that by
- returning EBUSY. */
- return -EBUSY;
- }
-
itv->std = *std;
itv->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
itv->is_50hz = !itv->is_60hz;
@@ -1106,48 +1086,79 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
if (itv->hw_flags & IVTV_HW_CX25840)
itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284;
- IVTV_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)itv->std);
-
/* Tuner */
ivtv_call_all(itv, core, s_std, itv->std);
+}
- if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
- /* set display standard */
- itv->std_out = *std;
- itv->is_out_60hz = itv->is_60hz;
- itv->is_out_50hz = itv->is_50hz;
- ivtv_call_all(itv, video, s_std_output, itv->std_out);
-
- /*
- * The next firmware call is time sensitive. Time it to
- * avoid risk of a hard lock, by trying to ensure the call
- * happens within the first 100 lines of the top field.
- * Make 4 attempts to sync to the decoder before giving up.
- */
- for (f = 0; f < 4; f++) {
- prepare_to_wait(&itv->vsync_waitq, &wait,
- TASK_UNINTERRUPTIBLE);
- if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
- break;
- schedule_timeout(msecs_to_jiffies(25));
- }
- finish_wait(&itv->vsync_waitq, &wait);
-
- if (f == 4)
- IVTV_WARN("Mode change failed to sync to decoder\n");
-
- ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
- itv->main_rect.left = itv->main_rect.top = 0;
- itv->main_rect.width = 720;
- itv->main_rect.height = itv->cxhdl.height;
- ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
- 720, itv->main_rect.height, 0, 0);
- yi->main_rect = itv->main_rect;
- if (!itv->osd_info) {
- yi->osd_full_w = 720;
- yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
- }
+void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
+{
+ struct yuv_playback_info *yi = &itv->yuv_info;
+ DEFINE_WAIT(wait);
+ int f;
+
+ /* set display standard */
+ itv->std_out = *std;
+ itv->is_out_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
+ itv->is_out_50hz = !itv->is_out_60hz;
+ ivtv_call_all(itv, video, s_std_output, itv->std_out);
+
+ /*
+ * The next firmware call is time sensitive. Time it to
+ * avoid risk of a hard lock, by trying to ensure the call
+ * happens within the first 100 lines of the top field.
+ * Make 4 attempts to sync to the decoder before giving up.
+ */
+ for (f = 0; f < 4; f++) {
+ prepare_to_wait(&itv->vsync_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+ break;
+ schedule_timeout(msecs_to_jiffies(25));
}
+ finish_wait(&itv->vsync_waitq, &wait);
+
+ if (f == 4)
+ IVTV_WARN("Mode change failed to sync to decoder\n");
+
+ ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
+ itv->main_rect.left = 0;
+ itv->main_rect.top = 0;
+ itv->main_rect.width = 720;
+ itv->main_rect.height = itv->is_out_50hz ? 576 : 480;
+ ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
+ 720, itv->main_rect.height, 0, 0);
+ yi->main_rect = itv->main_rect;
+ if (!itv->osd_info) {
+ yi->osd_full_w = 720;
+ yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
+ }
+}
+
+int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
+{
+ struct ivtv *itv = fh2id(fh)->itv;
+
+ if ((*std & V4L2_STD_ALL) == 0)
+ return -EINVAL;
+
+ if (*std == itv->std)
+ return 0;
+
+ if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
+ atomic_read(&itv->capturing) > 0 ||
+ atomic_read(&itv->decoding) > 0) {
+ /* Switching standard would mess with already running
+ streams, prevent that by returning EBUSY. */
+ return -EBUSY;
+ }
+
+ IVTV_DEBUG_INFO("Switching standard to %llx.\n",
+ (unsigned long long)itv->std);
+
+ ivtv_s_std_enc(itv, std);
+ if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
+ ivtv_s_std_dec(itv, std);
+
return 0;
}
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
index 58f003412afd..89185caeafae 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.h
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -27,7 +27,8 @@ u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt);
void ivtv_set_osd_alpha(struct ivtv *itv);
int ivtv_set_speed(struct ivtv *itv, int speed);
void ivtv_set_funcs(struct video_device *vdev);
-int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std);
+void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std);
+void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std);
int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 942683336555..e7794dc1330e 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -589,7 +589,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
/* Avoid unpredictable PCI bus hang - disable video clocks */
v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
- ivtv_msleep_timeout(300, 1);
+ ivtv_msleep_timeout(300, 0);
ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
}
@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
}
/* Handle any pending interrupts */
- ivtv_msleep_timeout(100, 1);
+ ivtv_msleep_timeout(100, 0);
}
atomic_dec(&itv->capturing);
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index b6eb51ce7735..293db806d936 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -71,7 +71,7 @@ static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
Turning this signal on and off can confuse certain
TVs. As far as I can tell there is no reason not to
transmit this signal. */
- if ((itv->std & V4L2_STD_625_50) && !enabled) {
+ if ((itv->std_out & V4L2_STD_625_50) && !enabled) {
enabled = 1;
mode = 0x08; /* 4x3 full format */
}
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 17247451c693..6b7c9c823330 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -247,7 +247,7 @@ static int ivtvfb_set_osd_coords(struct ivtv *itv, const struct ivtv_osd_coords
static int ivtvfb_set_display_window(struct ivtv *itv, struct v4l2_rect *ivtv_window)
{
- int osd_height_limit = itv->is_50hz ? 576 : 480;
+ int osd_height_limit = itv->is_out_50hz ? 576 : 480;
/* Only fail if resolution too high, otherwise fudge the start coords. */
if ((ivtv_window->height > osd_height_limit) || (ivtv_window->width > IVTV_OSD_MAX_WIDTH))
@@ -471,9 +471,9 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
- if (itv->is_50hz && trace > 312)
+ if (itv->is_out_50hz && trace > 312)
trace -= 312;
- else if (itv->is_60hz && trace > 262)
+ else if (itv->is_out_60hz && trace > 262)
trace -= 262;
if (trace == 1)
vblank.flags |= FB_VBLANK_VSYNCING;
@@ -656,7 +656,7 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
IVTVFB_DEBUG_INFO("ivtvfb_check_var\n");
/* Set base references for mode calcs. */
- if (itv->is_50hz) {
+ if (itv->is_out_50hz) {
pixclock = 84316;
hlimit = 776;
vlimit = 591;
@@ -784,12 +784,12 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
If the margins are too large, just center the screen
(enforcing margins causes too many problems) */
- if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1) {
+ if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1)
var->left_margin = 1 + ((IVTV_OSD_MAX_WIDTH - var->xres) / 2);
- }
- if (var->upper_margin + var->yres > (itv->is_50hz ? 577 : 481)) {
- var->upper_margin = 1 + (((itv->is_50hz ? 576 : 480) - var->yres) / 2);
- }
+
+ if (var->upper_margin + var->yres > (itv->is_out_50hz ? 577 : 481))
+ var->upper_margin = 1 + (((itv->is_out_50hz ? 576 : 480) -
+ var->yres) / 2);
/* Maintain overall 'size' for a constant refresh rate */
var->right_margin = hlimit - var->left_margin - var->xres;
@@ -836,7 +836,12 @@ static int ivtvfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *inf
u32 osd_pan_index;
struct ivtv *itv = (struct ivtv *) info->par;
- osd_pan_index = (var->xoffset + (var->yoffset * var->xres_virtual))*var->bits_per_pixel/8;
+ if (var->yoffset + info->var.yres > info->var.yres_virtual ||
+ var->xoffset + info->var.xres > info->var.xres_virtual)
+ return -EINVAL;
+
+ osd_pan_index = var->yoffset * info->fix.line_length
+ + var->xoffset * info->var.bits_per_pixel / 8;
write_reg(osd_pan_index, 0x02A0C);
/* Pass this info back the yuv handler */
@@ -1003,19 +1008,21 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
/* Hardware coords start at 0, user coords start at 1. */
osd_left--;
- start_window.left = osd_left >= 0 ? osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2);
+ start_window.left = osd_left >= 0 ?
+ osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2);
oi->display_byte_stride =
start_window.width * oi->bytes_per_pixel;
/* Vertical size & position */
- max_height = itv->is_50hz ? 576 : 480;
+ max_height = itv->is_out_50hz ? 576 : 480;
if (osd_yres > max_height)
osd_yres = max_height;
- start_window.height = osd_yres ? osd_yres : itv->is_50hz ? 480 : 400;
+ start_window.height = osd_yres ?
+ osd_yres : itv->is_out_50hz ? 480 : 400;
/* Check vertical start (osd_upper). */
if (osd_upper + start_window.height > max_height + 1) {
diff --git a/drivers/media/video/m5mols/Kconfig b/drivers/media/video/m5mols/Kconfig
new file mode 100644
index 000000000000..302dc3d70193
--- /dev/null
+++ b/drivers/media/video/m5mols/Kconfig
@@ -0,0 +1,5 @@
+config VIDEO_M5MOLS
+ tristate "Fujitsu M-5MOLS 8MP sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+	  This driver supports the Fujitsu M-5MOLS camera sensor with ISP.
diff --git a/drivers/media/video/m5mols/Makefile b/drivers/media/video/m5mols/Makefile
new file mode 100644
index 000000000000..0a44e028edc7
--- /dev/null
+++ b/drivers/media/video/m5mols/Makefile
@@ -0,0 +1,3 @@
+m5mols-objs := m5mols_core.o m5mols_controls.o m5mols_capture.o
+
+obj-$(CONFIG_VIDEO_M5MOLS) += m5mols.o
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
new file mode 100644
index 000000000000..10b55c854487
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -0,0 +1,296 @@
+/*
+ * Header for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef M5MOLS_H
+#define M5MOLS_H
+
+#include <media/v4l2-subdev.h>
+#include "m5mols_reg.h"
+
+extern int m5mols_debug;
+
+#define to_m5mols(__sd) container_of(__sd, struct m5mols_info, sd)
+
+#define to_sd(__ctrl) \
+ (&container_of(__ctrl->handler, struct m5mols_info, handle)->sd)
+
+enum m5mols_restype {
+ M5MOLS_RESTYPE_MONITOR,
+ M5MOLS_RESTYPE_CAPTURE,
+ M5MOLS_RESTYPE_MAX,
+};
+
+/**
+ * struct m5mols_resolution - structure for the resolution
+ * @type: resolution type according to the pixel code
+ * @width: width of the resolution
+ * @height: height of the resolution
+ * @reg: resolution preset register value
+ */
+struct m5mols_resolution {
+ u8 reg;
+ enum m5mols_restype type;
+ u16 width;
+ u16 height;
+};
+
+/**
+ * struct m5mols_exif - structure for the EXIF information of M-5MOLS
+ * @exposure_time: exposure time register value
+ * @shutter_speed: shutter speed register value
+ * @aperture: aperture register value
+ * @brightness: brightness register value
+ * @exposure_bias: exposure bias (also called EV bias) register value
+ * @iso_speed: ISO register value
+ * @flash: status register value of the flash
+ * @sdr: status register value of the Subject Distance Range
+ * @qval: register value whose exact meaning is not documented
+ */
+struct m5mols_exif {
+ u32 exposure_time;
+ u32 shutter_speed;
+ u32 aperture;
+ u32 brightness;
+ u32 exposure_bias;
+ u16 iso_speed;
+ u16 flash;
+ u16 sdr;
+ u16 qval;
+};
+
+/**
+ * struct m5mols_capture - Structure for the capture capability
+ * @exif: EXIF information
+ * @main: size in bytes of the main image
+ * @thumb: size in bytes of the thumbnail image, if one accompanies the main image
+ * @total: total size in bytes of the produced image
+ */
+struct m5mols_capture {
+ struct m5mols_exif exif;
+ u32 main;
+ u32 thumb;
+ u32 total;
+};
+
+/**
+ * struct m5mols_scenemode - structure for the scenemode capability
+ * @metering: metering light register value
+ * @ev_bias: EV bias register value
+ * @wb_mode: white balance mode (Auto or Manual)
+ * @wb_preset: white balance preset register value for the Manual mode
+ * @chroma_en: register value indicating whether the Chroma capability is enabled
+ * @chroma_lvl: chroma level register value
+ * @edge_en: register value indicating whether the Edge capability is enabled
+ * @edge_lvl: edge level register value
+ * @af_range: Auto Focus range
+ * @fd_mode: Face Detection mode
+ * @mcc: Multi-axis Color Conversion (emotion color) register value
+ * @light: status of the Light
+ * @flash: status of the Flash
+ * @tone: tone (contrast) register value
+ * @iso: ISO register value
+ * @capt_mode: Image Stabilization mode used while capturing
+ * @wdr: Wide Dynamic Range register value
+ *
+ * The values recommended in the datasheet are used for each scene mode.
+ */
+struct m5mols_scenemode {
+ u32 metering;
+ u32 ev_bias;
+ u32 wb_mode;
+ u32 wb_preset;
+ u32 chroma_en;
+ u32 chroma_lvl;
+ u32 edge_en;
+ u32 edge_lvl;
+ u32 af_range;
+ u32 fd_mode;
+ u32 mcc;
+ u32 light;
+ u32 flash;
+ u32 tone;
+ u32 iso;
+ u32 capt_mode;
+ u32 wdr;
+};
+
+/**
+ * struct m5mols_version - firmware version information
+ * @customer: customer information
+ * @project: version of project information according to customer
+ * @fw: firmware revision
+ * @hw: hardware revision
+ * @param: version of the parameter
+ * @awb: Auto WhiteBalance algorithm version
+ * @str: information about manufacturer and packaging vendor
+ * @af: Auto Focus version
+ *
+ * The register offsets run from the customer version at 0x0 to the awb
+ * version at 0x09. The customer and project fields occupy 1 byte each,
+ * while fw, hw, param and awb require 2 bytes each. The str field is a
+ * unique string associated with the firmware version; it includes
+ * information about the manufacturer and the vendor of the sensor's
+ * packaging. The least significant 2 bytes of the string identify the
+ * packaging manufacturer.
+ */
+#define VERSION_STRING_SIZE 22
+struct m5mols_version {
+ u8 customer;
+ u8 project;
+ u16 fw;
+ u16 hw;
+ u16 param;
+ u16 awb;
+ u8 str[VERSION_STRING_SIZE];
+ u8 af;
+};
+#define VERSION_SIZE sizeof(struct m5mols_version)
+
+/**
+ * struct m5mols_info - M-5MOLS driver data structure
+ * @pdata: platform data
+ * @sd: v4l-subdev instance
+ * @pad: media pad
+ * @ffmt: current fmt according to resolution type
+ * @res_type: current resolution type
+ * @code: current code
+ * @irq_waitq: waitqueue for the capture
+ * @work_irq: workqueue for the IRQ
+ * @flags: state variable for the interrupt handler
+ * @handle: control handler
+ * @autoexposure: Auto Exposure control
+ * @exposure: Exposure control
+ * @autowb: Auto White Balance control
+ * @colorfx: Color effect control
+ * @saturation: Saturation control
+ * @zoom: Zoom control
+ * @ver: information of the version
+ * @cap: the capture mode attributes
+ * @power: current sensor's power status
+ * @ctrl_sync: true means all controls of the sensor are initialized
+ * @lock_ae: true means the Auto Exposure is locked
+ * @lock_awb: true means the Auto White Balance is locked
+ * @resolution: register value for current resolution
+ * @interrupt: register value for current interrupt status
+ * @mode: register value for current operation mode
+ * @mode_save: register value for current operation mode for saving
+ * @set_power: optional power callback to the board code
+ */
+struct m5mols_info {
+ const struct m5mols_platform_data *pdata;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
+ int res_type;
+ enum v4l2_mbus_pixelcode code;
+ wait_queue_head_t irq_waitq;
+ struct work_struct work_irq;
+ unsigned long flags;
+
+ struct v4l2_ctrl_handler handle;
+ /* Autoexposure/exposure control cluster */
+ struct {
+ struct v4l2_ctrl *autoexposure;
+ struct v4l2_ctrl *exposure;
+ };
+ struct v4l2_ctrl *autowb;
+ struct v4l2_ctrl *colorfx;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *zoom;
+
+ struct m5mols_version ver;
+ struct m5mols_capture cap;
+ bool power;
+ bool ctrl_sync;
+ bool lock_ae;
+ bool lock_awb;
+ u8 resolution;
+ u32 interrupt;
+ u32 mode;
+ u32 mode_save;
+ int (*set_power)(struct device *dev, int on);
+};
+
+#define ST_CAPT_IRQ 0
+
+#define is_powered(__info) (__info->power)
+#define is_ctrl_synced(__info) (__info->ctrl_sync)
+#define is_available_af(__info) (__info->ver.af)
+#define is_code(__code, __type) (__code == m5mols_default_ffmt[__type].code)
+#define is_manufacturer(__info, __manufacturer) \
+ (__info->ver.str[0] == __manufacturer[0] && \
+ __info->ver.str[1] == __manufacturer[1])
+/*
+ * I2C operation of the M-5MOLS
+ *
+ * The I2C read operation of the M-5MOLS requires 2 messages. The first
+ * message sends the information about the command, command category, and total
+ * message size. The second message is used to retrieve the data specified in
+ * the first message.
+ *
+ * 1st message 2nd message
+ * +-------+---+----------+-----+-------+ +------+------+------+------+
+ * | size1 | R | category | cmd | size2 | | d[0] | d[1] | d[2] | d[3] |
+ * +-------+---+----------+-----+-------+ +------+------+------+------+
+ * - size1: message data size(5 in this case)
+ * - size2: desired buffer size of the 2nd message
+ * - d[0..3]: according to size2
+ *
+ * The I2C write operation needs just one message. The message includes
+ * category, command, total size, and desired data.
+ *
+ * 1st message
+ * +-------+---+----------+-----+------+------+------+------+
+ * | size1 | W | category | cmd | d[0] | d[1] | d[2] | d[3] |
+ * +-------+---+----------+-----+------+------+------+------+
+ * - d[0..3]: according to size1
+ */
+int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
+int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
+
+/*
+ * Mode operation of the M-5MOLS
+ *
+ * Changing the mode of the M-5MOLS requires the correct execution order.
+ * There are three modes (PARAMETER, MONITOR, CAPTURE) that can be selected
+ * by the user. Various command categories are associated with each mode.
+ *
+ * +============================================================+
+ * | mode | category |
+ * +============================================================+
+ * | FLASH | FLASH(only after Stand-by or Power-on) |
+ * | SYSTEM | SYSTEM(only after sensor arm-booting) |
+ * | PARAMETER | PARAMETER |
+ * | MONITOR | MONITOR(preview), Auto Focus, Face Detection |
+ * | CAPTURE | Single CAPTURE, Preview(recording) |
+ * +============================================================+
+ *
+ * The allowed transitions between the modes are as follows:
+ * PARAMETER <---> MONITOR <---> CAPTURE
+ */
+int m5mols_mode(struct m5mols_info *info, u32 mode);
+
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
+int m5mols_sync_controls(struct m5mols_info *info);
+int m5mols_start_capture(struct m5mols_info *info);
+int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
+int m5mols_lock_3a(struct m5mols_info *info, bool lock);
+int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
+
+/* The firmware function */
+int m5mols_update_fw(struct v4l2_subdev *sd,
+ int (*set_power)(struct m5mols_info *, bool));
+
+#endif /* M5MOLS_H */
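The "combination of size, category and command" taken by m5mols_read() and m5mols_write() above is built by macros from m5mols_reg.h, which is not part of this hunk. A minimal sketch of how such a packing could look, purely as an assumption for illustration (the real I2C_REG()/I2C_SIZE()/I2C_CATEGORY()/I2C_COMMAND() definitions may use a different bit layout):

    /* Hypothetical packing of the u32 register argument; see m5mols_reg.h
     * for the authoritative definitions. */
    #define I2C_REG(category, cmd, size) \
            (((size) << 16) | ((category) << 8) | (cmd))
    #define I2C_SIZE(reg)       (((reg) >> 16) & 0xff)
    #define I2C_CATEGORY(reg)   (((reg) >> 8) & 0xff)
    #define I2C_COMMAND(reg)    ((reg) & 0xff)

m5mols_busy() in m5mols_core.c below already uses this interface: I2C_REG(category, cmd, 1) describes a 1-byte read of the given category/command, which m5mols_read() then splits back into the size/category/cmd fields of the first I2C message.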
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
new file mode 100644
index 000000000000..d71a3903b60f
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -0,0 +1,191 @@
+/*
+ * The Capture code for Fujitsu M-5MOLS ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/videodev2.h>
+#include <linux/version.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/m5mols.h>
+
+#include "m5mols.h"
+#include "m5mols_reg.h"
+
+static int m5mols_capture_error_handler(struct m5mols_info *info,
+ int timeout)
+{
+ int ret;
+
+	/* Disable all interrupts and clear relevant interrupt status bits */
+ ret = m5mols_write(&info->sd, SYSTEM_INT_ENABLE,
+ info->interrupt & ~(REG_INT_CAPTURE));
+ if (ret)
+ return ret;
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+/**
+ * m5mols_read_rational - I2C read of a rational number
+ *
+ * Read numerator and denominator from registers @addr_num and @addr_den
+ * respectively and return the division result in @val.
+ */
+static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
+ u32 addr_den, u32 *val)
+{
+ u32 num, den;
+
+ int ret = m5mols_read(sd, addr_num, &num);
+ if (!ret)
+ ret = m5mols_read(sd, addr_den, &den);
+ if (ret)
+ return ret;
+ *val = den == 0 ? 0 : num / den;
+ return ret;
+}
+
+/**
+ * m5mols_capture_info - Gather captured image information
+ *
+ * For now it gathers only EXIF information and file size.
+ */
+static int m5mols_capture_info(struct m5mols_info *info)
+{
+ struct m5mols_exif *exif = &info->cap.exif;
+ struct v4l2_subdev *sd = &info->sd;
+ int ret;
+
+ ret = m5mols_read_rational(sd, EXIF_INFO_EXPTIME_NU,
+ EXIF_INFO_EXPTIME_DE, &exif->exposure_time);
+ if (ret)
+ return ret;
+ ret = m5mols_read_rational(sd, EXIF_INFO_TV_NU, EXIF_INFO_TV_DE,
+ &exif->shutter_speed);
+ if (ret)
+ return ret;
+ ret = m5mols_read_rational(sd, EXIF_INFO_AV_NU, EXIF_INFO_AV_DE,
+ &exif->aperture);
+ if (ret)
+ return ret;
+ ret = m5mols_read_rational(sd, EXIF_INFO_BV_NU, EXIF_INFO_BV_DE,
+ &exif->brightness);
+ if (ret)
+ return ret;
+ ret = m5mols_read_rational(sd, EXIF_INFO_EBV_NU, EXIF_INFO_EBV_DE,
+ &exif->exposure_bias);
+ if (ret)
+ return ret;
+
+ ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
+ if (!ret)
+ ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
+ if (!ret)
+ ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
+ if (!ret)
+ ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
+ if (ret)
+ return ret;
+
+ if (!ret)
+ ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
+ if (!ret)
+ ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
+ if (!ret)
+ info->cap.total = info->cap.main + info->cap.thumb;
+
+ return ret;
+}
+
+int m5mols_start_capture(struct m5mols_info *info)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ u32 resolution = info->resolution;
+ int timeout;
+ int ret;
+
+ /*
+	 * Prepare the capture: set controls and the interrupt before entering
+	 * capture mode.
+ *
+ * 1) change to MONITOR mode for operating control & interrupt
+ * 2) set controls (considering v4l2_control value & lock 3A)
+ * 3) set interrupt
+ * 4) change to CAPTURE mode
+ */
+ ret = m5mols_mode(info, REG_MONITOR);
+ if (!ret)
+ ret = m5mols_sync_controls(info);
+ if (!ret)
+ ret = m5mols_lock_3a(info, true);
+ if (!ret)
+ ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
+ if (!ret)
+ ret = m5mols_mode(info, REG_CAPTURE);
+ if (!ret) {
+ /* Wait for capture interrupt, after changing capture mode */
+ timeout = wait_event_interruptible_timeout(info->irq_waitq,
+ test_bit(ST_CAPT_IRQ, &info->flags),
+ msecs_to_jiffies(2000));
+ if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags))
+ ret = m5mols_capture_error_handler(info, timeout);
+ }
+ if (!ret)
+ ret = m5mols_lock_3a(info, false);
+ if (ret)
+ return ret;
+ /*
+	 * Start the capture: set the capture frame count, resolution and
+	 * format (available formats: JPEG, Bayer RAW, YUV).
+ *
+	 * 1) select single or multi (up to 25) capture, format, size
+ * 2) set interrupt
+ * 3) start capture(for main image, now)
+ * 4) get information
+	 * 5) notify the file size to the v4l2 device (e.g. the s5p-fimc v4l2 device)
+ */
+ ret = m5mols_write(sd, CAPC_SEL_FRAME, 1);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution);
+ if (!ret)
+ ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
+ if (!ret)
+ ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN);
+ if (!ret) {
+ /* Wait for the capture completion interrupt */
+ timeout = wait_event_interruptible_timeout(info->irq_waitq,
+ test_bit(ST_CAPT_IRQ, &info->flags),
+ msecs_to_jiffies(2000));
+ if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) {
+ ret = m5mols_capture_info(info);
+ if (!ret)
+ v4l2_subdev_notify(sd, 0, &info->cap.total);
+ }
+ }
+
+ return m5mols_capture_error_handler(info, timeout);
+}
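m5mols_start_capture() blocks on info->irq_waitq until ST_CAPT_IRQ appears in info->flags. The interrupt handler providing that wake-up belongs to m5mols_core.c and is outside this hunk; the following is only a sketch of the expected counterpart, with the handler name and its exact duties being assumptions:

    /* Sketch only: the real handler in m5mols_core.c may also read
     * SYSTEM_INT_FACTOR and dispatch other interrupt sources. */
    static irqreturn_t m5mols_sample_irq_handler(int irq, void *data)
    {
            struct v4l2_subdev *sd = data;
            struct m5mols_info *info = to_m5mols(sd);

            /* Flag the capture interrupt and wake m5mols_start_capture() */
            set_bit(ST_CAPT_IRQ, &info->flags);
            wake_up_interruptible(&info->irq_waitq);

            return IRQ_HANDLED;
    }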
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
new file mode 100644
index 000000000000..817c16fec368
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -0,0 +1,299 @@
+/*
+ * Controls for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+
+#include "m5mols.h"
+#include "m5mols_reg.h"
+
+static struct m5mols_scenemode m5mols_default_scenemode[] = {
+ [REG_SCENE_NORMAL] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_NORMAL, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 5, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_PORTRAIT] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 4,
+ REG_AF_NORMAL, BIT_FD_EN | BIT_FD_DRAW_FACE_FRAME,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_LANDSCAPE] = {
+ REG_AE_ALL, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 4, REG_EDGE_ON, 6,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_SPORTS] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_PARTY_INDOOR] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_200, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_BEACH_SNOW] = {
+ REG_AE_CENTER, REG_AE_INDEX_10_POS, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_SUNSET] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
+ REG_AWB_DAYLIGHT,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_DAWN_DUSK] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
+ REG_AWB_FLUORESCENT_1,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_FALL] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 5, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_NIGHT] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_AGAINST_LIGHT] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_FIRE] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
+ },
+ [REG_SCENE_TEXT] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 7,
+ REG_AF_MACRO, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_ANTI_SHAKE, REG_WDR_ON,
+ },
+ [REG_SCENE_CANDLE] = {
+ REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+ REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+ REG_AF_NORMAL, REG_FD_OFF,
+ REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+ 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+ },
+};
+
+/**
+ * m5mols_do_scenemode() - Change current scenemode
+ * @info: M-5MOLS driver data
+ * @mode: desired scene mode
+ *
+ * WARNING: The execution order is important. Do not change the order.
+ */
+int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
+{
+	struct v4l2_subdev *sd = &info->sd;
+	struct m5mols_scenemode scenemode;
+	int ret;
+
+	if (mode > REG_SCENE_CANDLE)
+		return -EINVAL;
+
+	scenemode = m5mols_default_scenemode[mode];
+
+ ret = m5mols_lock_3a(info, false);
+ if (!ret)
+ ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, mode);
+ if (!ret)
+ ret = m5mols_write(sd, AE_EV_PRESET_CAPTURE, mode);
+ if (!ret)
+ ret = m5mols_write(sd, AE_MODE, scenemode.metering);
+ if (!ret)
+ ret = m5mols_write(sd, AE_INDEX, scenemode.ev_bias);
+ if (!ret)
+ ret = m5mols_write(sd, AWB_MODE, scenemode.wb_mode);
+ if (!ret)
+ ret = m5mols_write(sd, AWB_MANUAL, scenemode.wb_preset);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CHROMA_EN, scenemode.chroma_en);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CHROMA_LVL, scenemode.chroma_lvl);
+ if (!ret)
+ ret = m5mols_write(sd, MON_EDGE_EN, scenemode.edge_en);
+ if (!ret)
+ ret = m5mols_write(sd, MON_EDGE_LVL, scenemode.edge_lvl);
+ if (!ret && is_available_af(info))
+ ret = m5mols_write(sd, AF_MODE, scenemode.af_range);
+ if (!ret && is_available_af(info))
+ ret = m5mols_write(sd, FD_CTL, scenemode.fd_mode);
+ if (!ret)
+ ret = m5mols_write(sd, MON_TONE_CTL, scenemode.tone);
+ if (!ret)
+ ret = m5mols_write(sd, AE_ISO, scenemode.iso);
+ if (!ret)
+ ret = m5mols_mode(info, REG_CAPTURE);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_WDR_EN, scenemode.wdr);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_MCC_MODE, scenemode.mcc);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_LIGHT_CTRL, scenemode.light);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_FLASH_CTRL, scenemode.flash);
+ if (!ret)
+ ret = m5mols_write(sd, CAPC_MODE, scenemode.capt_mode);
+ if (!ret)
+ ret = m5mols_mode(info, REG_MONITOR);
+
+ return ret;
+}
+
+static int m5mols_lock_ae(struct m5mols_info *info, bool lock)
+{
+ int ret = 0;
+
+ if (info->lock_ae != lock)
+ ret = m5mols_write(&info->sd, AE_LOCK,
+ lock ? REG_AE_LOCK : REG_AE_UNLOCK);
+ if (!ret)
+ info->lock_ae = lock;
+
+ return ret;
+}
+
+static int m5mols_lock_awb(struct m5mols_info *info, bool lock)
+{
+ int ret = 0;
+
+ if (info->lock_awb != lock)
+ ret = m5mols_write(&info->sd, AWB_LOCK,
+ lock ? REG_AWB_LOCK : REG_AWB_UNLOCK);
+ if (!ret)
+ info->lock_awb = lock;
+
+ return ret;
+}
+
+/* m5mols_lock_3a() - Lock 3A (Auto Exposure, Auto White Balance, Auto Focus) */
+int m5mols_lock_3a(struct m5mols_info *info, bool lock)
+{
+ int ret;
+
+ ret = m5mols_lock_ae(info, lock);
+ if (!ret)
+ ret = m5mols_lock_awb(info, lock);
+ /* Don't need to handle unlocking AF */
+ if (!ret && is_available_af(info) && lock)
+ ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP);
+
+ return ret;
+}
+
+/* m5mols_set_ctrl() - The main control setter, called from m5mols_s_ctrl() */
+int m5mols_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct m5mols_info *info = to_m5mols(sd);
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ return m5mols_write(sd, MON_ZOOM, ctrl->val);
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = m5mols_lock_ae(info,
+ ctrl->val == V4L2_EXPOSURE_AUTO ? false : true);
+ if (!ret && ctrl->val == V4L2_EXPOSURE_AUTO)
+ ret = m5mols_write(sd, AE_MODE, REG_AE_ALL);
+ if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL) {
+ int val = info->exposure->val;
+ ret = m5mols_write(sd, AE_MODE, REG_AE_OFF);
+ if (!ret)
+ ret = m5mols_write(sd, AE_MAN_GAIN_MON, val);
+ if (!ret)
+ ret = m5mols_write(sd, AE_MAN_GAIN_CAP, val);
+ }
+ return ret;
+
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = m5mols_lock_awb(info, ctrl->val ? false : true);
+ if (!ret)
+ ret = m5mols_write(sd, AWB_MODE, ctrl->val ?
+ REG_AWB_AUTO : REG_AWB_PRESET);
+ return ret;
+
+ case V4L2_CID_SATURATION:
+ ret = m5mols_write(sd, MON_CHROMA_LVL, ctrl->val);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CHROMA_EN, REG_CHROMA_ON);
+ return ret;
+
+ case V4L2_CID_COLORFX:
+ /*
+ * This control uses two kinds of registers: normal & color.
+ * The normal effect belongs to category 1, while the color
+ * one belongs to category 2.
+ *
+ * The normal effect uses one register: CAT1_EFFECT.
+ * The color effect uses three registers:
+ * CAT2_COLOR_EFFECT, CAT2_CFIXR, CAT2_CFIXB.
+ */
+ ret = m5mols_write(sd, PARM_EFFECT,
+ ctrl->val == V4L2_COLORFX_NEGATIVE ? REG_EFFECT_NEGA :
+ ctrl->val == V4L2_COLORFX_EMBOSS ? REG_EFFECT_EMBOSS :
+ REG_EFFECT_OFF);
+ if (!ret)
+ ret = m5mols_write(sd, MON_EFFECT,
+ ctrl->val == V4L2_COLORFX_SEPIA ?
+ REG_COLOR_EFFECT_ON : REG_COLOR_EFFECT_OFF);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CFIXR,
+ ctrl->val == V4L2_COLORFX_SEPIA ?
+ REG_CFIXR_SEPIA : 0);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CFIXB,
+ ctrl->val == V4L2_COLORFX_SEPIA ?
+ REG_CFIXB_SEPIA : 0);
+ return ret;
+ }
+
+ return -EINVAL;
+}
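Once the control handler set up in m5mols_core.c is registered, the controls handled by m5mols_set_ctrl() above are reachable through the standard V4L2 control ioctls. A hedged user-space sketch, assuming the sensor is exposed as /dev/v4l-subdev0 (the actual node depends on the board and media controller setup):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Request the sepia color effect routed to m5mols_set_ctrl(). */
    static int m5mols_demo_set_sepia(const char *subdev_path)
    {
            struct v4l2_control ctrl = {
                    .id = V4L2_CID_COLORFX,
                    .value = V4L2_COLORFX_SEPIA,
            };
            int ret, fd = open(subdev_path, O_RDWR);

            if (fd < 0)
                    return -1;
            ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
            close(fd);
            return ret;
    }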
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
new file mode 100644
index 000000000000..76eac26e84ae
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -0,0 +1,1004 @@
+/*
+ * Driver for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/m5mols.h>
+
+#include "m5mols.h"
+#include "m5mols_reg.h"
+
+int m5mols_debug;
+module_param(m5mols_debug, int, 0644);
+
+#define MODULE_NAME "M5MOLS"
+#define M5MOLS_I2C_CHECK_RETRY 500
+
+/* The regulator consumer names for external voltage regulators */
+static struct regulator_bulk_data supplies[] = {
+ {
+ .supply = "core", /* ARM core power, 1.2V */
+ }, {
+ .supply = "dig_18", /* digital power 1, 1.8V */
+ }, {
+ .supply = "d_sensor", /* sensor power 1, 1.8V */
+ }, {
+ .supply = "dig_28", /* digital power 2, 2.8V */
+ }, {
+ .supply = "a_sensor", /* analog power */
+ }, {
+ .supply = "dig_12", /* digital power 3, 1.2V */
+ },
+};
+
+static struct v4l2_mbus_framefmt m5mols_default_ffmt[M5MOLS_RESTYPE_MAX] = {
+ [M5MOLS_RESTYPE_MONITOR] = {
+ .width = 1920,
+ .height = 1080,
+ .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ },
+ [M5MOLS_RESTYPE_CAPTURE] = {
+ .width = 1920,
+ .height = 1080,
+ .code = V4L2_MBUS_FMT_JPEG_1X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ },
+};
+#define SIZE_DEFAULT_FFMT ARRAY_SIZE(m5mols_default_ffmt)
+
+static const struct m5mols_resolution m5mols_reg_res[] = {
+ { 0x01, M5MOLS_RESTYPE_MONITOR, 128, 96 }, /* SUB-QCIF */
+ { 0x03, M5MOLS_RESTYPE_MONITOR, 160, 120 }, /* QQVGA */
+ { 0x05, M5MOLS_RESTYPE_MONITOR, 176, 144 }, /* QCIF */
+ { 0x06, M5MOLS_RESTYPE_MONITOR, 176, 176 },
+ { 0x08, M5MOLS_RESTYPE_MONITOR, 240, 320 }, /* QVGA */
+ { 0x09, M5MOLS_RESTYPE_MONITOR, 320, 240 }, /* QVGA */
+ { 0x0c, M5MOLS_RESTYPE_MONITOR, 240, 400 }, /* WQVGA */
+ { 0x0d, M5MOLS_RESTYPE_MONITOR, 400, 240 }, /* WQVGA */
+ { 0x0e, M5MOLS_RESTYPE_MONITOR, 352, 288 }, /* CIF */
+ { 0x13, M5MOLS_RESTYPE_MONITOR, 480, 360 },
+ { 0x15, M5MOLS_RESTYPE_MONITOR, 640, 360 }, /* qHD */
+ { 0x17, M5MOLS_RESTYPE_MONITOR, 640, 480 }, /* VGA */
+ { 0x18, M5MOLS_RESTYPE_MONITOR, 720, 480 },
+ { 0x1a, M5MOLS_RESTYPE_MONITOR, 800, 480 }, /* WVGA */
+ { 0x1f, M5MOLS_RESTYPE_MONITOR, 800, 600 }, /* SVGA */
+ { 0x21, M5MOLS_RESTYPE_MONITOR, 1280, 720 }, /* HD */
+ { 0x25, M5MOLS_RESTYPE_MONITOR, 1920, 1080 }, /* 1080p */
+ { 0x29, M5MOLS_RESTYPE_MONITOR, 3264, 2448 }, /* 2.63fps 8M */
+ { 0x39, M5MOLS_RESTYPE_MONITOR, 800, 602 }, /* AHS_MON debug */
+
+ { 0x02, M5MOLS_RESTYPE_CAPTURE, 320, 240 }, /* QVGA */
+ { 0x04, M5MOLS_RESTYPE_CAPTURE, 400, 240 }, /* WQVGA */
+ { 0x07, M5MOLS_RESTYPE_CAPTURE, 480, 360 },
+ { 0x08, M5MOLS_RESTYPE_CAPTURE, 640, 360 }, /* qHD */
+ { 0x09, M5MOLS_RESTYPE_CAPTURE, 640, 480 }, /* VGA */
+ { 0x0a, M5MOLS_RESTYPE_CAPTURE, 800, 480 }, /* WVGA */
+ { 0x10, M5MOLS_RESTYPE_CAPTURE, 1280, 720 }, /* HD */
+ { 0x14, M5MOLS_RESTYPE_CAPTURE, 1280, 960 }, /* 1M */
+ { 0x17, M5MOLS_RESTYPE_CAPTURE, 1600, 1200 }, /* 2M */
+ { 0x19, M5MOLS_RESTYPE_CAPTURE, 1920, 1080 }, /* Full-HD */
+ { 0x1a, M5MOLS_RESTYPE_CAPTURE, 2048, 1152 }, /* 3Mega */
+ { 0x1b, M5MOLS_RESTYPE_CAPTURE, 2048, 1536 },
+ { 0x1c, M5MOLS_RESTYPE_CAPTURE, 2560, 1440 }, /* 4Mega */
+ { 0x1d, M5MOLS_RESTYPE_CAPTURE, 2560, 1536 },
+ { 0x1f, M5MOLS_RESTYPE_CAPTURE, 2560, 1920 }, /* 5Mega */
+ { 0x21, M5MOLS_RESTYPE_CAPTURE, 3264, 1836 }, /* 6Mega */
+ { 0x22, M5MOLS_RESTYPE_CAPTURE, 3264, 1960 },
+ { 0x25, M5MOLS_RESTYPE_CAPTURE, 3264, 2448 }, /* 8Mega */
+};
+
+/**
+ * m5mols_swap_byte - a byte array to integer conversion function
+ * @data: byte array read from the sensor
+ * @length: size in bytes of the I2C packet, as defined in the M-5MOLS datasheet
+ *
+ * Convert an I2C data byte array to an integer, performing any required
+ * byte reordering so that each data type gets a proper value regardless
+ * of the host endianness.
+ */
+static u32 m5mols_swap_byte(u8 *data, u8 length)
+{
+ if (length == 1)
+ return *data;
+ else if (length == 2)
+ return be16_to_cpu(*((u16 *)data));
+ else
+ return be32_to_cpu(*((u32 *)data));
+}
+
+/**
+ * m5mols_read - I2C read function
+ * @reg: combination of size, category and command for the I2C packet
+ * @val: read value
+ */
+int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
+ u8 size = I2C_SIZE(reg);
+ u8 category = I2C_CATEGORY(reg);
+ u8 cmd = I2C_COMMAND(reg);
+ struct i2c_msg msg[2];
+ u8 wbuf[5];
+ int ret;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ if (size != 1 && size != 2 && size != 4) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 5;
+ msg[0].buf = wbuf;
+ wbuf[0] = 5;
+ wbuf[1] = M5MOLS_BYTE_READ;
+ wbuf[2] = category;
+ wbuf[3] = cmd;
+ wbuf[4] = size;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = size + 1;
+ msg[1].buf = rbuf;
+
+ /* minimum stabilization time */
+ usleep_range(200, 200);
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0) {
+ v4l2_err(sd, "read failed: size:%d cat:%02x cmd:%02x. %d\n",
+ size, category, cmd, ret);
+ return ret;
+ }
+
+ *val = m5mols_swap_byte(&rbuf[1], size);
+
+ return 0;
+}
+
+/**
+ * m5mols_write - I2C command write function
+ * @reg: combination of size, category and command for the I2C packet
+ * @val: value to write
+ */
+int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 wbuf[M5MOLS_I2C_MAX_SIZE + 4];
+ u8 category = I2C_CATEGORY(reg);
+ u8 cmd = I2C_COMMAND(reg);
+ u8 size = I2C_SIZE(reg);
+ u32 *buf = (u32 *)&wbuf[4];
+ struct i2c_msg msg[1];
+ int ret;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ if (size != 1 && size != 2 && size != 4) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ msg->addr = client->addr;
+ msg->flags = 0;
+ msg->len = (u16)size + 4;
+ msg->buf = wbuf;
+ wbuf[0] = size + 4;
+ wbuf[1] = M5MOLS_BYTE_WRITE;
+ wbuf[2] = category;
+ wbuf[3] = cmd;
+
+ *buf = m5mols_swap_byte((u8 *)&val, size);
+
+ usleep_range(200, 200);
+
+ ret = i2c_transfer(client->adapter, msg, 1);
+ if (ret < 0) {
+ v4l2_err(sd, "write failed: size:%d cat:%02x cmd:%02x. %d\n",
+ size, category, cmd, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
+{
+ u32 busy, i;
+ int ret;
+
+ for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
+ ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
+ if (ret < 0)
+ return ret;
+ if ((busy & mask) == mask)
+ return 0;
+ }
+ return -EBUSY;
+}
+
+/**
+ * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts
+ *
+ * Before writing desired interrupt value the INT_FACTOR register should
+ * be read to clear pending interrupts.
+ */
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ u32 mask = is_available_af(info) ? REG_INT_AF : 0;
+ u32 dummy;
+ int ret;
+
+ ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
+ if (!ret)
+ ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
+ return ret;
+}
+
+/**
+ * m5mols_reg_mode - Write the mode and check busy status
+ *
+ * Changing the M-5MOLS mode always involves a small delay, so the busy
+ * status must be polled to guarantee that the requested mode is reached.
+ */
+static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
+{
+ int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
+
+ return ret ? ret : m5mols_busy(sd, CAT_SYSTEM, CAT0_SYSMODE, mode);
+}
+
+/**
+ * m5mols_mode - manage the M-5MOLS's mode
+ * @mode: the required operation mode
+ *
+ * The commands of the M-5MOLS are grouped into specific modes. Each function
+ * is guaranteed to work only when the sensor is operating in the mode to
+ * which the command belongs.
+ */
+int m5mols_mode(struct m5mols_info *info, u32 mode)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ int ret = -EINVAL;
+ u32 reg;
+
+	if (mode < REG_PARAMETER || mode > REG_CAPTURE)
+ return ret;
+
+ ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
+ if ((!ret && reg == mode) || ret)
+ return ret;
+
+ switch (reg) {
+ case REG_PARAMETER:
+ ret = m5mols_reg_mode(sd, REG_MONITOR);
+ if (!ret && mode == REG_MONITOR)
+ break;
+ if (!ret)
+ ret = m5mols_reg_mode(sd, REG_CAPTURE);
+ break;
+
+ case REG_MONITOR:
+ if (mode == REG_PARAMETER) {
+ ret = m5mols_reg_mode(sd, REG_PARAMETER);
+ break;
+ }
+
+ ret = m5mols_reg_mode(sd, REG_CAPTURE);
+ break;
+
+ case REG_CAPTURE:
+ ret = m5mols_reg_mode(sd, REG_MONITOR);
+ if (!ret && mode == REG_MONITOR)
+ break;
+ if (!ret)
+ ret = m5mols_reg_mode(sd, REG_PARAMETER);
+ break;
+
+ default:
+ v4l2_warn(sd, "Wrong mode: %d\n", mode);
+ }
+
+ if (!ret)
+ info->mode = mode;
+
+ return ret;
+}
+
+/**
+ * m5mols_get_version - retrieve full revisions information of M-5MOLS
+ *
+ * The version information includes revisions of hardware and firmware,
+ * AutoFocus algorithm version and the version string.
+ */
+static int m5mols_get_version(struct v4l2_subdev *sd)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ union {
+ struct m5mols_version ver;
+ u8 bytes[VERSION_SIZE];
+ } version;
+ u32 *value;
+ u8 cmd = CAT0_VER_CUSTOMER;
+ int ret;
+
+ do {
+ value = (u32 *)&version.bytes[cmd];
+ ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
+ if (ret)
+ return ret;
+ } while (cmd++ != CAT0_VER_AWB);
+
+ do {
+ value = (u32 *)&version.bytes[cmd];
+ ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
+ if (ret)
+ return ret;
+ if (cmd >= VERSION_SIZE - 1)
+ return -EINVAL;
+ } while (version.bytes[cmd++]);
+
+ value = (u32 *)&version.bytes[cmd];
+ ret = m5mols_read(sd, AF_VERSION, value);
+ if (ret)
+ return ret;
+
+ /* store version information swapped for being readable */
+ info->ver = version.ver;
+ info->ver.fw = be16_to_cpu(info->ver.fw);
+ info->ver.hw = be16_to_cpu(info->ver.hw);
+ info->ver.param = be16_to_cpu(info->ver.param);
+ info->ver.awb = be16_to_cpu(info->ver.awb);
+
+ v4l2_info(sd, "Manufacturer\t[%s]\n",
+ is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
+ "Samsung Electro-Machanics" :
+ is_manufacturer(info, REG_SAMSUNG_OPTICS) ?
+ "Samsung Fiber-Optics" :
+ is_manufacturer(info, REG_SAMSUNG_TECHWIN) ?
+ "Samsung Techwin" : "None");
+ v4l2_info(sd, "Customer/Project\t[0x%02x/0x%02x]\n",
+ info->ver.customer, info->ver.project);
+
+ if (!is_available_af(info))
+ v4l2_info(sd, "No support Auto Focus on this firmware\n");
+
+ return ret;
+}
+
+/**
+ * __find_restype - Lookup M-5MOLS resolution type according to pixel code
+ * @code: pixel code
+ */
+static enum m5mols_restype __find_restype(enum v4l2_mbus_pixelcode code)
+{
+ enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR;
+
+ do {
+ if (code == m5mols_default_ffmt[type].code)
+ return type;
+	} while (++type != SIZE_DEFAULT_FFMT);
+
+ return 0;
+}
+
+/**
+ * __find_resolution - Lookup preset and type of M-5MOLS's resolution
+ * @mf: pixel format to find/negotiate the resolution preset for
+ * @type: M-5MOLS resolution type
+ * @resolution: M-5MOLS resolution preset register value
+ *
+ * Find nearest resolution matching resolution preset and adjust mf
+ * to supported values.
+ */
+static int __find_resolution(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf,
+ enum m5mols_restype *type,
+ u32 *resolution)
+{
+ const struct m5mols_resolution *fsize = &m5mols_reg_res[0];
+ const struct m5mols_resolution *match = NULL;
+ enum m5mols_restype stype = __find_restype(mf->code);
+ int i = ARRAY_SIZE(m5mols_reg_res);
+ unsigned int min_err = ~0;
+
+ while (i--) {
+ int err;
+ if (stype == fsize->type) {
+ err = abs(fsize->width - mf->width)
+ + abs(fsize->height - mf->height);
+
+ if (err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ }
+ fsize++;
+ }
+ if (match) {
+ mf->width = match->width;
+ mf->height = match->height;
+ *resolution = match->reg;
+ *type = stype;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info,
+ struct v4l2_subdev_fh *fh,
+ enum v4l2_subdev_format_whence which,
+ enum m5mols_restype type)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL;
+
+ return &info->ffmt[type];
+}
+
+static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (fmt->pad != 0)
+ return -EINVAL;
+
+ format = __find_format(info, fh, fmt->which, info->res_type);
+ if (!format)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ struct v4l2_mbus_framefmt *format = &fmt->format;
+ struct v4l2_mbus_framefmt *sfmt;
+ enum m5mols_restype type;
+ u32 resolution = 0;
+ int ret;
+
+ if (fmt->pad != 0)
+ return -EINVAL;
+
+ ret = __find_resolution(sd, format, &type, &resolution);
+ if (ret < 0)
+ return ret;
+
+ sfmt = __find_format(info, fh, fmt->which, type);
+ if (!sfmt)
+ return 0;
+
+ *sfmt = m5mols_default_ffmt[type];
+ sfmt->width = format->width;
+ sfmt->height = format->height;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ info->resolution = resolution;
+ info->code = format->code;
+ info->res_type = type;
+ }
+
+ return 0;
+}
+
+static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (!code || code->index >= SIZE_DEFAULT_FFMT)
+ return -EINVAL;
+
+ code->code = m5mols_default_ffmt[code->index].code;
+
+ return 0;
+}
+
+static struct v4l2_subdev_pad_ops m5mols_pad_ops = {
+ .enum_mbus_code = m5mols_enum_mbus_code,
+ .get_fmt = m5mols_get_fmt,
+ .set_fmt = m5mols_set_fmt,
+};
+
+/**
+ * m5mols_sync_controls - Apply default scene mode and the current controls
+ *
+ * This is called when streaming starts, to synchronize the v4l2_ctrl
+ * framework with the M-5MOLS controls. First the default scene mode is
+ * applied to the sensor, and then v4l2_ctrl_handler_setup() applies the
+ * current control values on top of it.
+ */
+int m5mols_sync_controls(struct m5mols_info *info)
+{
+ int ret = -EINVAL;
+
+ if (!is_ctrl_synced(info)) {
+ ret = m5mols_do_scenemode(info, REG_SCENE_NORMAL);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_setup(&info->handle);
+ info->ctrl_sync = true;
+ }
+
+ return ret;
+}
+
+/**
+ * m5mols_start_monitor - Start the monitor mode
+ *
+ * Set up the resolution and the frame rate in PARAMETER mode, then switch
+ * over to MONITOR mode and apply the current controls.
+ */
+static int m5mols_start_monitor(struct m5mols_info *info)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ int ret;
+
+ ret = m5mols_mode(info, REG_PARAMETER);
+ if (!ret)
+ ret = m5mols_write(sd, PARM_MON_SIZE, info->resolution);
+ if (!ret)
+ ret = m5mols_write(sd, PARM_MON_FPS, REG_FPS_30);
+ if (!ret)
+ ret = m5mols_mode(info, REG_MONITOR);
+ if (!ret)
+ ret = m5mols_sync_controls(info);
+
+ return ret;
+}
+
+static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+
+ if (enable) {
+ int ret = -EINVAL;
+
+ if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+ ret = m5mols_start_monitor(info);
+ if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+ ret = m5mols_start_capture(info);
+
+ return ret;
+ }
+
+ return m5mols_mode(info, REG_PARAMETER);
+}
+
+static const struct v4l2_subdev_video_ops m5mols_video_ops = {
+ .s_stream = m5mols_s_stream,
+};
+
+static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct m5mols_info *info = to_m5mols(sd);
+ int ret;
+
+ info->mode_save = info->mode;
+
+ ret = m5mols_mode(info, REG_PARAMETER);
+ if (!ret)
+ ret = m5mols_set_ctrl(ctrl);
+ if (!ret)
+ ret = m5mols_mode(info, info->mode_save);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
+ .s_ctrl = m5mols_s_ctrl,
+};
+
+static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct m5mols_platform_data *pdata = info->pdata;
+ int ret;
+
+ if (enable) {
+ if (is_powered(info))
+ return 0;
+
+ if (info->set_power) {
+ ret = info->set_power(&client->dev, 1);
+ if (ret)
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
+ if (ret) {
+ if (info->set_power)
+ info->set_power(&client->dev, 0);
+ return ret;
+ }
+
+ gpio_set_value(pdata->gpio_reset, !pdata->reset_polarity);
+ usleep_range(1000, 1000);
+ info->power = true;
+
+ return ret;
+ }
+
+ if (!is_powered(info))
+ return 0;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
+ if (ret)
+ return ret;
+
+ if (info->set_power)
+ info->set_power(&client->dev, 0);
+
+ gpio_set_value(pdata->gpio_reset, pdata->reset_polarity);
+ usleep_range(1000, 1000);
+ info->power = false;
+
+ return ret;
+}
+
+/* m5mols_update_fw - optional firmware update routine */
+int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
+ int (*set_power)(struct m5mols_info *, bool))
+{
+ return 0;
+}
+
+/**
+ * m5mols_sensor_armboot - Boot the M-5MOLS internal ARM core
+ *
+ * Booting the internal ARM core makes the M-5MOLS ready to accept I2C
+ * commands. It is the first thing to be done after the sensor is powered up,
+ * and the driver must then wait at least 520 ms, as recommended by the
+ * M-5MOLS datasheet, before talking to the sensor again.
+ */
+static int m5mols_sensor_armboot(struct v4l2_subdev *sd)
+{
+ int ret;
+
+ ret = m5mols_write(sd, FLASH_CAM_START, REG_START_ARM_BOOT);
+ if (ret < 0)
+ return ret;
+
+ msleep(520);
+
+ ret = m5mols_get_version(sd);
+ if (!ret)
+ ret = m5mols_update_fw(sd, m5mols_sensor_power);
+ if (ret)
+ return ret;
+
+ v4l2_dbg(1, m5mols_debug, sd, "ARM booted successfully\n");
+
+ ret = m5mols_write(sd, PARM_INTERFACE, REG_INTERFACE_MIPI);
+ if (!ret)
+ ret = m5mols_enable_interrupt(sd, REG_INT_AF);
+
+ return ret;
+}
+
+static int m5mols_init_controls(struct m5mols_info *info)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ u16 max_exposure;
+ u16 step_zoom;
+ int ret;
+
+ /* Determine the control ranges and steps, which vary with the FW version */
+ ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
+ if (!ret)
+ step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(&info->handle, 6);
+ info->autowb = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 0);
+ info->saturation = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_SATURATION,
+ 1, 5, 1, 3);
+ info->zoom = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_ZOOM_ABSOLUTE,
+ 1, 70, step_zoom, 1);
+ info->exposure = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE,
+ 0, max_exposure, 1, (int)max_exposure/2);
+ info->colorfx = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_COLORFX,
+ 4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE);
+ info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
+ 1, 0, V4L2_EXPOSURE_MANUAL);
+
+ sd->ctrl_handler = &info->handle;
+ if (info->handle.error) {
+ v4l2_err(sd, "Failed to initialize controls: %d\n", info->handle.error);
+ v4l2_ctrl_handler_free(&info->handle);
+ return info->handle.error;
+ }
+
+ v4l2_ctrl_cluster(2, &info->autoexposure);
+
+ return 0;
+}
+
+/**
+ * m5mols_s_power - Main sensor power control function
+ *
+ * To prevent damaging the lens when the sensor is powered off, the
+ * Soft-Landing algorithm is invoked where available. Whether Soft-Landing is
+ * available depends on the firmware provider.
+ */
+static int m5mols_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ int ret;
+
+ if (on) {
+ ret = m5mols_sensor_power(info, true);
+ if (!ret)
+ ret = m5mols_sensor_armboot(sd);
+ if (!ret)
+ ret = m5mols_init_controls(info);
+ if (ret)
+ return ret;
+
+ info->ffmt[M5MOLS_RESTYPE_MONITOR] =
+ m5mols_default_ffmt[M5MOLS_RESTYPE_MONITOR];
+ info->ffmt[M5MOLS_RESTYPE_CAPTURE] =
+ m5mols_default_ffmt[M5MOLS_RESTYPE_CAPTURE];
+ return ret;
+ }
+
+ if (is_manufacturer(info, REG_SAMSUNG_TECHWIN)) {
+ ret = m5mols_mode(info, REG_MONITOR);
+ if (!ret)
+ ret = m5mols_write(sd, AF_EXECUTE, REG_AF_STOP);
+ if (!ret)
+ ret = m5mols_write(sd, AF_MODE, REG_AF_POWEROFF);
+ if (!ret)
+ ret = m5mols_busy(sd, CAT_SYSTEM, CAT0_STATUS,
+ REG_AF_IDLE);
+ if (!ret)
+ v4l2_info(sd, "Lens soft-landing succeeded\n");
+ }
+
+ ret = m5mols_sensor_power(info, false);
+ if (!ret) {
+ v4l2_ctrl_handler_free(&info->handle);
+ info->ctrl_sync = false;
+ }
+
+ return ret;
+}
+
+static int m5mols_log_status(struct v4l2_subdev *sd)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+
+ v4l2_ctrl_handler_log_status(&info->handle, sd->name);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops m5mols_core_ops = {
+ .s_power = m5mols_s_power,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .log_status = m5mols_log_status,
+};
+
+static const struct v4l2_subdev_ops m5mols_ops = {
+ .core = &m5mols_core_ops,
+ .pad = &m5mols_pad_ops,
+ .video = &m5mols_video_ops,
+};
+
+static void m5mols_irq_work(struct work_struct *work)
+{
+ struct m5mols_info *info =
+ container_of(work, struct m5mols_info, work_irq);
+ struct v4l2_subdev *sd = &info->sd;
+ u32 reg;
+ int ret;
+
+ if (!is_powered(info) ||
+ m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
+ return;
+
+ switch (info->interrupt & REG_INT_MASK) {
+ case REG_INT_AF:
+ if (!is_available_af(info))
+ break;
+ ret = m5mols_read(sd, AF_STATUS, &reg);
+ v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
+ reg == REG_AF_FAIL ? "Failed" :
+ reg == REG_AF_SUCCESS ? "Success" :
+ reg == REG_AF_IDLE ? "Idle" : "Busy");
+ break;
+ case REG_INT_CAPTURE:
+ if (!test_and_set_bit(ST_CAPT_IRQ, &info->flags))
+ wake_up_interruptible(&info->irq_waitq);
+
+ v4l2_dbg(2, m5mols_debug, sd, "CAPTURE\n");
+ break;
+ default:
+ v4l2_dbg(2, m5mols_debug, sd, "Undefined: %02x\n", info->interrupt);
+ break;
+ }
+}
+
+static irqreturn_t m5mols_irq_handler(int irq, void *data)
+{
+ struct v4l2_subdev *sd = data;
+ struct m5mols_info *info = to_m5mols(sd);
+
+ schedule_work(&info->work_irq);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit m5mols_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct m5mols_platform_data *pdata = client->dev.platform_data;
+ struct m5mols_info *info;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ if (!gpio_is_valid(pdata->gpio_reset)) {
+ dev_err(&client->dev, "No valid RESET GPIO specified\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->irq) {
+ dev_err(&client->dev, "Interrupt not assigned\n");
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(struct m5mols_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pdata = pdata;
+ info->set_power = pdata->set_power;
+
+ ret = gpio_request(pdata->gpio_reset, "M5MOLS_NRST");
+ if (ret) {
+ dev_err(&client->dev, "Failed to request gpio: %d\n", ret);
+ goto out_free;
+ }
+ gpio_direction_output(pdata->gpio_reset, pdata->reset_polarity);
+
+ ret = regulator_bulk_get(&client->dev, ARRAY_SIZE(supplies), supplies);
+ if (ret) {
+ dev_err(&client->dev, "Failed to get regulators: %d\n", ret);
+ goto out_gpio;
+ }
+
+ sd = &info->sd;
+ strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
+ v4l2_i2c_subdev_init(sd, client, &m5mols_ops);
+
+ info->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
+ if (ret < 0)
+ goto out_reg;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+
+ init_waitqueue_head(&info->irq_waitq);
+ INIT_WORK(&info->work_irq, m5mols_irq_work);
+ ret = request_irq(pdata->irq, m5mols_irq_handler,
+ IRQF_TRIGGER_RISING, MODULE_NAME, sd);
+ if (ret) {
+ dev_err(&client->dev, "Interrupt request failed: %d\n", ret);
+ goto out_me;
+ }
+ info->res_type = M5MOLS_RESTYPE_MONITOR;
+ return 0;
+out_me:
+ media_entity_cleanup(&sd->entity);
+out_reg:
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+out_gpio:
+ gpio_free(pdata->gpio_reset);
+out_free:
+ kfree(info);
+ return ret;
+}
+
+static int __devexit m5mols_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct m5mols_info *info = to_m5mols(sd);
+
+ v4l2_device_unregister_subdev(sd);
+ free_irq(info->pdata->irq, sd);
+
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+ gpio_free(info->pdata->gpio_reset);
+ media_entity_cleanup(&sd->entity);
+ kfree(info);
+ return 0;
+}
+
+static const struct i2c_device_id m5mols_id[] = {
+ { MODULE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, m5mols_id);
+
+static struct i2c_driver m5mols_i2c_driver = {
+ .driver = {
+ .name = MODULE_NAME,
+ },
+ .probe = m5mols_probe,
+ .remove = __devexit_p(m5mols_remove),
+ .id_table = m5mols_id,
+};
+
+static int __init m5mols_mod_init(void)
+{
+ return i2c_add_driver(&m5mols_i2c_driver);
+}
+
+static void __exit m5mols_mod_exit(void)
+{
+ i2c_del_driver(&m5mols_i2c_driver);
+}
+
+module_init(m5mols_mod_init);
+module_exit(m5mols_mod_exit);
+
+MODULE_AUTHOR("HeungJun Kim <riverful.kim@samsung.com>");
+MODULE_AUTHOR("Dongsoo Kim <dongsoo45.kim@samsung.com>");
+MODULE_DESCRIPTION("Fujitsu M-5MOLS 8M Pixel camera driver");
+MODULE_LICENSE("GPL");
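As a rough, standalone illustration of the matching rule used by __find_resolution() above (the preset with the smallest |dwidth| + |dheight| wins), the sketch below uses made-up preset values; the driver's real table is m5mols_reg_res[].

#include <stdio.h>
#include <stdlib.h>

struct preset { int width, height; };

/* Made-up monitor presets; the driver's real table differs. */
static const struct preset presets[] = {
	{  640,  480 },
	{ 1280,  720 },
	{ 1920, 1080 },
};

static int find_nearest(int width, int height)
{
	unsigned int min_err = ~0U;
	int i, match = -1;

	for (i = 0; i < (int)(sizeof(presets) / sizeof(presets[0])); i++) {
		unsigned int err = abs(presets[i].width - width) +
				   abs(presets[i].height - height);

		if (err < min_err) {
			min_err = err;
			match = i;
		}
	}
	return match;
}

int main(void)
{
	int i = find_nearest(800, 600);	/* errors 280 vs 600 and 1600 -> 640x480 */

	printf("nearest preset: %dx%d\n", presets[i].width, presets[i].height);
	return 0;
}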
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
new file mode 100644
index 000000000000..b83e36fc6ac6
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -0,0 +1,399 @@
+/*
+ * Register map for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef M5MOLS_REG_H
+#define M5MOLS_REG_H
+
+#define M5MOLS_I2C_MAX_SIZE 4
+#define M5MOLS_BYTE_READ 0x01
+#define M5MOLS_BYTE_WRITE 0x02
+
+#define I2C_CATEGORY(__cat) ((__cat >> 16) & 0xff)
+#define I2C_COMMAND(__comm) ((__comm >> 8) & 0xff)
+#define I2C_SIZE(__reg_s) ((__reg_s) & 0xff)
+#define I2C_REG(__cat, __cmd, __reg_s) ((__cat << 16) | (__cmd << 8) | __reg_s)
+
+/*
+ * Category section register
+ *
+ * A category groups the related commands of the M-5MOLS register map.
+ */
+#define CAT_SYSTEM 0x00
+#define CAT_PARAM 0x01
+#define CAT_MONITOR 0x02
+#define CAT_AE 0x03
+#define CAT_WB 0x06
+#define CAT_EXIF 0x07
+#define CAT_FD 0x09
+#define CAT_LENS 0x0a
+#define CAT_CAPT_PARM 0x0b
+#define CAT_CAPT_CTRL 0x0c
+#define CAT_FLASH 0x0f /* related to FW, revisions, booting */
+
+/*
+ * Category 0 - SYSTEM mode
+ *
+ * The SYSTEM mode covers the sensor-wide functionality: version information,
+ * interrupts, the operating mode and the sensor status. The M-5MOLS sensor
+ * with ISP varies by packaging and manufacturer, and even by customer and
+ * project code, and the available functions may differ accordingly. The
+ * version information helps determine which methods the driver should use.
+ *
+ * There are many registers between the customer version address and the AWB
+ * version address. For details, see the definitions in the file m5mols.h.
+ */
+#define CAT0_VER_CUSTOMER 0x00 /* customer version */
+#define CAT0_VER_AWB 0x09 /* Auto WB version */
+#define CAT0_VER_STRING 0x0a /* string including M-5MOLS */
+#define CAT0_SYSMODE 0x0b /* SYSTEM mode register */
+#define CAT0_STATUS 0x0c /* SYSTEM mode status register */
+#define CAT0_INT_FACTOR 0x10 /* interrupt pending register */
+#define CAT0_INT_ENABLE 0x11 /* interrupt enable register */
+
+#define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
+#define REG_SYSINIT 0x00 /* SYSTEM mode */
+#define REG_PARAMETER 0x01 /* PARAMETER mode */
+#define REG_MONITOR 0x02 /* MONITOR mode */
+#define REG_CAPTURE 0x03 /* CAPTURE mode */
+
+#define SYSTEM_CMD(__cmd) I2C_REG(CAT_SYSTEM, __cmd, 1)
+#define SYSTEM_VER_STRING I2C_REG(CAT_SYSTEM, CAT0_VER_STRING, 1)
+#define REG_SAMSUNG_ELECTRO "SE" /* Samsung Electro-Mechanics */
+#define REG_SAMSUNG_OPTICS "OP" /* Samsung Fiber-Optics */
+#define REG_SAMSUNG_TECHWIN "TB" /* Samsung Techwin */
+
+#define SYSTEM_INT_FACTOR I2C_REG(CAT_SYSTEM, CAT0_INT_FACTOR, 1)
+#define SYSTEM_INT_ENABLE I2C_REG(CAT_SYSTEM, CAT0_INT_ENABLE, 1)
+#define REG_INT_MODE (1 << 0)
+#define REG_INT_AF (1 << 1)
+#define REG_INT_ZOOM (1 << 2)
+#define REG_INT_CAPTURE (1 << 3)
+#define REG_INT_FRAMESYNC (1 << 4)
+#define REG_INT_FD (1 << 5)
+#define REG_INT_LENS_INIT (1 << 6)
+#define REG_INT_SOUND (1 << 7)
+#define REG_INT_MASK 0x0f
+
+/*
+ * Category 1 - PARAMETER mode
+ *
+ * This category covers the camera features of the M-5MOLS: the preview
+ * (MONITOR) resolution, the frame rate, the interface between the sensor and
+ * the Application Processor, and the image effects.
+ */
+#define CAT1_DATA_INTERFACE 0x00 /* interface between sensor and AP */
+#define CAT1_MONITOR_SIZE 0x01 /* resolution at the MONITOR mode */
+#define CAT1_MONITOR_FPS 0x02 /* frame per second at this mode */
+#define CAT1_EFFECT 0x0b /* image effects */
+
+#define PARM_MON_SIZE I2C_REG(CAT_PARAM, CAT1_MONITOR_SIZE, 1)
+
+#define PARM_MON_FPS I2C_REG(CAT_PARAM, CAT1_MONITOR_FPS, 1)
+#define REG_FPS_30 0x02
+
+#define PARM_INTERFACE I2C_REG(CAT_PARAM, CAT1_DATA_INTERFACE, 1)
+#define REG_INTERFACE_MIPI 0x02
+
+#define PARM_EFFECT I2C_REG(CAT_PARAM, CAT1_EFFECT, 1)
+#define REG_EFFECT_OFF 0x00
+#define REG_EFFECT_NEGA 0x01
+#define REG_EFFECT_EMBOSS 0x06
+#define REG_EFFECT_OUTLINE 0x07
+#define REG_EFFECT_WATERCOLOR 0x08
+
+/*
+ * Category 2 - MONITOR mode
+ *
+ * The MONITOR mode is what is usually called the preview mode. The M-5MOLS
+ * also has a mode named "Preview", but it is only used for a specific
+ * video-recording case. The MONITOR mode supports only the YUYV format; the
+ * JPEG and RAW formats are supported by the CAPTURE mode. This category also
+ * provides options such as zoom, color effects (different from the effects in
+ * the PARAMETER category) and an anti hand-shaking algorithm.
+ */
+#define CAT2_ZOOM 0x01 /* set the zoom position & execute */
+#define CAT2_ZOOM_STEP 0x03 /* set the zoom step */
+#define CAT2_CFIXB 0x09 /* CB value for color effect */
+#define CAT2_CFIXR 0x0a /* CR value for color effect */
+#define CAT2_COLOR_EFFECT 0x0b /* set on/off of color effect */
+#define CAT2_CHROMA_LVL 0x0f /* set chroma level */
+#define CAT2_CHROMA_EN 0x10 /* set on/off of choroma */
+#define CAT2_EDGE_LVL 0x11 /* set sharpness level */
+#define CAT2_EDGE_EN 0x12 /* set on/off sharpness */
+#define CAT2_TONE_CTL 0x25 /* set tone color(contrast) */
+
+#define MON_ZOOM I2C_REG(CAT_MONITOR, CAT2_ZOOM, 1)
+
+#define MON_CFIXR I2C_REG(CAT_MONITOR, CAT2_CFIXR, 1)
+#define MON_CFIXB I2C_REG(CAT_MONITOR, CAT2_CFIXB, 1)
+#define REG_CFIXB_SEPIA 0xd8
+#define REG_CFIXR_SEPIA 0x18
+
+#define MON_EFFECT I2C_REG(CAT_MONITOR, CAT2_COLOR_EFFECT, 1)
+#define REG_COLOR_EFFECT_OFF 0x00
+#define REG_COLOR_EFFECT_ON 0x01
+
+#define MON_CHROMA_EN I2C_REG(CAT_MONITOR, CAT2_CHROMA_EN, 1)
+#define MON_CHROMA_LVL I2C_REG(CAT_MONITOR, CAT2_CHROMA_LVL, 1)
+#define REG_CHROMA_OFF 0x00
+#define REG_CHROMA_ON 0x01
+
+#define MON_EDGE_EN I2C_REG(CAT_MONITOR, CAT2_EDGE_EN, 1)
+#define MON_EDGE_LVL I2C_REG(CAT_MONITOR, CAT2_EDGE_LVL, 1)
+#define REG_EDGE_OFF 0x00
+#define REG_EDGE_ON 0x01
+
+#define MON_TONE_CTL I2C_REG(CAT_MONITOR, CAT2_TONE_CTL, 1)
+
+/*
+ * Category 3 - Auto Exposure
+ *
+ * The M-5MOLS exposure capabilities are similar to those of a digital camera.
+ * This category supports AE locking, the various AE modes (exposure ranges),
+ * ISO, flicker reduction, EV bias, shutter and metering. The maximum and
+ * minimum exposure gain values depend on the M-5MOLS firmware, so this
+ * category also provides registers to read those limits. The MONITOR and
+ * CAPTURE modes each have their own gain, shutter and maximum exposure values.
+ */
+#define CAT3_AE_LOCK 0x00 /* locking Auto exposure */
+#define CAT3_AE_MODE 0x01 /* set AE mode, mode means range */
+#define CAT3_ISO 0x05 /* set ISO */
+#define CAT3_EV_PRESET_MONITOR 0x0a /* EV(scenemode) preset for MONITOR */
+#define CAT3_EV_PRESET_CAPTURE 0x0b /* EV(scenemode) preset for CAPTURE */
+#define CAT3_MANUAL_GAIN_MON 0x12 /* metering value for the MONITOR */
+#define CAT3_MAX_GAIN_MON 0x1a /* max gain value for the MONITOR */
+#define CAT3_MANUAL_GAIN_CAP 0x26 /* metering value for the CAPTURE */
+#define CAT3_AE_INDEX 0x38 /* AE index */
+
+#define AE_LOCK I2C_REG(CAT_AE, CAT3_AE_LOCK, 1)
+#define REG_AE_UNLOCK 0x00
+#define REG_AE_LOCK 0x01
+
+#define AE_MODE I2C_REG(CAT_AE, CAT3_AE_MODE, 1)
+#define REG_AE_OFF 0x00 /* AE off */
+#define REG_AE_ALL 0x01 /* calc AE in all block integral */
+#define REG_AE_CENTER 0x03 /* calc AE in center weighted */
+#define REG_AE_SPOT 0x06 /* calc AE in specific spot */
+
+#define AE_ISO I2C_REG(CAT_AE, CAT3_ISO, 1)
+#define REG_ISO_AUTO 0x00
+#define REG_ISO_50 0x01
+#define REG_ISO_100 0x02
+#define REG_ISO_200 0x03
+#define REG_ISO_400 0x04
+#define REG_ISO_800 0x05
+
+#define AE_EV_PRESET_MONITOR I2C_REG(CAT_AE, CAT3_EV_PRESET_MONITOR, 1)
+#define AE_EV_PRESET_CAPTURE I2C_REG(CAT_AE, CAT3_EV_PRESET_CAPTURE, 1)
+#define REG_SCENE_NORMAL 0x00
+#define REG_SCENE_PORTRAIT 0x01
+#define REG_SCENE_LANDSCAPE 0x02
+#define REG_SCENE_SPORTS 0x03
+#define REG_SCENE_PARTY_INDOOR 0x04
+#define REG_SCENE_BEACH_SNOW 0x05
+#define REG_SCENE_SUNSET 0x06
+#define REG_SCENE_DAWN_DUSK 0x07
+#define REG_SCENE_FALL 0x08
+#define REG_SCENE_NIGHT 0x09
+#define REG_SCENE_AGAINST_LIGHT 0x0a
+#define REG_SCENE_FIRE 0x0b
+#define REG_SCENE_TEXT 0x0c
+#define REG_SCENE_CANDLE 0x0d
+
+#define AE_MAN_GAIN_MON I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_MON, 2)
+#define AE_MAX_GAIN_MON I2C_REG(CAT_AE, CAT3_MAX_GAIN_MON, 2)
+#define AE_MAN_GAIN_CAP I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_CAP, 2)
+
+#define AE_INDEX I2C_REG(CAT_AE, CAT3_AE_INDEX, 1)
+#define REG_AE_INDEX_20_NEG 0x00
+#define REG_AE_INDEX_15_NEG 0x01
+#define REG_AE_INDEX_10_NEG 0x02
+#define REG_AE_INDEX_05_NEG 0x03
+#define REG_AE_INDEX_00 0x04
+#define REG_AE_INDEX_05_POS 0x05
+#define REG_AE_INDEX_10_POS 0x06
+#define REG_AE_INDEX_15_POS 0x07
+#define REG_AE_INDEX_20_POS 0x08
+
+/*
+ * Category 6 - White Balance
+ *
+ * This category provides AWB locking, mode, preset, speed, gain bias, etc.
+ */
+#define CAT6_AWB_LOCK 0x00 /* locking Auto Whitebalance */
+#define CAT6_AWB_MODE 0x02 /* set Auto or Manual */
+#define CAT6_AWB_MANUAL 0x03 /* set Manual(preset) value */
+
+#define AWB_LOCK I2C_REG(CAT_WB, CAT6_AWB_LOCK, 1)
+#define REG_AWB_UNLOCK 0x00
+#define REG_AWB_LOCK 0x01
+
+#define AWB_MODE I2C_REG(CAT_WB, CAT6_AWB_MODE, 1)
+#define REG_AWB_AUTO 0x01 /* AWB off */
+#define REG_AWB_PRESET 0x02 /* AWB preset */
+
+#define AWB_MANUAL I2C_REG(CAT_WB, CAT6_AWB_MANUAL, 1)
+#define REG_AWB_INCANDESCENT 0x01
+#define REG_AWB_FLUORESCENT_1 0x02
+#define REG_AWB_FLUORESCENT_2 0x03
+#define REG_AWB_DAYLIGHT 0x04
+#define REG_AWB_CLOUDY 0x05
+#define REG_AWB_SHADE 0x06
+#define REG_AWB_HORIZON 0x07
+#define REG_AWB_LEDLIGHT 0x09
+
+/*
+ * Category 7 - EXIF information
+ */
+#define CAT7_INFO_EXPTIME_NU 0x00
+#define CAT7_INFO_EXPTIME_DE 0x04
+#define CAT7_INFO_TV_NU 0x08
+#define CAT7_INFO_TV_DE 0x0c
+#define CAT7_INFO_AV_NU 0x10
+#define CAT7_INFO_AV_DE 0x14
+#define CAT7_INFO_BV_NU 0x18
+#define CAT7_INFO_BV_DE 0x1c
+#define CAT7_INFO_EBV_NU 0x20
+#define CAT7_INFO_EBV_DE 0x24
+#define CAT7_INFO_ISO 0x28
+#define CAT7_INFO_FLASH 0x2a
+#define CAT7_INFO_SDR 0x2c
+#define CAT7_INFO_QVAL 0x2e
+
+#define EXIF_INFO_EXPTIME_NU I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_NU, 4)
+#define EXIF_INFO_EXPTIME_DE I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_DE, 4)
+#define EXIF_INFO_TV_NU I2C_REG(CAT_EXIF, CAT7_INFO_TV_NU, 4)
+#define EXIF_INFO_TV_DE I2C_REG(CAT_EXIF, CAT7_INFO_TV_DE, 4)
+#define EXIF_INFO_AV_NU I2C_REG(CAT_EXIF, CAT7_INFO_AV_NU, 4)
+#define EXIF_INFO_AV_DE I2C_REG(CAT_EXIF, CAT7_INFO_AV_DE, 4)
+#define EXIF_INFO_BV_NU I2C_REG(CAT_EXIF, CAT7_INFO_BV_NU, 4)
+#define EXIF_INFO_BV_DE I2C_REG(CAT_EXIF, CAT7_INFO_BV_DE, 4)
+#define EXIF_INFO_EBV_NU I2C_REG(CAT_EXIF, CAT7_INFO_EBV_NU, 4)
+#define EXIF_INFO_EBV_DE I2C_REG(CAT_EXIF, CAT7_INFO_EBV_DE, 4)
+#define EXIF_INFO_ISO I2C_REG(CAT_EXIF, CAT7_INFO_ISO, 2)
+#define EXIF_INFO_FLASH I2C_REG(CAT_EXIF, CAT7_INFO_FLASH, 2)
+#define EXIF_INFO_SDR I2C_REG(CAT_EXIF, CAT7_INFO_SDR, 2)
+#define EXIF_INFO_QVAL I2C_REG(CAT_EXIF, CAT7_INFO_QVAL, 2)
+
+/*
+ * Category 9 - Face Detection
+ */
+#define CAT9_FD_CTL 0x00
+
+#define FD_CTL I2C_REG(CAT_FD, CAT9_FD_CTL, 1)
+#define BIT_FD_EN 0
+#define BIT_FD_DRAW_FACE_FRAME 4
+#define BIT_FD_DRAW_SMILE_LVL 6
+#define REG_FD(shift) (1 << shift)
+#define REG_FD_OFF 0x0
+
+/*
+ * Category A - Lens Parameter
+ */
+#define CATA_AF_MODE 0x01
+#define CATA_AF_EXECUTE 0x02
+#define CATA_AF_STATUS 0x03
+#define CATA_AF_VERSION 0x0a
+
+#define AF_MODE I2C_REG(CAT_LENS, CATA_AF_MODE, 1)
+#define REG_AF_NORMAL 0x00 /* Normal AF, one time */
+#define REG_AF_MACRO 0x01 /* Macro AF, one time */
+#define REG_AF_POWEROFF 0x07
+
+#define AF_EXECUTE I2C_REG(CAT_LENS, CATA_AF_EXECUTE, 1)
+#define REG_AF_STOP 0x00
+#define REG_AF_EXE_AUTO 0x01
+#define REG_AF_EXE_CAF 0x02
+
+#define AF_STATUS I2C_REG(CAT_LENS, CATA_AF_STATUS, 1)
+#define REG_AF_FAIL 0x00
+#define REG_AF_SUCCESS 0x02
+#define REG_AF_IDLE 0x04
+#define REG_AF_BUSY 0x05
+
+#define AF_VERSION I2C_REG(CAT_LENS, CATA_AF_VERSION, 1)
+
+/*
+ * Category B - CAPTURE Parameter
+ */
+#define CATB_YUVOUT_MAIN 0x00
+#define CATB_MAIN_IMAGE_SIZE 0x01
+#define CATB_MCC_MODE 0x1d
+#define CATB_WDR_EN 0x2c
+#define CATB_LIGHT_CTRL 0x40
+#define CATB_FLASH_CTRL 0x41
+
+#define CAPP_YUVOUT_MAIN I2C_REG(CAT_CAPT_PARM, CATB_YUVOUT_MAIN, 1)
+#define REG_YUV422 0x00
+#define REG_BAYER10 0x05
+#define REG_BAYER8 0x06
+#define REG_JPEG 0x10
+
+#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, CATB_MAIN_IMAGE_SIZE, 1)
+
+#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, CATB_MCC_MODE, 1)
+#define REG_MCC_OFF 0x00
+#define REG_MCC_NORMAL 0x01
+
+#define CAPP_WDR_EN I2C_REG(CAT_CAPT_PARM, CATB_WDR_EN, 1)
+#define REG_WDR_OFF 0x00
+#define REG_WDR_ON 0x01
+#define REG_WDR_AUTO 0x02
+
+#define CAPP_LIGHT_CTRL I2C_REG(CAT_CAPT_PARM, CATB_LIGHT_CTRL, 1)
+#define REG_LIGHT_OFF 0x00
+#define REG_LIGHT_ON 0x01
+#define REG_LIGHT_AUTO 0x02
+
+#define CAPP_FLASH_CTRL I2C_REG(CAT_CAPT_PARM, CATB_FLASH_CTRL, 1)
+#define REG_FLASH_OFF 0x00
+#define REG_FLASH_ON 0x01
+#define REG_FLASH_AUTO 0x02
+
+/*
+ * Category C - CAPTURE Control
+ */
+#define CATC_CAP_MODE 0x00
+#define CATC_CAP_SEL_FRAME 0x06 /* It determines Single or Multi */
+#define CATC_CAP_START 0x09
+#define CATC_CAP_IMAGE_SIZE 0x0d
+#define CATC_CAP_THUMB_SIZE 0x11
+
+#define CAPC_MODE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_MODE, 1)
+#define REG_CAP_NONE 0x00
+#define REG_CAP_ANTI_SHAKE 0x02
+
+#define CAPC_SEL_FRAME I2C_REG(CAT_CAPT_CTRL, CATC_CAP_SEL_FRAME, 1)
+
+#define CAPC_START I2C_REG(CAT_CAPT_CTRL, CATC_CAP_START, 1)
+#define REG_CAP_START_MAIN 0x01
+#define REG_CAP_START_THUMB 0x03
+
+#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
+#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
+
+/*
+ * Category F - Flash
+ *
+ * This category covers the internal flash memory (firmware) and system startup.
+ */
+#define CATF_CAM_START 0x12 /* It starts internal ARM core booting
+ * after power-up */
+
+#define FLASH_CAM_START I2C_REG(CAT_FLASH, CATF_CAM_START, 1)
+#define REG_START_ARM_BOOT 0x01
+
+#endif /* M5MOLS_REG_H */
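The composite register addresses defined above pack the category, the command and the transfer size into a single value with I2C_REG(), and the I2C_CATEGORY/I2C_COMMAND/I2C_SIZE macros unpack them again. A minimal standalone sketch of the same packing, using the PARM_MON_FPS values (CAT_PARAM = 0x01, CAT1_MONITOR_FPS = 0x02) purely for illustration:

#include <stdio.h>

/* Same packing scheme as the driver's I2C_REG()/I2C_* accessor macros. */
#define EX_REG(cat, cmd, size)	(((cat) << 16) | ((cmd) << 8) | (size))
#define EX_CATEGORY(reg)	(((reg) >> 16) & 0xff)
#define EX_COMMAND(reg)		(((reg) >> 8) & 0xff)
#define EX_SIZE(reg)		((reg) & 0xff)

int main(void)
{
	/* PARM_MON_FPS = I2C_REG(CAT_PARAM, CAT1_MONITOR_FPS, 1) */
	unsigned int reg = EX_REG(0x01, 0x02, 1);

	printf("reg=0x%06x cat=0x%02x cmd=0x%02x size=%u\n",
	       reg, EX_CATEGORY(reg), EX_COMMAND(reg), EX_SIZE(reg));
	/* prints: reg=0x010201 cat=0x01 cmd=0x02 size=1 */
	return 0;
}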
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 472a69359e60..c9fd04ee70a8 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -391,7 +391,7 @@ static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
};
int i;
- dev_dbg(isp->dev, "");
+ dev_dbg(isp->dev, "ISP IRQ: ");
for (i = 0; i < ARRAY_SIZE(name); i++) {
if ((1 << i) & irqstatus)
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 398864370267..4e4d4122d9a6 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1512,7 +1512,7 @@ static int video_dev_create(struct soc_camera_device *icd)
*/
static int soc_camera_video_start(struct soc_camera_device *icd)
{
- struct device_type *type = icd->vdev->dev.type;
+ const struct device_type *type = icd->vdev->dev.type;
int ret;
if (!icd->dev.parent)
diff --git a/drivers/media/video/uvc/Makefile b/drivers/media/video/uvc/Makefile
index 968c1994eda0..2071ca8a2f03 100644
--- a/drivers/media/video/uvc/Makefile
+++ b/drivers/media/video/uvc/Makefile
@@ -1,3 +1,6 @@
uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \
uvc_status.o uvc_isight.o
+ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
+uvcvideo-objs += uvc_entity.o
+endif
obj-$(CONFIG_USB_VIDEO_CLASS) += uvcvideo.o
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 823f4b389745..b6eae48d7fb8 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -248,7 +248,7 @@ uint32_t uvc_fraction_to_interval(uint32_t numerator, uint32_t denominator)
* Terminal and unit management
*/
-static struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
+struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
{
struct uvc_entity *entity;
@@ -795,9 +795,12 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
struct uvc_entity *entity;
unsigned int num_inputs;
unsigned int size;
+ unsigned int i;
+ extra_size = ALIGN(extra_size, sizeof(*entity->pads));
num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
- size = sizeof(*entity) + extra_size + num_inputs;
+ size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
+ + num_inputs;
entity = kzalloc(size, GFP_KERNEL);
if (entity == NULL)
return NULL;
@@ -805,8 +808,17 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
entity->id = id;
entity->type = type;
+ entity->num_links = 0;
+ entity->num_pads = num_pads;
+ entity->pads = ((void *)(entity + 1)) + extra_size;
+
+ for (i = 0; i < num_inputs; ++i)
+ entity->pads[i].flags = MEDIA_PAD_FL_SINK;
+ if (!UVC_ENTITY_IS_OTERM(entity))
+ entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE;
+
entity->bNrInPins = num_inputs;
- entity->baSourceID = ((__u8 *)entity) + sizeof(*entity) + extra_size;
+ entity->baSourceID = (__u8 *)(&entity->pads[num_pads]);
return entity;
}
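The uvc_alloc_entity() change above places the media pads and the baSourceID array in the same allocation as the entity, in the order entity | extra | pads[num_pads] | baSourceID[num_inputs]. A standalone sketch of that layout arithmetic with simplified stand-in types (not the real UVC structures):

#include <stdio.h>
#include <stdlib.h>

struct ex_pad { unsigned long flags; };	/* stands in for struct media_pad */

struct ex_entity {			/* stands in for struct uvc_entity */
	struct ex_pad *pads;
	unsigned char *baSourceID;
	unsigned int num_pads;
	unsigned int bNrInPins;
};

static struct ex_entity *ex_alloc_entity(unsigned int num_pads,
					 unsigned int num_inputs,
					 size_t extra_size)
{
	struct ex_entity *entity;
	size_t size;

	/* Round extra_size up so the pads array stays aligned, as ALIGN() does. */
	extra_size = (extra_size + sizeof(struct ex_pad) - 1)
		   & ~(sizeof(struct ex_pad) - 1);
	size = sizeof(*entity) + extra_size
	     + sizeof(struct ex_pad) * num_pads + num_inputs;

	entity = calloc(1, size);
	if (!entity)
		return NULL;

	entity->num_pads = num_pads;
	entity->bNrInPins = num_inputs;
	entity->pads = (struct ex_pad *)((char *)(entity + 1) + extra_size);
	entity->baSourceID = (unsigned char *)&entity->pads[num_pads];
	return entity;
}

int main(void)
{
	struct ex_entity *e = ex_alloc_entity(2, 1, 5);

	if (!e)
		return 1;
	printf("pads at +%zu, baSourceID at +%zu\n",
	       (size_t)((char *)e->pads - (char *)e),
	       (size_t)((char *)e->baSourceID - (char *)e));
	free(e);
	return 0;
}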
@@ -1585,6 +1597,13 @@ static void uvc_delete(struct uvc_device *dev)
uvc_status_cleanup(dev);
uvc_ctrl_cleanup_device(dev);
+ if (dev->vdev.dev)
+ v4l2_device_unregister(&dev->vdev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ if (media_devnode_is_registered(&dev->mdev.devnode))
+ media_device_unregister(&dev->mdev);
+#endif
+
list_for_each_safe(p, n, &dev->chains) {
struct uvc_video_chain *chain;
chain = list_entry(p, struct uvc_video_chain, list);
@@ -1594,6 +1613,13 @@ static void uvc_delete(struct uvc_device *dev)
list_for_each_safe(p, n, &dev->entities) {
struct uvc_entity *entity;
entity = list_entry(p, struct uvc_entity, list);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ uvc_mc_cleanup_entity(entity);
+#endif
+ if (entity->vdev) {
+ video_device_release(entity->vdev);
+ entity->vdev = NULL;
+ }
kfree(entity);
}
@@ -1616,8 +1642,6 @@ static void uvc_release(struct video_device *vdev)
struct uvc_streaming *stream = video_get_drvdata(vdev);
struct uvc_device *dev = stream->dev;
- video_device_release(vdev);
-
/* Decrement the registered streams count and delete the device when it
* reaches zero.
*/
@@ -1682,7 +1706,7 @@ static int uvc_register_video(struct uvc_device *dev,
* unregistered before the reference is released, so we don't need to
* get another one.
*/
- vdev->parent = &dev->intf->dev;
+ vdev->v4l2_dev = &dev->vdev;
vdev->fops = &uvc_fops;
vdev->release = uvc_release;
strlcpy(vdev->name, dev->name, sizeof vdev->name);
@@ -1731,6 +1755,8 @@ static int uvc_register_terms(struct uvc_device *dev,
ret = uvc_register_video(dev, stream);
if (ret < 0)
return ret;
+
+ term->vdev = stream->vdev;
}
return 0;
@@ -1745,6 +1771,14 @@ static int uvc_register_chains(struct uvc_device *dev)
ret = uvc_register_terms(dev, chain);
if (ret < 0)
return ret;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ ret = uvc_mc_register_entities(chain);
+ if (ret < 0) {
+ uvc_printk(KERN_INFO, "Failed to register entities "
+ "(%d).\n", ret);
+ }
+#endif
}
return 0;
@@ -1814,6 +1848,24 @@ static int uvc_probe(struct usb_interface *intf,
"linux-uvc-devel mailing list.\n");
}
+ /* Register the media and V4L2 devices. */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &intf->dev;
+ strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
+ if (udev->serial)
+ strlcpy(dev->mdev.serial, udev->serial,
+ sizeof(dev->mdev.serial));
+ strcpy(dev->mdev.bus_info, udev->devpath);
+ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ dev->mdev.driver_version = DRIVER_VERSION_NUMBER;
+ if (media_device_register(&dev->mdev) < 0)
+ goto error;
+
+ dev->vdev.mdev = &dev->mdev;
+#endif
+ if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
+ goto error;
+
/* Initialize controls. */
if (uvc_ctrl_init_device(dev) < 0)
goto error;
@@ -1822,7 +1874,7 @@ static int uvc_probe(struct usb_interface *intf,
if (uvc_scan_device(dev) < 0)
goto error;
- /* Register video devices. */
+ /* Register video device nodes. */
if (uvc_register_chains(dev) < 0)
goto error;
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
new file mode 100644
index 000000000000..c3ab0c813be2
--- /dev/null
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -0,0 +1,118 @@
+/*
+ * uvc_entity.c -- USB Video Class driver
+ *
+ * Copyright (C) 2005-2011
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+
+#include "uvcvideo.h"
+
+/* ------------------------------------------------------------------------
+ * Video subdevices registration and unregistration
+ */
+
+static int uvc_mc_register_entity(struct uvc_video_chain *chain,
+ struct uvc_entity *entity)
+{
+ const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
+ struct uvc_entity *remote;
+ unsigned int i;
+ u8 remote_pad;
+ int ret = 0;
+
+ for (i = 0; i < entity->num_pads; ++i) {
+ struct media_entity *source;
+ struct media_entity *sink;
+
+ if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
+ if (remote == NULL)
+ return -EINVAL;
+
+ source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
+ ? &remote->vdev->entity : &remote->subdev.entity;
+ sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+ ? &entity->vdev->entity : &entity->subdev.entity;
+
+ remote_pad = remote->num_pads - 1;
+ ret = media_entity_create_link(source, remote_pad,
+ sink, i, flags);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
+ ret = v4l2_device_register_subdev(&chain->dev->vdev,
+ &entity->subdev);
+
+ return ret;
+}
+
+static struct v4l2_subdev_ops uvc_subdev_ops = {
+};
+
+void uvc_mc_cleanup_entity(struct uvc_entity *entity)
+{
+ if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
+ media_entity_cleanup(&entity->subdev.entity);
+ else if (entity->vdev != NULL)
+ media_entity_cleanup(&entity->vdev->entity);
+}
+
+static int uvc_mc_init_entity(struct uvc_entity *entity)
+{
+ int ret;
+
+ if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
+ v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
+ strlcpy(entity->subdev.name, entity->name,
+ sizeof(entity->subdev.name));
+
+ ret = media_entity_init(&entity->subdev.entity,
+ entity->num_pads, entity->pads, 0);
+ } else
+ ret = media_entity_init(&entity->vdev->entity,
+ entity->num_pads, entity->pads, 0);
+
+ return ret;
+}
+
+int uvc_mc_register_entities(struct uvc_video_chain *chain)
+{
+ struct uvc_entity *entity;
+ int ret;
+
+ list_for_each_entry(entity, &chain->entities, chain) {
+ ret = uvc_mc_init_entity(entity);
+ if (ret < 0) {
+ uvc_printk(KERN_INFO, "Failed to initialize entity for "
+ "entity %u\n", entity->id);
+ return ret;
+ }
+ }
+
+ list_for_each_entry(entity, &chain->entities, chain) {
+ ret = uvc_mc_register_entity(chain, entity);
+ if (ret < 0) {
+ uvc_printk(KERN_INFO, "Failed to register entity for "
+ "entity %u\n", entity->id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 7cf224bae2e5..20107fd3574d 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -98,8 +98,11 @@ struct uvc_xu_control {
#ifdef __KERNEL__
#include <linux/poll.h>
+#include <linux/usb.h>
#include <linux/usb/video.h>
#include <linux/uvcvideo.h>
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
/* --------------------------------------------------------------------------
* UVC constants
@@ -301,6 +304,13 @@ struct uvc_entity {
__u16 type;
char name[64];
+ /* Media controller-related fields. */
+ struct video_device *vdev;
+ struct v4l2_subdev subdev;
+ unsigned int num_pads;
+ unsigned int num_links;
+ struct media_pad *pads;
+
union {
struct {
__u16 wObjectiveFocalLengthMin;
@@ -504,6 +514,10 @@ struct uvc_device {
atomic_t nmappings;
/* Video control interface */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+#endif
+ struct v4l2_device vdev;
__u16 uvc_version;
__u32 clock_frequency;
@@ -583,6 +597,8 @@ extern unsigned int uvc_timeout_param;
/* Core driver */
extern struct uvc_driver uvc_driver;
+extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
+
/* Video buffers queue management. */
extern void uvc_queue_init(struct uvc_video_queue *queue,
enum v4l2_buf_type type, int drop_corrupted);
@@ -616,6 +632,10 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
/* V4L2 interface */
extern const struct v4l2_file_operations uvc_fops;
+/* Media controller */
+extern int uvc_mc_register_entities(struct uvc_video_chain *chain);
+extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
+
/* Video */
extern int uvc_video_init(struct uvc_streaming *stream);
extern int uvc_video_suspend(struct uvc_streaming *stream);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 8344fc0ab858..0f09c057e796 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -719,6 +719,15 @@ config MFD_PM8XXX_IRQ
This is required to use certain other PM 8xxx features, such as GPIO
and MPP.
+config MFD_TPS65910
+ bool "TPS65910 Power Management chip"
+ depends on I2C=y && GPIOLIB
+ select MFD_CORE
+ select GPIO_TPS65910
+ help
+ If you say yes here you get support for the TPS65910 series of
+ Power Management chips.
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 1acb8f29a96c..efe3cc33ed92 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -93,3 +93,4 @@ obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
+obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e63782107e2f..02a15d7cb3b0 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2005,7 +2005,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
static struct mfd_cell db8500_prcmu_devs[] = {
{
.name = "db8500-prcmu-regulators",
- .mfd_data = &db8500_regulators,
+ .platform_data = &db8500_regulators,
+ .pdata_size = sizeof(db8500_regulators),
},
{
.name = "cpufreq-u8500",
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
new file mode 100644
index 000000000000..2bfad5c86cc7
--- /dev/null
+++ b/drivers/mfd/tps65910-irq.c
@@ -0,0 +1,218 @@
+/*
+ * tps65910-irq.c -- TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65910.h>
+
+static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
+ int irq)
+{
+ return (irq - tps65910->irq_base);
+}
+
+/*
+ * This is a threaded IRQ handler so can access I2C/SPI. Since all
+ * interrupts are clear on read the IRQ line will be reasserted and
+ * the physical IRQ will be handled again if another interrupt is
+ * asserted while we run - in the normal course of events this is a
+ * rare occurrence so we save I2C/SPI reads. We're also assuming that
+ * it's rare to get lots of interrupts firing simultaneously so try to
+ * minimise I/O.
+ */
+static irqreturn_t tps65910_irq(int irq, void *irq_data)
+{
+ struct tps65910 *tps65910 = irq_data;
+ u32 irq_sts;
+ u32 irq_mask;
+ u8 reg;
+ int i;
+
+ tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
+ irq_sts = reg;
+ tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
+ irq_sts |= reg << 8;
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
+ irq_sts |= reg << 16;
+ }
+
+ tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ irq_mask = reg;
+ tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ irq_mask |= reg << 8;
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ irq_mask |= reg << 16;
+ }
+
+ irq_sts &= ~irq_mask;
+
+ if (!irq_sts)
+ return IRQ_NONE;
+
+ for (i = 0; i < tps65910->irq_num; i++) {
+
+ if (!(irq_sts & (1 << i)))
+ continue;
+
+ handle_nested_irq(tps65910->irq_base + i);
+ }
+
+ /* Write the STS register back to clear IRQs we handled */
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
+ reg = irq_sts & 0xFF;
+ tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ reg = irq_sts >> 8;
+ tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tps65910_irq_lock(struct irq_data *data)
+{
+ struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&tps65910->irq_lock);
+}
+
+static void tps65910_irq_sync_unlock(struct irq_data *data)
+{
+ struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+ u32 reg_mask;
+ u8 reg;
+
+ tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ reg_mask = reg;
+ tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ reg_mask |= reg << 8;
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ reg_mask |= reg << 16;
+ }
+
+ if (tps65910->irq_mask != reg_mask) {
+ reg = tps65910->irq_mask & 0xFF;
+ tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
+ reg = tps65910->irq_mask >> 8 & 0xFF;
+ tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65911:
+ reg = tps65910->irq_mask >> 16;
+ tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ }
+ }
+ mutex_unlock(&tps65910->irq_lock);
+}
+
+static void tps65910_irq_enable(struct irq_data *data)
+{
+ struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+
+ tps65910->irq_mask &= ~(1 << irq_to_tps65910_irq(tps65910, data->irq));
+}
+
+static void tps65910_irq_disable(struct irq_data *data)
+{
+ struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+
+ tps65910->irq_mask |= (1 << irq_to_tps65910_irq(tps65910, data->irq));
+}
+
+static struct irq_chip tps65910_irq_chip = {
+ .name = "tps65910",
+ .irq_bus_lock = tps65910_irq_lock,
+ .irq_bus_sync_unlock = tps65910_irq_sync_unlock,
+ .irq_disable = tps65910_irq_disable,
+ .irq_enable = tps65910_irq_enable,
+};
+
+int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+ struct tps65910_platform_data *pdata)
+{
+ int ret, cur_irq;
+ int flags = IRQF_ONESHOT;
+
+ if (!irq) {
+ dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
+ return -EINVAL;
+ }
+
+ if (!pdata || !pdata->irq_base) {
+ dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
+ return -EINVAL;
+ }
+
+ tps65910->irq_mask = 0xFFFFFF;
+
+ mutex_init(&tps65910->irq_lock);
+ tps65910->chip_irq = irq;
+ tps65910->irq_base = pdata->irq_base;
+
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65910:
+ tps65910->irq_num = TPS65910_NUM_IRQ;
+ break;
+ case TPS65911:
+ tps65910->irq_num = TPS65911_NUM_IRQ;
+ break;
+ }
+
+ /* Register with genirq */
+ for (cur_irq = tps65910->irq_base;
+ cur_irq < tps65910->irq_num + tps65910->irq_base;
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, tps65910);
+ irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
+ "tps65910", tps65910);
+
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+
+ if (ret != 0)
+ dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret);
+
+ return ret;
+}
+
+int tps65910_irq_exit(struct tps65910 *tps65910)
+{
+ free_irq(tps65910->chip_irq, tps65910);
+ return 0;
+}
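tps65910_irq() above dispatches every pending status bit as a nested interrupt at irq_base + bit, so a sub-driver claims one of those virtual IRQs with a threaded handler. A sketch of such a consumer follows; TPS65910_IRQ_EXAMPLE is a placeholder offset, not a constant from the real header:

#include <linux/interrupt.h>
#include <linux/mfd/tps65910.h>

#define TPS65910_IRQ_EXAMPLE	0	/* placeholder offset into the IRQ range */

static irqreturn_t example_handler(int irq, void *data)
{
	/* Runs in thread context, so I2C access to the PMIC is allowed here. */
	return IRQ_HANDLED;
}

/* Called from a child device's probe(); pdata is the PMIC platform data. */
static int example_request(struct tps65910_platform_data *pdata, void *dev_data)
{
	int virq = pdata->irq_base + TPS65910_IRQ_EXAMPLE;

	return request_threaded_irq(virq, NULL, example_handler,
				    IRQF_ONESHOT, "tps65910-example", dev_data);
}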
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
new file mode 100644
index 000000000000..2229e66d80db
--- /dev/null
+++ b/drivers/mfd/tps65910.c
@@ -0,0 +1,229 @@
+/*
+ * tps65910.c -- TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65910.h>
+
+static struct mfd_cell tps65910s[] = {
+ {
+ .name = "tps65910-pmic",
+ },
+ {
+ .name = "tps65910-rtc",
+ },
+ {
+ .name = "tps65910-power",
+ },
+};
+
+
+static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
+ int bytes, void *dest)
+{
+ struct i2c_client *i2c = tps65910->i2c_client;
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = bytes;
+ xfer[1].buf = dest;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
+ int bytes, void *src)
+{
+ struct i2c_client *i2c = tps65910->i2c_client;
+ /* we add 1 byte for device register */
+ u8 msg[TPS65910_MAX_REGISTER + 1];
+ int ret;
+
+ if (bytes > TPS65910_MAX_REGISTER)
+ return -EINVAL;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, bytes);
+
+ ret = i2c_master_send(i2c, msg, bytes + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != bytes + 1)
+ return -EIO;
+ return 0;
+}
+
+int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65910->io_mutex);
+ err = tps65910_i2c_read(tps65910, reg, 1, &data);
+ if (err) {
+ dev_err(tps65910->dev, "read from reg %x failed\n", reg);
+ goto out;
+ }
+
+ data |= mask;
+ err = tps65910_i2c_write(tps65910, reg, 1, &data);
+ if (err)
+ dev_err(tps65910->dev, "write to reg %x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65910->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65910_set_bits);
+
+int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65910->io_mutex);
+ err = tps65910_i2c_read(tps65910, reg, 1, &data);
+ if (err) {
+ dev_err(tps65910->dev, "read from reg %x failed\n", reg);
+ goto out;
+ }
+
+ data &= ~mask;
+ err = tps65910_i2c_write(tps65910, reg, 1, &data);
+ if (err)
+ dev_err(tps65910->dev, "write to reg %x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65910->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65910_clear_bits);
+
+static int tps65910_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tps65910 *tps65910;
+ struct tps65910_board *pmic_plat_data;
+ struct tps65910_platform_data *init_data;
+ int ret = 0;
+
+ pmic_plat_data = dev_get_platdata(&i2c->dev);
+ if (!pmic_plat_data)
+ return -EINVAL;
+
+ init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
+ if (init_data == NULL)
+ return -ENOMEM;
+
+ init_data->irq = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq;
+
+ tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
+ if (tps65910 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tps65910);
+ tps65910->dev = &i2c->dev;
+ tps65910->i2c_client = i2c;
+ tps65910->id = id->driver_data;
+ tps65910->read = tps65910_i2c_read;
+ tps65910->write = tps65910_i2c_write;
+ mutex_init(&tps65910->io_mutex);
+
+ ret = mfd_add_devices(tps65910->dev, -1,
+ tps65910s, ARRAY_SIZE(tps65910s),
+ NULL, 0);
+ if (ret < 0)
+ goto err;
+
+ tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
+
+ ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ mfd_remove_devices(tps65910->dev);
+ kfree(tps65910);
+ return ret;
+}
+
+static int tps65910_i2c_remove(struct i2c_client *i2c)
+{
+ struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(tps65910->dev);
+ kfree(tps65910);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps65910_i2c_id[] = {
+ { "tps65910", TPS65910 },
+ { "tps65911", TPS65911 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps65910_i2c_id);
+
+
+static struct i2c_driver tps65910_i2c_driver = {
+ .driver = {
+ .name = "tps65910",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65910_i2c_probe,
+ .remove = tps65910_i2c_remove,
+ .id_table = tps65910_i2c_id,
+};
+
+static int __init tps65910_i2c_init(void)
+{
+ return i2c_add_driver(&tps65910_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps65910_i2c_init);
+
+static void __exit tps65910_i2c_exit(void)
+{
+ i2c_del_driver(&tps65910_i2c_driver);
+}
+module_exit(tps65910_i2c_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
+MODULE_LICENSE("GPL");
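The probe above expects tps65910_board platform data and dereferences at least its irq and gpio_base fields. A board file might describe the chip roughly as below; the I2C bus number, IRQ number, GPIO base and slave address are illustrative assumptions only:

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/mfd/tps65910.h>

static struct tps65910_board example_tps65910_pdata = {
	.irq		= 100,	/* made-up PMIC interrupt line */
	.gpio_base	= 200,	/* made-up base for the chip's GPIOs */
};

static struct i2c_board_info example_pmic_info __initdata = {
	I2C_BOARD_INFO("tps65910", 0x2d),	/* slave address is an assumption */
	.platform_data	= &example_tps65910_pdata,
};

/* Called from the board's machine init code; bus number 1 is arbitrary. */
static int __init example_board_init(void)
{
	return i2c_register_board_info(1, &example_pmic_info, 1);
}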
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
new file mode 100644
index 000000000000..3d2dc56a3d40
--- /dev/null
+++ b/drivers/mfd/tps65911-comparator.c
@@ -0,0 +1,188 @@
+/*
+ * tps65910.c -- TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65910.h>
+
+#define COMP 0
+#define COMP1 1
+#define COMP2 2
+
+/* Comparator 1 voltage selection table in millivolts */
+static const u16 COMP_VSEL_TABLE[] = {
+ 0, 2500, 2500, 2500, 2500, 2550, 2600, 2650,
+ 2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050,
+ 3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450,
+ 3500,
+};
+
+struct comparator {
+ const char *name;
+ int reg;
+ int uV_max;
+ const u16 *vsel_table;
+};
+
+static struct comparator tps_comparators[] = {
+ {
+ .name = "COMP1",
+ .reg = TPS65911_VMBCH,
+ .uV_max = 3500,
+ .vsel_table = COMP_VSEL_TABLE,
+ },
+ {
+ .name = "COMP2",
+ .reg = TPS65911_VMBCH2,
+ .uV_max = 3500,
+ .vsel_table = COMP_VSEL_TABLE,
+ },
+};
+
+static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage)
+{
+ struct comparator tps_comp = tps_comparators[id];
+ int curr_voltage = 0;
+ int ret;
+ u8 index = 0, val;
+
+ if (id == COMP)
+ return 0;
+
+ while (curr_voltage < tps_comp.uV_max) {
+ curr_voltage = tps_comp.vsel_table[index];
+ if (curr_voltage >= voltage)
+ break;
+ else if (curr_voltage < voltage)
+ index++;
+ }
+
+ if (curr_voltage > tps_comp.uV_max)
+ return -EINVAL;
+
+ val = index << 1;
+ ret = tps65910->write(tps65910, tps_comp.reg, 1, &val);
+
+ return ret;
+}
+
+static int comp_threshold_get(struct tps65910 *tps65910, int id)
+{
+ struct comparator tps_comp = tps_comparators[id];
+ int ret;
+ u8 val;
+
+ if (id == COMP)
+ return 0;
+
+ ret = tps65910->read(tps65910, tps_comp.reg, 1, &val);
+ if (ret < 0)
+ return ret;
+
+ val >>= 1;
+ return tps_comp.vsel_table[val];
+}
+
+static ssize_t comp_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tps65910 *tps65910 = dev_get_drvdata(dev->parent);
+ struct attribute comp_attr = attr->attr;
+ int id, uVolt;
+
+ if (!strcmp(comp_attr.name, "comp1_threshold"))
+ id = COMP1;
+ else if (!strcmp(comp_attr.name, "comp2_threshold"))
+ id = COMP2;
+ else
+ return -EINVAL;
+
+ uVolt = comp_threshold_get(tps65910, id);
+
+ return sprintf(buf, "%d\n", uVolt);
+}
+
+static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL);
+static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
+
+static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
+{
+ struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
+ int ret;
+
+ ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot set COMP1 threshold\n");
+ return ret;
+ }
+
+ ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot set COMP2 threshold\n");
+ return ret;
+ }
+
+ /* Create sysfs entry */
+ ret = device_create_file(&pdev->dev, &dev_attr_comp1_threshold);
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to add COMP1 sysfs file\n");
+
+ ret = device_create_file(&pdev->dev, &dev_attr_comp2_threshold);
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to add COMP2 sysfs file\n");
+
+ return ret;
+}
+
+static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
+{
+ struct tps65910 *tps65910;
+
+ tps65910 = dev_get_drvdata(pdev->dev.parent);
+
+ return 0;
+}
+
+static struct platform_driver tps65911_comparator_driver = {
+ .driver = {
+ .name = "tps65911-comparator",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65911_comparator_probe,
+ .remove = __devexit_p(tps65911_comparator_remove),
+};
+
+static int __init tps65911_comparator_init(void)
+{
+ return platform_driver_register(&tps65911_comparator_driver);
+}
+subsys_initcall(tps65911_comparator_init);
+
+static void __exit tps65911_comparator_exit(void)
+{
+ platform_driver_unregister(&tps65911_comparator_driver);
+}
+module_exit(tps65911_comparator_exit);
+
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65911 comparator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65911-comparator");
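comp_threshold_set() above walks COMP_VSEL_TABLE until it reaches the first entry at or above the requested voltage and writes that index shifted left by one bit; comp_threshold_get() reverses the mapping. A standalone round-trip sketch with the same table values (in millivolts):

#include <stdio.h>

static const unsigned short vsel[] = {
	0, 2500, 2500, 2500, 2500, 2550, 2600, 2650,
	2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050,
	3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450,
	3500,
};

int main(void)
{
	int want = 2840, index = 0;
	unsigned char reg;

	/* First table entry that satisfies the request. */
	while (index < 24 && vsel[index] < want)
		index++;

	reg = index << 1;	/* value written to the VMBCH register */
	printf("request %d mV -> index %d (%u mV), reg=0x%02x\n",
	       want, index, vsel[index], reg);
	/* prints: request 2840 mV -> index 11 (2850 mV), reg=0x16 */

	printf("readback: %u mV\n", vsel[reg >> 1]);
	return 0;
}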
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 200311fea369..e2a52e5cf449 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -609,6 +609,7 @@ static int apds990x_detect(struct apds990x_chip *chip)
return ret;
}
+#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
static int apds990x_chip_on(struct apds990x_chip *chip)
{
int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
@@ -624,6 +625,7 @@ static int apds990x_chip_on(struct apds990x_chip *chip)
apds990x_mode_on(chip);
return 0;
}
+#endif
static int apds990x_chip_off(struct apds990x_chip *chip)
{
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index d019746551f3..2a40d0efdff5 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
static inline bool needs_unaligned_copy(const void *ptr)
{
-#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
return false;
#else
return ((ptr - NULL) & 3) != 0;
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index e01e08c8c88b..bc685bfc4c33 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -174,7 +174,7 @@ struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
timer_nr = t < max ? (int) t : -1;
} else {
/* check if the requested timer's available */
- if (test_bit(timer_nr, mfgpt->avail))
+ if (!test_bit(timer_nr, mfgpt->avail))
timer_nr = -1;
}
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 668d41e594a9..df03dd3bd0e2 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -270,7 +270,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
return IOC4_VARIANT_PCI_RT;
}
-static void __devinit
+static void
ioc4_load_modules(struct work_struct *work)
{
request_module("sgiioc4");
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index b0c56313dbbb..8cebec5e85ee 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -304,7 +304,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
return 1;
}
/* Readjust the instruction pointer if needed */
- instruction_pointer_set(&kgdbts_regs, ip + offset);
+ ip += offset;
+#ifdef GDB_ADJUSTS_BREAK_OFFSET
+ instruction_pointer_set(&kgdbts_regs, ip);
+#endif
return 0;
}
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 81d7fa4ec0db..150cd7061b80 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -120,6 +120,7 @@ static int recur_count = REC_NUM_DEFAULT;
static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(count_lock);
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -230,11 +231,14 @@ static const char *cp_name_to_str(enum cname name)
static int lkdtm_parse_commandline(void)
{
int i;
+ unsigned long flags;
if (cpoint_count < 1 || recur_count < 1)
return -EINVAL;
+ spin_lock_irqsave(&count_lock, flags);
count = cpoint_count;
+ spin_unlock_irqrestore(&count_lock, flags);
/* No special parameters */
if (!cpoint_type && !cpoint_name)
@@ -349,6 +353,9 @@ static void lkdtm_do_action(enum ctype which)
static void lkdtm_handler(void)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&count_lock, flags);
count--;
printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
@@ -357,6 +364,7 @@ static void lkdtm_handler(void)
lkdtm_do_action(cptype);
count = cpoint_count;
}
+ spin_unlock_irqrestore(&count_lock, flags);
}
static int lkdtm_register_cpoint(enum cname which)
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index ee5109a3cd98..42f067347bc7 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -495,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_kfree_skb(skb);
kfree(queued_msg);
}
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
return NETDEV_TX_OK;
}
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index 7aded90f9daa..cfbddbef11de 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -845,7 +845,7 @@ err_iounmap:
err_iounmap_app:
iounmap(config->va_app_base);
err_kzalloc:
- kfree(config);
+ kfree(target);
err_rel_res:
release_mem_region(res1->start, resource_size(res1));
err_rel_res0:
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 1a05fe08e2cb..f91f82eabda7 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -747,8 +747,8 @@ static void st_tty_close(struct tty_struct *tty)
pr_debug("%s: done ", __func__);
}
-static unsigned int st_tty_receive(struct tty_struct *tty,
- const unsigned char *data, char *tty_flags, int count)
+static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
+ char *tty_flags, int count)
{
#ifdef VERBOSE
print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
@@ -761,8 +761,6 @@ static unsigned int st_tty_receive(struct tty_struct *tty,
*/
st_recv(tty->disc_data, data, count);
pr_debug("done %s", __func__);
-
- return count;
}
/* wake-up function called in from the TTY layer
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 71da5641e258..f85e42224559 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1024,7 +1024,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
INIT_LIST_HEAD(&md->part);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock);
+ ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
if (ret)
goto err_putdisk;
@@ -1297,6 +1297,9 @@ static void mmc_blk_remove(struct mmc_card *card)
struct mmc_blk_data *md = mmc_get_drvdata(card);
mmc_blk_remove_parts(card, md);
+ mmc_claim_host(card->host);
+ mmc_blk_part_switch(card, md);
+ mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c07322c2658c..6413afa318d2 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -106,10 +106,12 @@ static void mmc_request(struct request_queue *q)
* @mq: mmc queue
* @card: mmc card to attach this queue
* @lock: queue lock
+ * @subname: partition subname
*
* Initialise a MMC card request queue.
*/
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
@@ -133,12 +135,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
mq->queue->limits.max_discard_sectors = UINT_MAX;
if (card->erased_byte == 0)
mq->queue->limits.discard_zeroes_data = 1;
- if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
- mq->queue->limits.discard_granularity =
- card->erase_size << 9;
- mq->queue->limits.discard_alignment =
- card->erase_size << 9;
- }
+ mq->queue->limits.discard_granularity = card->pref_erase << 9;
if (mmc_can_secure_erase_trim(card))
queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
mq->queue);
@@ -209,8 +206,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
sema_init(&mq->thread_sem, 1);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
- host->index);
+ mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+ host->index, subname ? subname : "");
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 64e66e0d4994..6223ef8dc9cd 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -19,7 +19,8 @@ struct mmc_queue {
unsigned int bounce_sg_len;
};
-extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
+ const char *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
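To make the new parameter concrete, here is a sketch of a hypothetical caller (not taken from the patch; it assumes the declarations in drivers/mmc/card/queue.h): the suffix passed as subname is appended to the per-host thread name built by kthread_run() in mmc_init_queue() above.

/* Sketch only: with subname "p1" and host index 0, the queue worker is named
 * "mmcqd/0p1"; passing NULL keeps the old "mmcqd/0" style name. */
static int example_init_part_queue(struct mmc_queue *mq, struct mmc_card *card,
				   spinlock_t *lock)
{
	return mmc_init_queue(mq, card, lock, "p1");
}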
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 68091dda3f31..7843efe22359 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1245,7 +1245,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
*/
timeout_clks <<= 1;
timeout_us += (timeout_clks * 1000) /
- (card->host->ios.clock / 1000);
+ (mmc_host_clk_rate(card->host) / 1000);
erase_timeout = timeout_us / 1000;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4d0c15bfa514..262fff019177 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -691,15 +691,54 @@ static int mmc_sdio_resume(struct mmc_host *host)
static int mmc_sdio_power_restore(struct mmc_host *host)
{
int ret;
+ u32 ocr;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
+
+ /*
+ * Reset the card by performing the same steps that are taken by
+ * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
+ *
+ * sdio_reset() is technically not needed. Having just powered up the
+ * hardware, it should already be in reset state. However, some
+ * platforms (such as SD8686 on OLPC) do not instantly cut power,
+ * meaning that a reset is required when restoring power soon after
+ * powering off. It is harmless in other cases.
+ *
+ * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
+ * is not necessary for non-removable cards. However, it is required
+ * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
+ * harmless in other situations.
+ *
+ * With these steps taken, mmc_select_voltage() is also required to
+ * restore the correct voltage setting of the card.
+ */
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->ocr_avail);
+
+ ret = mmc_send_io_op_cond(host, 0, &ocr);
+ if (ret)
+ goto out;
+
+ if (host->ocr_avail_sdio)
+ host->ocr_avail = host->ocr_avail_sdio;
+
+ host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
+ if (!host->ocr) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
mmc_signal_sdio_irq(host);
+
+out:
mmc_release_host(host);
return ret;
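As a reading aid (not part of the patch), the restore sequence added above maps onto the same bus commands a normal probe issues, roughly:

/* sdio_reset()          - CMD52 write to the card's I/O abort register
 * mmc_go_idle()         - CMD0
 * mmc_send_if_cond()    - CMD8
 * mmc_send_io_op_cond() - CMD5, which also returns the card's OCR
 * mmc_select_voltage()  - narrows host->ocr to a mutually supported range
 * mmc_sdio_init_card()  - completes the usual SDIO initialisation
 */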
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d29b9c36919a..d2565df8a7fb 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -189,7 +189,7 @@ static int sdio_bus_remove(struct device *dev)
/* Then undo the runtime PM settings in sdio_bus_probe() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
out:
return ret;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 4941e06fe2e1..7721de942c69 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -51,6 +51,7 @@ static unsigned int fmax = 515633;
* is asserted (likewise for RX)
* @sdio: variant supports SDIO
* @st_clkdiv: true if using a ST-specific clock divider algorithm
+ * @blksz_datactrl16: true if the block size is at bits 16..30 in the datactrl register
*/
struct variant_data {
unsigned int clkreg;
@@ -60,6 +61,7 @@ struct variant_data {
unsigned int fifohalfsize;
bool sdio;
bool st_clkdiv;
+ bool blksz_datactrl16;
};
static struct variant_data variant_arm = {
@@ -92,6 +94,17 @@ static struct variant_data variant_ux500 = {
.st_clkdiv = true,
};
+static struct variant_data variant_ux500v2 = {
+ .fifosize = 30 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .datalength_bits = 24,
+ .sdio = true,
+ .st_clkdiv = true,
+ .blksz_datactrl16 = true,
+};
+
/*
* This must be called with host->lock held
*/
@@ -465,7 +478,10 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
blksz_bits = ffs(data->blksz) - 1;
BUG_ON(1 << blksz_bits != data->blksz);
- datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
+ if (variant->blksz_datactrl16)
+ datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
+ else
+ datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
@@ -1128,9 +1144,17 @@ static int __devinit mmci_probe(struct amba_device *dev,
else if (ret != -ENOSYS)
goto err_gpio_cd;
+ /*
+ * A gpio pin that will detect cards when inserted and removed
+ * will most likely want to trigger on the edges if it is
+ * 0 when ejected and 1 when inserted (or mutatis mutandis
+ * for the inverted case) so we request triggers on both
+ * edges.
+ */
ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
- mmci_cd_irq, 0,
- DRIVER_NAME " (cd)", host);
+ mmci_cd_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ DRIVER_NAME " (cd)", host);
if (ret >= 0)
host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
}
@@ -1311,9 +1335,14 @@ static struct amba_id mmci_ids[] = {
},
{
.id = 0x00480180,
- .mask = 0x00ffffff,
+ .mask = 0xf0ffffff,
.data = &variant_ux500,
},
+ {
+ .id = 0x10480180,
+ .mask = 0xf0ffffff,
+ .data = &variant_ux500v2,
+ },
{ 0, 0 },
};
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index e2aecb7f1d5c..ab66f2454dc4 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -25,6 +25,11 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
+#ifndef NO_IRQ
+#define NO_IRQ 0
+#endif
+
MODULE_LICENSE("GPL");
enum {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 259ece047afc..dedf3dab8a3b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -429,12 +429,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
return -EINVAL;
}
}
- mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg);
/* Allow an aux regulator */
reg = regulator_get(host->dev, "vmmc_aux");
host->vcc_aux = IS_ERR(reg) ? NULL : reg;
+ /* For eMMC do not power off when not in sleep state */
+ if (mmc_slot(host).no_regulator_off_init)
+ return 0;
/*
* UGLY HACK: workaround regulator framework bugs.
* When the bootloader leaves a supply active, it's
@@ -959,7 +961,8 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
spin_unlock(&host->irq_lock);
if (host->use_dma && dma_ch != -1) {
- dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
+ dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
+ host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(dma_ch);
}
@@ -1343,7 +1346,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
return;
}
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
omap_hsmmc_get_dma_dir(host, data));
req_in_progress = host->req_in_progress;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index b3654293017b..ce500f03df85 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -92,7 +92,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps;
- if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
+ if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
priv->param_tx.slave_id = p->dma_slave_tx;
priv->param_rx.slave_id = p->dma_slave_rx;
priv->dma_priv.chan_priv_tx = &priv->param_tx;
@@ -165,13 +165,14 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
p->pdata = NULL;
+ tmio_mmc_host_remove(host);
+
for (i = 0; i < 3; i++) {
irq = platform_get_irq(pdev, i);
if (irq >= 0)
free_irq(irq, host);
}
- tmio_mmc_host_remove(host);
clk_disable(priv->clk);
clk_put(priv->clk);
kfree(priv);
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index ad6347bb02dd..0b09e8239aa0 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -824,8 +824,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
- return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
+ return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index cbb03305b77b..d4455ffbefd8 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2096,7 +2096,7 @@ static struct mmc_host_ops vub300_mmc_ops = {
static int vub300_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{ /* NOT irq */
- struct vub300_mmc_host *vub300 = NULL;
+ struct vub300_mmc_host *vub300;
struct usb_host_interface *iface_desc;
struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
int i;
@@ -2118,23 +2118,20 @@ static int vub300_probe(struct usb_interface *interface,
command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_out_urb) {
retval = -ENOMEM;
- dev_err(&vub300->udev->dev,
- "not enough memory for the command_out_urb\n");
+ dev_err(&udev->dev, "not enough memory for command_out_urb\n");
goto error0;
}
command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_res_urb) {
retval = -ENOMEM;
- dev_err(&vub300->udev->dev,
- "not enough memory for the command_res_urb\n");
+ dev_err(&udev->dev, "not enough memory for command_res_urb\n");
goto error1;
}
/* this also allocates memory for our VUB300 mmc host device */
mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
if (!mmc) {
retval = -ENOMEM;
- dev_err(&vub300->udev->dev,
- "not enough memory for the mmc_host\n");
+ dev_err(&udev->dev, "not enough memory for the mmc_host\n");
goto error4;
}
/* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index bc50d5ea5534..4be8373d43e5 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -33,20 +33,6 @@ config MTD_TESTS
should normally be compiled as kernel modules. The modules perform
various checks and verifications when loaded.
-config MTD_PARTITIONS
- bool "MTD partitioning support"
- help
- If you have a device which needs to divide its flash chip(s) up
- into multiple 'partitions', each of which appears to the user as
- a separate MTD device, you require this option to be enabled. If
- unsure, say 'Y'.
-
- Note, however, that you don't need this option for the DiskOnChip
- devices. Partitioning on NFTL 'devices' is a different - that's the
- 'normal' form of partitioning used on a block device.
-
-if MTD_PARTITIONS
-
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
---help---
@@ -99,7 +85,7 @@ endif # MTD_REDBOOT_PARTS
config MTD_CMDLINE_PARTS
bool "Command line partition table parsing"
- depends on MTD_PARTITIONS = "y" && MTD = "y"
+ depends on MTD = "y"
---help---
Allow generic configuration of the MTD partition tables via the kernel
command line. Multiple flash resources are supported for hardware where
@@ -163,8 +149,6 @@ config MTD_AR7_PARTS
---help---
TI AR7 partitioning support
-endif # MTD_PARTITIONS
-
comment "User Modules And Translation Layers"
config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index d578095fb255..39664c4229ff 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,8 +4,7 @@
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
-mtd-y := mtdcore.o mtdsuper.o mtdconcat.o
-mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
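With MTD_PARTITIONS removed and mtdpart.o always built in, the remaining hunks in this section all converge on a single registration call. A generic sketch of the pattern (not copied from any one driver below):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* mtd_device_register() replaces both add_mtd_device() and
 * add_mtd_partitions(): a NULL/0 partition list registers the whole device,
 * otherwise the listed partitions are created.  mtd_device_unregister()
 * undoes either case on removal. */
static int example_flash_attach(struct mtd_info *mtd,
				struct mtd_partition *parts, int nr_parts)
{
	return mtd_device_register(mtd, parts, nr_parts);
}

static int example_flash_detach(struct mtd_info *mtd)
{
	return mtd_device_unregister(mtd);
}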
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 09cb7c8d93b4..e1e122f2f929 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -812,12 +812,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
break;
if (time_after(jiffies, timeo)) {
- /* Urgh. Resume and pretend we weren't here. */
- map_write(map, CMD(0xd0), adr);
- /* Make sure we're in 'read status' mode if it had finished */
- map_write(map, CMD(0x70), adr);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
+ /* Urgh. Resume and pretend we weren't here.
+ * Make sure we're in 'read status' mode if it had finished */
+ put_chip(map, chip, adr);
printk(KERN_ERR "%s: Chip not ready after erase "
"suspended: status = 0x%lx\n", map->name, status.x[0]);
return -EIO;
@@ -997,7 +994,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
switch(chip->oldstate) {
case FL_ERASING:
- chip->state = chip->oldstate;
/* What if one interleaved chip has finished and the
other hasn't? The old code would leave the finished
one in READY mode. That's bad, and caused -EROFS
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 0b49266840b9..23175edd5634 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -462,13 +462,14 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
cfi_fixup_major_minor(cfi, extp);
/*
- * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
+ * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
* see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
* http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
* http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
+ * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
*/
if (extp->MajorVersion != '1' ||
- (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
+ (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
"version %c.%c (%#02x/%#02x).\n",
extp->MajorVersion, extp->MinorVersion,
@@ -710,9 +711,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* there was an error (so leave the erase
* routine to recover from it) or we trying to
* use the erase-in-progress sector. */
- map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
+ put_chip(map, chip, adr);
printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
return -EIO;
}
@@ -762,7 +761,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
switch(chip->oldstate) {
case FL_ERASING:
- chip->state = chip->oldstate;
map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index ed56ad3884fb..179814a95f3a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -296,6 +296,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* make sure we're in 'read status' mode */
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
+ wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
"suspended: status = 0x%lx\n", status.x[0]);
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 97183c8c9e33..b78f23169d4e 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -294,7 +294,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
- if (add_mtd_device(&dev->mtd)) {
+ if (mtd_device_register(&dev->mtd, NULL, 0)) {
/* Device didn't get added, so free the entry */
goto devinit_err;
}
@@ -465,7 +465,7 @@ static void __devexit block2mtd_exit(void)
list_for_each_safe(pos, next, &blkmtd_device_list) {
struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
block2mtd_sync(&dev->mtd);
- del_mtd_device(&dev->mtd);
+ mtd_device_unregister(&dev->mtd);
INFO("mtd%d: [%s] removed", dev->mtd.index,
dev->mtd.name + strlen("block2mtd: "));
list_del(&dev->list);
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 5bf5f460e132..f7fbf6025ef2 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -597,7 +597,7 @@ void DoC2k_init(struct mtd_info *mtd)
doc2klist = mtd;
mtd->size = this->totlen;
mtd->erasesize = this->erasesize;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
return;
}
}
@@ -1185,7 +1185,7 @@ static void __exit cleanup_doc2000(void)
this = mtd->priv;
doc2klist = this->nextdoc;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
iounmap(this->virtadr);
kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 0990f7803628..241192f05bc8 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -376,7 +376,7 @@ void DoCMil_init(struct mtd_info *mtd)
this->nextdoc = docmillist;
docmillist = mtd;
mtd->size = this->totlen;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
return;
}
}
@@ -826,7 +826,7 @@ static void __exit cleanup_doc2001(void)
this = mtd->priv;
docmillist = this->nextdoc;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
iounmap(this->virtadr);
kfree(this->chips);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 8b36fa77a195..09ae0adc3ad0 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -499,7 +499,7 @@ void DoCMilPlus_init(struct mtd_info *mtd)
docmilpluslist = mtd;
mtd->size = this->totlen;
mtd->erasesize = this->erasesize;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
return;
}
}
@@ -1091,7 +1091,7 @@ static void __exit cleanup_doc2001plus(void)
this = mtd->priv;
docmilpluslist = this->nextdoc;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
iounmap(this->virtadr);
kfree(this->chips);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4b829f97d56c..772a0ff89e0f 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -684,9 +684,10 @@ static int __init lart_flash_init (void)
#endif
#ifndef HAVE_PARTITIONS
- result = add_mtd_device (&mtd);
+ result = mtd_device_register(&mtd, NULL, 0);
#else
- result = add_mtd_partitions (&mtd,lart_partitions, ARRAY_SIZE(lart_partitions));
+ result = mtd_device_register(&mtd, lart_partitions,
+ ARRAY_SIZE(lart_partitions));
#endif
return (result);
@@ -695,9 +696,9 @@ static int __init lart_flash_init (void)
static void __exit lart_flash_exit (void)
{
#ifndef HAVE_PARTITIONS
- del_mtd_device (&mtd);
+ mtd_device_unregister(&mtd);
#else
- del_mtd_partitions (&mtd);
+ mtd_device_unregister(&mtd);
#endif
}
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 3fb981d4bb51..35180e475c4c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/mod_devicetable.h>
+#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -55,6 +56,9 @@
#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
+/* Used for Spansion flashes only. */
+#define OPCODE_BRWR 0x17 /* Bank register write */
+
/* Status Register bits. */
#define SR_WIP 1 /* Write in progress */
#define SR_WEL 2 /* Write enable latch */
@@ -76,6 +80,8 @@
#define FAST_READ_DUMMY_BYTE 0
#endif
+#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
+
/****************************************************************************/
struct m25p {
@@ -158,11 +164,18 @@ static inline int write_disable(struct m25p *flash)
/*
* Enable/disable 4-byte addressing mode.
*/
-static inline int set_4byte(struct m25p *flash, int enable)
+static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
- u8 code = enable ? OPCODE_EN4B : OPCODE_EX4B;
-
- return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
+ switch (JEDEC_MFR(jedec_id)) {
+ case CFI_MFR_MACRONIX:
+ flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
+ return spi_write(flash->spi, flash->command, 1);
+ default:
+ /* Spansion style */
+ flash->command[0] = OPCODE_BRWR;
+ flash->command[1] = enable << 7;
+ return spi_write(flash->spi, flash->command, 2);
+ }
}
/*
@@ -668,6 +681,7 @@ static const struct spi_device_id m25p_ids[] = {
/* Macronix */
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
@@ -684,6 +698,10 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
+ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
@@ -729,7 +747,10 @@ static const struct spi_device_id m25p_ids[] = {
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
@@ -804,6 +825,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
struct m25p *flash;
struct flash_info *info;
unsigned i;
+ struct mtd_partition *parts = NULL;
+ int nr_parts = 0;
/* Platform data helps sort out which chip type we have, as
* well as how this board partitions it. If we don't have
@@ -868,9 +891,9 @@ static int __devinit m25p_probe(struct spi_device *spi)
* up with the software protection bits set
*/
- if (info->jedec_id >> 16 == 0x1f ||
- info->jedec_id >> 16 == 0x89 ||
- info->jedec_id >> 16 == 0xbf) {
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
+ JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
write_enable(flash);
write_sr(flash, 0);
}
@@ -888,7 +911,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.read = m25p80_read;
/* sst flash chips use AAI word program */
- if (info->jedec_id >> 16 == 0xbf)
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
flash->mtd.write = sst_write;
else
flash->mtd.write = m25p80_write;
@@ -914,7 +937,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
/* enable 4-byte addressing if the device exceeds 16MiB */
if (flash->mtd.size > 0x1000000) {
flash->addr_width = 4;
- set_4byte(flash, 1);
+ set_4byte(flash, info->jedec_id, 1);
} else
flash->addr_width = 3;
}
@@ -945,48 +968,41 @@ static int __devinit m25p_probe(struct spi_device *spi)
/* partitions should match sector boundaries; and it may be good to
* use readonly partitions for writeprotected sectors (BP2..BP0).
*/
- if (mtd_has_partitions()) {
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
-
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[]
- = { "cmdlinepart", NULL, };
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[]
+ = { "cmdlinepart", NULL, };
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes, &parts, 0);
- }
+ nr_parts = parse_mtd_partitions(&flash->mtd,
+ part_probes, &parts, 0);
+ }
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
+ if (nr_parts <= 0 && data && data->parts) {
+ parts = data->parts;
+ nr_parts = data->nr_parts;
+ }
#ifdef CONFIG_MTD_OF_PARTS
- if (nr_parts <= 0 && spi->dev.of_node) {
- nr_parts = of_mtd_parse_partitions(&spi->dev,
- spi->dev.of_node, &parts);
- }
+ if (nr_parts <= 0 && spi->dev.of_node) {
+ nr_parts = of_mtd_parse_partitions(&spi->dev,
+ spi->dev.of_node, &parts);
+ }
#endif
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
- flash->partitioned = 1;
- return add_mtd_partitions(&flash->mtd, parts, nr_parts);
+ if (nr_parts > 0) {
+ for (i = 0; i < nr_parts; i++) {
+ DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
+ "{.name = %s, .offset = 0x%llx, "
+ ".size = 0x%llx (%lldKiB) }\n",
+ i, parts[i].name,
+ (long long)parts[i].offset,
+ (long long)parts[i].size,
+ (long long)(parts[i].size >> 10));
}
- } else if (data && data->nr_parts)
- dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
- data->nr_parts, data->name);
+ flash->partitioned = 1;
+ }
- return add_mtd_device(&flash->mtd) == 1 ? -ENODEV : 0;
+ return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
+ -ENODEV : 0;
}
@@ -996,10 +1012,7 @@ static int __devexit m25p_remove(struct spi_device *spi)
int status;
/* Clean up MTD stuff. */
- if (mtd_has_partitions() && flash->partitioned)
- status = del_mtd_partitions(&flash->mtd);
- else
- status = del_mtd_device(&flash->mtd);
+ status = mtd_device_unregister(&flash->mtd);
if (status == 0) {
kfree(flash->command);
kfree(flash);
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 6a9a24a80a6d..8423fb6d4f26 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -220,7 +220,7 @@ static int __init ms02nv_init_one(ulong addr)
mtd->writesize = 1;
ret = -EIO;
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
printk(KERN_ERR
"ms02-nv: Unable to register MTD device, aborting!\n");
goto err_out_csr_res;
@@ -262,7 +262,7 @@ static void __exit ms02nv_remove_one(void)
root_ms02nv_mtd = mp->next;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
release_resource(mp->resource.csr);
kfree(mp->resource.csr);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index c5015cc721d5..13749d458a31 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -637,6 +637,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
struct flash_platform_data *pdata = spi->dev.platform_data;
char *otp_tag = "";
int err = 0;
+ struct mtd_partition *parts;
+ int nr_parts = 0;
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
@@ -675,33 +677,25 @@ add_dataflash_otp(struct spi_device *spi, char *name,
pagesize, otp_tag);
dev_set_drvdata(&spi->dev, priv);
- if (mtd_has_partitions()) {
- struct mtd_partition *parts;
- int nr_parts = 0;
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[] = { "cmdlinepart", NULL, };
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[]
- = { "cmdlinepart", NULL, };
-
- nr_parts = parse_mtd_partitions(device,
- part_probes, &parts, 0);
- }
+ nr_parts = parse_mtd_partitions(device, part_probes, &parts,
+ 0);
+ }
- if (nr_parts <= 0 && pdata && pdata->parts) {
- parts = pdata->parts;
- nr_parts = pdata->nr_parts;
- }
+ if (nr_parts <= 0 && pdata && pdata->parts) {
+ parts = pdata->parts;
+ nr_parts = pdata->nr_parts;
+ }
- if (nr_parts > 0) {
- priv->partitioned = 1;
- err = add_mtd_partitions(device, parts, nr_parts);
- goto out;
- }
- } else if (pdata && pdata->nr_parts)
- dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
- pdata->nr_parts, device->name);
+ if (nr_parts > 0) {
+ priv->partitioned = 1;
+ err = mtd_device_register(device, parts, nr_parts);
+ goto out;
+ }
- if (add_mtd_device(device) == 1)
+ if (mtd_device_register(device, NULL, 0) == 1)
err = -ENODEV;
out:
@@ -939,10 +933,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
- if (mtd_has_partitions() && flash->partitioned)
- status = del_mtd_partitions(&flash->mtd);
- else
- status = del_mtd_device(&flash->mtd);
+ status = mtd_device_unregister(&flash->mtd);
if (status == 0) {
dev_set_drvdata(&spi->dev, NULL);
kfree(flash);
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 1483e18971ce..2562689ba6b4 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -104,7 +104,7 @@ static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
static void __exit cleanup_mtdram(void)
{
if (mtd_info) {
- del_mtd_device(mtd_info);
+ mtd_device_unregister(mtd_info);
vfree(mtd_info->priv);
kfree(mtd_info);
}
@@ -133,9 +133,8 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->read = ram_read;
mtd->write = ram_write;
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0))
return -EIO;
- }
return 0;
}
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 8d28fa02a5a2..23423bd00b06 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -115,7 +115,7 @@ static void unregister_devices(void)
struct phram_mtd_list *this, *safe;
list_for_each_entry_safe(this, safe, &phram_list, list) {
- del_mtd_device(&this->mtd);
+ mtd_device_unregister(&this->mtd);
iounmap(this->mtd.priv);
kfree(this->mtd.name);
kfree(this);
@@ -153,7 +153,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
new->mtd.writesize = 1;
ret = -EAGAIN;
- if (add_mtd_device(&new->mtd)) {
+ if (mtd_device_register(&new->mtd, NULL, 0)) {
pr_err("Failed to register new device\n");
goto out2;
}
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 41b8cdcc64cb..ecff765579dd 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -798,7 +798,7 @@ static int __init init_pmc551(void)
mtd->writesize = 1;
mtd->owner = THIS_MODULE;
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
printk(KERN_NOTICE "pmc551: Failed to register new device\n");
pci_iounmap(PCI_Device, priv->start);
kfree(mtd->priv);
@@ -806,7 +806,7 @@ static int __init init_pmc551(void)
break;
}
- /* Keep a reference as the add_mtd_device worked */
+ /* Keep a reference as mtd_device_register() worked */
pci_dev_get(PCI_Device);
printk(KERN_NOTICE "Registered pmc551 memory device.\n");
@@ -856,7 +856,7 @@ static void __exit cleanup_pmc551(void)
pci_dev_put(priv->dev);
kfree(mtd->priv);
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
kfree(mtd);
found++;
}
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 592016a0668f..e585263161b9 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -210,7 +210,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
(*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
(*curmtd)->mtdinfo->writesize = 1;
- if (add_mtd_device((*curmtd)->mtdinfo)) {
+ if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
E("slram: Failed to register new device\n");
iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
kfree((*curmtd)->mtdinfo->priv);
@@ -231,7 +231,7 @@ static void unregister_devices(void)
while (slram_mtdlist) {
nextitem = slram_mtdlist->next;
- del_mtd_device(slram_mtdlist->mtdinfo);
+ mtd_device_unregister(slram_mtdlist->mtdinfo);
iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
kfree(slram_mtdlist->mtdinfo->priv);
kfree(slram_mtdlist->mtdinfo);
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index c163e619abc9..1e2c430aaad2 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -66,7 +66,7 @@ struct flash_info {
#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
-static struct flash_info __initdata sst25l_flash_info[] = {
+static struct flash_info __devinitdata sst25l_flash_info[] = {
{"sst25lf020a", 0xbf43, 256, 1024, 4096},
{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};
@@ -381,6 +381,8 @@ static int __devinit sst25l_probe(struct spi_device *spi)
struct sst25l_flash *flash;
struct flash_platform_data *data;
int ret, i;
+ struct mtd_partition *parts = NULL;
+ int nr_parts = 0;
flash_info = sst25l_match_device(spi);
if (!flash_info)
@@ -420,46 +422,37 @@ static int __devinit sst25l_probe(struct spi_device *spi)
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
- if (mtd_has_partitions()) {
- struct mtd_partition *parts = NULL;
- int nr_parts = 0;
- if (mtd_has_cmdlinepart()) {
- static const char *part_probes[] =
- {"cmdlinepart", NULL};
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[] = {"cmdlinepart", NULL};
- nr_parts = parse_mtd_partitions(&flash->mtd,
- part_probes,
- &parts, 0);
- }
+ nr_parts = parse_mtd_partitions(&flash->mtd,
+ part_probes,
+ &parts, 0);
+ }
- if (nr_parts <= 0 && data && data->parts) {
- parts = data->parts;
- nr_parts = data->nr_parts;
- }
+ if (nr_parts <= 0 && data && data->parts) {
+ parts = data->parts;
+ nr_parts = data->nr_parts;
+ }
- if (nr_parts > 0) {
- for (i = 0; i < nr_parts; i++) {
- DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
- "{.name = %s, .offset = 0x%llx, "
- ".size = 0x%llx (%lldKiB) }\n",
- i, parts[i].name,
- (long long)parts[i].offset,
- (long long)parts[i].size,
- (long long)(parts[i].size >> 10));
- }
-
- flash->partitioned = 1;
- return add_mtd_partitions(&flash->mtd,
- parts, nr_parts);
+ if (nr_parts > 0) {
+ for (i = 0; i < nr_parts; i++) {
+ DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
+ "{.name = %s, .offset = 0x%llx, "
+ ".size = 0x%llx (%lldKiB) }\n",
+ i, parts[i].name,
+ (long long)parts[i].offset,
+ (long long)parts[i].size,
+ (long long)(parts[i].size >> 10));
}
- } else if (data && data->nr_parts) {
- dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
- data->nr_parts, data->name);
+ flash->partitioned = 1;
+ return mtd_device_register(&flash->mtd, parts,
+ nr_parts);
}
- ret = add_mtd_device(&flash->mtd);
+ ret = mtd_device_register(&flash->mtd, NULL, 0);
if (ret == 1) {
kfree(flash);
dev_set_drvdata(&spi->dev, NULL);
@@ -469,15 +462,12 @@ static int __devinit sst25l_probe(struct spi_device *spi)
return 0;
}
-static int __exit sst25l_remove(struct spi_device *spi)
+static int __devexit sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
int ret;
- if (mtd_has_partitions() && flash->partitioned)
- ret = del_mtd_partitions(&flash->mtd);
- else
- ret = del_mtd_device(&flash->mtd);
+ ret = mtd_device_unregister(&flash->mtd);
if (ret == 0)
kfree(flash);
return ret;
@@ -490,7 +480,7 @@ static struct spi_driver sst25l_driver = {
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
- .remove = __exit_p(sst25l_remove),
+ .remove = __devexit_p(sst25l_remove),
};
static int __init sst25l_init(void)
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 12679925b420..65655dd59e1f 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -313,12 +313,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
if (ret) {
/* Oops. something got wrong. */
/* Resume and pretend we weren't here. */
- map_write(map, CMD(LPDDR_RESUME),
- map->pfow_base + PFOW_COMMAND_CODE);
- map_write(map, CMD(LPDDR_START_EXECUTION),
- map->pfow_base + PFOW_COMMAND_EXECUTE);
- chip->state = FL_ERASING;
- chip->oldstate = FL_READY;
+ put_chip(map, chip);
printk(KERN_ERR "%s: suspend operation failed."
"State may be wrong \n", map->name);
return -EIO;
@@ -383,7 +378,6 @@ static void put_chip(struct map_info *map, struct flchip *chip)
switch (chip->oldstate) {
case FL_ERASING:
- chip->state = chip->oldstate;
map_write(map, CMD(LPDDR_RESUME),
map->pfow_base + PFOW_COMMAND_CODE);
map_write(map, CMD(LPDDR_START_EXECUTION),
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5069111c81cc..c0c328c5b133 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -82,7 +82,6 @@ config MTD_PHYSMAP_OF
config MTD_PMC_MSP_EVM
tristate "CFI Flash device mapped on PMC-Sierra MSP"
depends on PMC_MSP && MTD_CFI
- select MTD_PARTITIONS
help
This provides a 'mapping' driver which supports the way
in which user-programmable flash chips are connected on the
@@ -122,7 +121,7 @@ config MTD_SC520CDP
config MTD_NETSC520
tristate "CFI Flash device mapped on AMD NetSc520"
- depends on X86 && MTD_CFI && MTD_PARTITIONS
+ depends on X86 && MTD_CFI
help
This enables access routines for the flash chips on the AMD NetSc520
demonstration board. If you have one of these boards and would like
@@ -131,7 +130,6 @@ config MTD_NETSC520
config MTD_TS5500
tristate "JEDEC Flash device mapped on Technologic Systems TS-5500"
depends on X86
- select MTD_PARTITIONS
select MTD_JEDECPROBE
select MTD_CFI_AMDSTD
help
@@ -149,7 +147,7 @@ config MTD_TS5500
config MTD_SBC_GXX
tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
- depends on X86 && MTD_CFI_INTELEXT && MTD_PARTITIONS && MTD_COMPLEX_MAPPINGS
+ depends on X86 && MTD_CFI_INTELEXT && MTD_COMPLEX_MAPPINGS
help
This provides a driver for the on-board flash of Arcom Control
Systems' SBC-GXn family of boards, formerly known as SBC-MediaGX.
@@ -161,7 +159,6 @@ config MTD_SBC_GXX
config MTD_PXA2XX
tristate "CFI Flash device mapped on Intel XScale PXA2xx based boards"
depends on (PXA25x || PXA27x) && MTD_CFI_INTELEXT
- select MTD_PARTITIONS
help
This provides a driver for the NOR flash attached to a PXA2xx chip.
@@ -185,7 +182,7 @@ config MTD_VMAX
config MTD_SCx200_DOCFLASH
tristate "Flash device mapped with DOCCS on NatSemi SCx200"
- depends on SCx200 && MTD_CFI && MTD_PARTITIONS
+ depends on SCx200 && MTD_CFI
help
Enable support for a flash chip mapped using the DOCCS signal on a
National Semiconductor SCx200 processor.
@@ -247,7 +244,7 @@ config MTD_TSUNAMI
config MTD_NETtel
tristate "CFI flash device on SnapGear/SecureEdge"
- depends on X86 && MTD_PARTITIONS && MTD_JEDECPROBE
+ depends on X86 && MTD_JEDECPROBE
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
@@ -269,7 +266,7 @@ config MTD_LANTIQ
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
+ depends on X86 && MTD_CFI_INTELEXT && BROKEN
help
MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -355,7 +352,7 @@ config MTD_CDB89712
config MTD_SA1100
tristate "CFI Flash device mapped on StrongARM SA11x0"
- depends on MTD_CFI && ARCH_SA1100 && MTD_PARTITIONS
+ depends on MTD_CFI && ARCH_SA1100
help
This enables access to the flash chips on most platforms based on
the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
@@ -389,7 +386,7 @@ config MTD_IXP2000
config MTD_FORTUNET
tristate "CFI Flash device mapped on the FortuNet board"
- depends on MTD_CFI && MTD_PARTITIONS && SA1100_FORTUNET
+ depends on MTD_CFI && SA1100_FORTUNET
help
This enables access to the Flash on the FortuNet board. If you
have such a board, say 'Y'.
@@ -461,7 +458,6 @@ config MTD_PCMCIA_ANONYMOUS
config MTD_BFIN_ASYNC
tristate "Blackfin BF533-STAMP Flash Chip Support"
depends on BFIN533_STAMP && MTD_CFI && MTD_COMPLEX_MAPPINGS
- select MTD_PARTITIONS
default y
help
Map driver which allows for simultaneous utilization of
@@ -473,7 +469,6 @@ config MTD_GPIO_ADDR
tristate "GPIO-assisted Flash Chip Support"
depends on GENERIC_GPIO || GPIOLIB
depends on MTD_COMPLEX_MAPPINGS
- select MTD_PARTITIONS
help
Map driver which allows flashes to be partially physically addressed
and assisted by GPIOs.
@@ -482,14 +477,13 @@ config MTD_GPIO_ADDR
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
- depends on MTD_PARTITIONS && MTD_RAM=y && !MMU
+ depends on MTD_RAM=y && !MMU
help
Map driver to support image based filesystems for uClinux.
config MTD_WRSBC8260
tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
depends on (SBC82xx || SBC8560)
- select MTD_PARTITIONS
select MTD_MAP_BANK_WIDTH_4
select MTD_MAP_BANK_WIDTH_1
select MTD_CFI_I1
@@ -502,7 +496,6 @@ config MTD_WRSBC8260
config MTD_DMV182
tristate "Map driver for Dy-4 SVME/DMV-182 board."
depends on DMV182
- select MTD_PARTITIONS
select MTD_MAP_BANK_WIDTH_32
select MTD_CFI_I8
select MTD_CFI_AMDSTD
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 92de7e3a49a5..e2875d6fe129 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -82,7 +82,7 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
if (map->rsrc.parent) {
release_resource(&map->rsrc);
}
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -262,7 +262,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 53664188fc47..e5bfd0e093bb 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -88,7 +88,7 @@ map:
sram_mtd->owner = THIS_MODULE;
sram_mtd->erasesize = 16;
- if (add_mtd_device(sram_mtd)) {
+ if (mtd_device_register(sram_mtd, NULL, 0)) {
printk("NV-RAM device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -111,7 +111,7 @@ out:
static void __exit cleanup_autcpu12_maps(void)
{
if (sram_mtd) {
- del_mtd_device(sram_mtd);
+ mtd_device_unregister(sram_mtd);
map_destroy(sram_mtd);
iounmap((void *)autcpu12_sram_map.virt);
}
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 1f3049590d9e..608967fe74c6 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -224,8 +224,8 @@ probe_ok:
goto err_probe;
}
- return add_mtd_partitions(bcm963xx_mtd_info, parsed_parts,
- parsed_nr_parts);
+ return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
+ parsed_nr_parts);
err_probe:
iounmap(bcm963xx_map.virt);
@@ -235,7 +235,7 @@ err_probe:
static int bcm963xx_remove(struct platform_device *pdev)
{
if (bcm963xx_mtd_info) {
- del_mtd_partitions(bcm963xx_mtd_info);
+ mtd_device_unregister(bcm963xx_mtd_info);
map_destroy(bcm963xx_mtd_info);
}
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 85dd18193cf2..d4297a97e100 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -41,9 +41,7 @@ struct async_state {
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
};
static void switch_to_flash(struct async_state *state)
@@ -124,9 +122,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
switch_back(state);
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
-#endif
static int __devinit bfin_flash_probe(struct platform_device *pdev)
{
@@ -169,22 +165,17 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
if (ret > 0) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, ret);
+ mtd_device_register(state->mtd, pdata->parts, ret);
state->parts = pdata->parts;
-
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts);
-
- } else
-#endif
- {
+ mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
+ } else {
pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
- add_mtd_device(state->mtd);
+ mtd_device_register(state->mtd, NULL, 0);
}
platform_set_drvdata(pdev, state);
@@ -196,10 +187,8 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
gpio_free(state->enet_flash_pin);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(state->mtd);
+ mtd_device_unregister(state->mtd);
kfree(state->parts);
-#endif
map_destroy(state->mtd);
kfree(state);
return 0;
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index 8d92d8db9a98..c29cbf87ea0c 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -75,7 +75,7 @@ static int __init init_cdb89712_flash (void)
flash_mtd->owner = THIS_MODULE;
- if (add_mtd_device(flash_mtd)) {
+ if (mtd_device_register(flash_mtd, NULL, 0)) {
printk("FLASH device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -141,7 +141,7 @@ static int __init init_cdb89712_sram (void)
sram_mtd->owner = THIS_MODULE;
sram_mtd->erasesize = 16;
- if (add_mtd_device(sram_mtd)) {
+ if (mtd_device_register(sram_mtd, NULL, 0)) {
printk("SRAM device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -209,7 +209,7 @@ static int __init init_cdb89712_bootrom (void)
bootrom_mtd->owner = THIS_MODULE;
bootrom_mtd->erasesize = 0x10000;
- if (add_mtd_device(bootrom_mtd)) {
+ if (mtd_device_register(bootrom_mtd, NULL, 0)) {
printk("BootROM device addition failed\n");
err = -ENOMEM;
goto out_probe;
@@ -249,21 +249,21 @@ static int __init init_cdb89712_maps(void)
static void __exit cleanup_cdb89712_maps(void)
{
if (sram_mtd) {
- del_mtd_device(sram_mtd);
+ mtd_device_unregister(sram_mtd);
map_destroy(sram_mtd);
iounmap((void *)cdb89712_sram_map.virt);
release_resource (&cdb89712_sram_resource);
}
if (flash_mtd) {
- del_mtd_device(flash_mtd);
+ mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
iounmap((void *)cdb89712_flash_map.virt);
release_resource (&cdb89712_flash_resource);
}
if (bootrom_mtd) {
- del_mtd_device(bootrom_mtd);
+ mtd_device_unregister(bootrom_mtd);
map_destroy(bootrom_mtd);
iounmap((void *)cdb89712_bootrom_map.virt);
release_resource (&cdb89712_bootrom_resource);
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index 23f551dc8ca8..06f9c9815720 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -224,7 +224,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
{
int i;
- del_mtd_partitions(mtd);
+ mtd_device_unregister(mtd);
if (mtd != clps[0].mtd)
mtd_concat_destroy(mtd);
@@ -292,11 +292,11 @@ static void __init clps_locate_partitions(struct mtd_info *mtd)
if (nr_parts == 0) {
printk(KERN_NOTICE "clps flash: no partition info "
"available, registering whole flash\n");
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
} else {
printk(KERN_NOTICE "clps flash: using %s partition "
"definition\n", part_type);
- add_mtd_partitions(mtd, parsed_parts, nr_parts);
+ mtd_device_register(mtd, parsed_parts, nr_parts);
}
/* Always succeeds. */
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index f71343cd77cc..d16fc9d3b8cd 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -107,7 +107,7 @@ static int __init init_flagadm(void)
mymtd = do_map_probe("cfi_probe", &flagadm_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_partitions(mymtd, flagadm_parts, PARTITION_COUNT);
+ mtd_device_register(mymtd, flagadm_parts, PARTITION_COUNT);
printk(KERN_NOTICE "FlagaDM flash device initialized\n");
return 0;
}
@@ -119,7 +119,7 @@ static int __init init_flagadm(void)
static void __exit cleanup_flagadm(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (flagadm_map.virt) {
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 5fdb7b26cea3..3d0e762fa5f2 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -94,7 +94,7 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
if (map->rsrc.parent)
release_resource(&map->rsrc);
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -291,7 +291,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index cfacfa6f45dd..85bdece6ab3f 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -93,7 +93,7 @@ static int __init init_dbox2_flash(void)
mymtd->owner = THIS_MODULE;
/* Create MTD devices for each partition. */
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
@@ -105,7 +105,7 @@ static int __init init_dbox2_flash(void)
static void __exit cleanup_dbox2_flash(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (dbox2_flash_map.virt) {
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index b3cb3a183809..7a9e1989c977 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -145,17 +145,13 @@ static struct map_info dc21285_map = {
/* Partition stuff */
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition *dc21285_parts;
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-#endif
static int __init init_dc21285(void)
{
-#ifdef CONFIG_MTD_PARTITIONS
int nrparts;
-#endif
/* Determine bankwidth */
switch (*CSR_SA110_CNTL & (3<<14)) {
@@ -204,13 +200,8 @@ static int __init init_dc21285(void)
dc21285_mtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
- if (nrparts > 0)
- add_mtd_partitions(dc21285_mtd, dc21285_parts, nrparts);
- else
-#endif
- add_mtd_device(dc21285_mtd);
+ mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
if(machine_is_ebsa285()) {
/*
@@ -232,14 +223,9 @@ static int __init init_dc21285(void)
static void __exit cleanup_dc21285(void)
{
-#ifdef CONFIG_MTD_PARTITIONS
- if (dc21285_parts) {
- del_mtd_partitions(dc21285_mtd);
+ mtd_device_unregister(dc21285_mtd);
+ if (dc21285_parts)
kfree(dc21285_parts);
- } else
-#endif
- del_mtd_device(dc21285_mtd);
-
map_destroy(dc21285_mtd);
iounmap(dc21285_map.virt);
}
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index 0713e3a5a22c..3e393f0da823 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -450,7 +450,7 @@ static int __init init_dnpc(void)
partition_info[2].mtdp = &lowlvl_parts[1];
partition_info[3].mtdp = &lowlvl_parts[3];
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
/*
** now create a virtual MTD device by concatenating the four partitions
@@ -463,7 +463,8 @@ static int __init init_dnpc(void)
** we do not supply mtd pointers in higlvl_partition_info, so
** add_mtd_partitions() will register the devices.
*/
- add_mtd_partitions(merged_mtd, higlvl_partition_info, NUM_HIGHLVL_PARTITIONS);
+ mtd_device_register(merged_mtd, higlvl_partition_info,
+ NUM_HIGHLVL_PARTITIONS);
}
return 0;
@@ -472,12 +473,12 @@ static int __init init_dnpc(void)
static void __exit cleanup_dnpc(void)
{
if(merged_mtd) {
- del_mtd_partitions(merged_mtd);
+ mtd_device_unregister(merged_mtd);
mtd_concat_destroy(merged_mtd);
}
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (dnpc_map.virt) {
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
index d171674eb2ed..6538ac675e00 100644
--- a/drivers/mtd/maps/dmv182.c
+++ b/drivers/mtd/maps/dmv182.c
@@ -120,7 +120,7 @@ static int __init init_svme182(void)
this_mtd->size >> 20, FLASH_BASE_ADDR);
this_mtd->owner = THIS_MODULE;
- add_mtd_partitions(this_mtd, partitions, num_parts);
+ mtd_device_register(this_mtd, partitions, num_parts);
return 0;
}
@@ -129,7 +129,7 @@ static void __exit cleanup_svme182(void)
{
if (this_mtd)
{
- del_mtd_partitions(this_mtd);
+ mtd_device_unregister(this_mtd);
map_destroy(this_mtd);
}
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index be9e90b44587..fe42a212bb3e 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -15,10 +15,7 @@
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
#define WINDOW_SIZE 0x01000000
@@ -40,8 +37,6 @@ struct map_info edb7312nor_map = {
.phys = WINDOW_ADDR,
};
-#ifdef CONFIG_MTD_PARTITIONS
-
/*
* MTD partitioning stuff
*/
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[3] =
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-#endif
-
static int mtd_parts_nb = 0;
static struct mtd_partition *mtd_parts = 0;
@@ -96,27 +89,24 @@ static int __init init_edb7312nor(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
if (mtd_parts_nb > 0)
- part_type = "detected";
+ part_type = "detected";
- if (mtd_parts_nb == 0)
- {
+ if (mtd_parts_nb == 0) {
mtd_parts = static_partitions;
mtd_parts_nb = ARRAY_SIZE(static_partitions);
part_type = "static";
}
-#endif
- add_mtd_device(mymtd);
+
if (mtd_parts_nb == 0)
- printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
+ printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
else
- {
printk(KERN_NOTICE MSG_PREFIX
"using %s partition definition\n", part_type);
- add_mtd_partitions(mymtd, mtd_parts, mtd_parts_nb);
- }
+ /* Register the whole device first. */
+ mtd_device_register(mymtd, NULL, 0);
+ mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
return 0;
}
@@ -127,7 +117,7 @@ static int __init init_edb7312nor(void)
static void __exit cleanup_edb7312nor(void)
{
if (mymtd) {
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (edb7312nor_map.virt) {
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 4feb7507ab7c..08322b1c3e81 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -128,7 +128,7 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -352,7 +352,7 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
/* Now that the mtd device is complete, claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index 1e43124d498b..956e2e4f30ea 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -243,8 +243,9 @@ static int __init init_fortunet(void)
&map_regions[ix].map_info);
}
map_regions[ix].mymtd->owner = THIS_MODULE;
- add_mtd_partitions(map_regions[ix].mymtd,
- map_regions[ix].parts,map_regions_parts[ix]);
+ mtd_device_register(map_regions[ix].mymtd,
+ map_regions[ix].parts,
+ map_regions_parts[ix]);
}
}
if(iy)
@@ -261,7 +262,7 @@ static void __exit cleanup_fortunet(void)
{
if( map_regions[ix].mymtd )
{
- del_mtd_partitions( map_regions[ix].mymtd );
+ mtd_device_unregister(map_regions[ix].mymtd);
map_destroy( map_regions[ix].mymtd );
}
iounmap((void *)map_regions[ix].map_info.virt);
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index af5707a80205..7568c5f8b8ae 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -155,9 +155,7 @@ static void gf_copy_to(struct map_info *map, unsigned long to, const void *from,
memcpy_toio(map->virt + (to % state->win_size), from, len);
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
-#endif
/**
* gpio_flash_probe() - setup a mapping for a GPIO assisted flash
@@ -189,7 +187,7 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
*/
static int __devinit gpio_flash_probe(struct platform_device *pdev)
{
- int ret;
+ int nr_parts;
size_t i, arr_size;
struct physmap_flash_data *pdata;
struct resource *memory;
@@ -254,24 +252,21 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
- ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
- if (ret > 0) {
+ nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
+ &pdata->parts, 0);
+ if (nr_parts > 0) {
pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, ret);
kfree(pdata->parts);
-
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
- add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts);
-
- } else
-#endif
- {
+ nr_parts = pdata->nr_parts;
+ } else {
pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
- add_mtd_device(state->mtd);
+ nr_parts = 0;
}
+ mtd_device_register(state->mtd, pdata->parts, nr_parts);
+
return 0;
}
@@ -282,9 +277,7 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
do {
gpio_free(state->gpio_addrs[i]);
} while (++i < state->gpio_count);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(state->mtd);
-#endif
+ mtd_device_unregister(state->mtd);
map_destroy(state->mtd);
kfree(state);
return 0;
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 72c724fa8c27..7f035860a36b 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -92,18 +92,16 @@ static int __init h720x_mtd_init(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
if (nr_mtd_parts > 0)
part_type = "command line";
-#endif
if (nr_mtd_parts <= 0) {
mtd_parts = h720x_partitions;
nr_mtd_parts = NUM_PARTITIONS;
part_type = "builtin";
}
printk(KERN_INFO "Using %s partition table\n", part_type);
- add_mtd_partitions(mymtd, mtd_parts, nr_mtd_parts);
+ mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
return 0;
}
@@ -118,7 +116,7 @@ static void __exit h720x_mtd_cleanup(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 1337a4191a0c..6689dcb3124d 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -67,7 +67,7 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
- del_mtd_device(map->mtd);
+ mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
@@ -287,7 +287,7 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
/* Now that the mtd device is complete, claim and export it */
map->mtd->owner = THIS_MODULE;
- if (add_mtd_device(map->mtd)) {
+ if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 998a27da97f3..404a50cbafa0 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -15,10 +15,7 @@
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
#define WINDOW_SIZE0 0x00800000
@@ -49,8 +46,6 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
},
};
-#ifdef CONFIG_MTD_PARTITIONS
-
/*
* MTD partitioning stuff
*/
@@ -66,8 +61,6 @@ static struct mtd_partition static_partitions[] =
static int mtd_parts_nb[NUM_FLASHBANKS];
static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
-#endif
-
static const char *probes[] = { "cmdlinepart", NULL };
static int __init init_impa7(void)
@@ -104,7 +97,6 @@ static int __init init_impa7(void)
if (impa7_mtd[i]) {
impa7_mtd[i]->owner = THIS_MODULE;
devicesfound++;
-#ifdef CONFIG_MTD_PARTITIONS
mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
probes,
&mtd_parts[i],
@@ -120,12 +112,8 @@ static int __init init_impa7(void)
printk(KERN_NOTICE MSG_PREFIX
"using %s partition definition\n",
part_type);
- add_mtd_partitions(impa7_mtd[i],
- mtd_parts[i], mtd_parts_nb[i]);
-#else
- add_mtd_device(impa7_mtd[i]);
-
-#endif
+ mtd_device_register(impa7_mtd[i],
+ mtd_parts[i], mtd_parts_nb[i]);
}
else
iounmap((void *)impa7_map[i].virt);
@@ -138,11 +126,7 @@ static void __exit cleanup_impa7(void)
int i;
for (i=0; i<NUM_FLASHBANKS; i++) {
if (impa7_mtd[i]) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(impa7_mtd[i]);
-#else
- del_mtd_device(impa7_mtd[i]);
-#endif
+ mtd_device_unregister(impa7_mtd[i]);
map_destroy(impa7_mtd[i]);
iounmap((void *)impa7_map[i].virt);
impa7_map[i].virt = 0;
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index fc1998512eb4..d2f47be8754b 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -66,33 +66,18 @@ struct vr_nor_mtd {
static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
- if (p->nr_parts > 0) {
-#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
- del_mtd_partitions(p->info);
-#endif
- } else
- del_mtd_device(p->info);
+ mtd_device_unregister(p->info);
}
static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
{
- int err = 0;
-#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
struct mtd_partition *parts;
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/* register the flash bank */
-#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
/* partition the flash bank */
p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
- if (p->nr_parts > 0)
- err = add_mtd_partitions(p->info, parts, p->nr_parts);
-#endif
- if (p->nr_parts <= 0)
- err = add_mtd_device(p->info);
-
- return err;
+ return mtd_device_register(p->info, parts, p->nr_parts);
}
static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 9639d83a9d6c..c00b9175ba9e 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -119,7 +119,7 @@ static int ixp2000_flash_remove(struct platform_device *dev)
return 0;
if (info->mtd) {
- del_mtd_partitions(info->mtd);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
if (info->map.map_priv_1)
@@ -230,7 +230,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
if (err > 0) {
- err = add_mtd_partitions(info->mtd, info->partitions, err);
+ err = mtd_device_register(info->mtd, info->partitions, err);
if(err)
dev_err(&dev->dev, "Could not parse partitions\n");
}
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 1f9fde0dad35..155b21942f47 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -162,7 +162,7 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
return 0;
if (info->mtd) {
- del_mtd_partitions(info->mtd);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
if (info->map.virt)
@@ -252,10 +252,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
/* Use the fast version */
info->map.write = ixp4xx_write16;
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
dev->resource->start);
-#endif
if (nr_parts > 0) {
part_type = "dynamic";
} else {
@@ -263,18 +261,16 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
nr_parts = plat->nr_parts;
part_type = "static";
}
- if (nr_parts == 0) {
+ if (nr_parts == 0)
printk(KERN_NOTICE "IXP4xx flash: no partition info "
"available, registering whole flash\n");
- err = add_mtd_device(info->mtd);
- } else {
+ else
printk(KERN_NOTICE "IXP4xx flash: using %s partition "
"definition\n", part_type);
- err = add_mtd_partitions(info->mtd, info->partitions, nr_parts);
- if(err)
- printk(KERN_ERR "Could not parse partitions\n");
- }
+ err = mtd_device_register(info->mtd, info->partitions, nr_parts);
+ if (err)
+ printk(KERN_ERR "Could not parse partitions\n");
if (err)
goto Error;
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 9e054503c4cf..dd0360ba2412 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -138,7 +138,7 @@ static int __init init_l440gx(void)
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_device(mymtd);
+ mtd_device_register(mymtd, NULL, 0);
return 0;
}
@@ -148,7 +148,7 @@ static int __init init_l440gx(void)
static void __exit cleanup_l440gx(void)
{
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
iounmap(l440gx_map.virt);
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index ee2548085334..5936c466e901 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -112,18 +112,9 @@ static int latch_addr_flash_remove(struct platform_device *dev)
latch_addr_data = dev->dev.platform_data;
if (info->mtd != NULL) {
- if (mtd_has_partitions()) {
- if (info->nr_parts) {
- del_mtd_partitions(info->mtd);
- kfree(info->parts);
- } else if (latch_addr_data->nr_parts) {
- del_mtd_partitions(info->mtd);
- } else {
- del_mtd_device(info->mtd);
- }
- } else {
- del_mtd_device(info->mtd);
- }
+ if (info->nr_parts)
+ kfree(info->parts);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
@@ -215,23 +206,21 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
}
info->mtd->owner = THIS_MODULE;
- if (mtd_has_partitions()) {
-
- err = parse_mtd_partitions(info->mtd,
- (const char **)part_probe_types,
- &info->parts, 0);
- if (err > 0) {
- add_mtd_partitions(info->mtd, info->parts, err);
- return 0;
- }
- if (latch_addr_data->nr_parts) {
- pr_notice("Using latch-addr-flash partition information\n");
- add_mtd_partitions(info->mtd, latch_addr_data->parts,
- latch_addr_data->nr_parts);
- return 0;
- }
+ err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types,
+ &info->parts, 0);
+ if (err > 0) {
+ mtd_device_register(info->mtd, info->parts, err);
+ return 0;
+ }
+ if (latch_addr_data->nr_parts) {
+ pr_notice("Using latch-addr-flash partition information\n");
+ mtd_device_register(info->mtd,
+ latch_addr_data->parts,
+ latch_addr_data->nr_parts);
+ return 0;
}
- add_mtd_device(info->mtd);
+
+ mtd_device_register(info->mtd, NULL, 0);
return 0;
iounmap:
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 0eb5a7c85380..93fa56c33003 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -69,8 +69,8 @@ static int __init init_mbx(void)
mymtd = do_map_probe("jedec_probe", &mbx_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_device(mymtd);
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(mymtd, NULL, 0);
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
@@ -81,7 +81,7 @@ static int __init init_mbx(void)
static void __exit cleanup_mbx(void)
{
if (mymtd) {
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (mbx_map.virt) {
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index c0cb319b2b70..81dc2598bc0a 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -116,14 +116,14 @@ static int __init init_netsc520(void)
}
mymtd->owner = THIS_MODULE;
- add_mtd_partitions( mymtd, partition_info, NUM_PARTITIONS );
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
static void __exit cleanup_netsc520(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (netsc520_map.virt) {
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index a97133eb9d70..eadcfffc4f9c 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -383,13 +383,13 @@ static int __init nettel_init(void)
/* No BIOS regions when AMD boot */
num_intel_partitions -= 2;
}
- rc = add_mtd_partitions(intel_mtd, nettel_intel_partitions,
- num_intel_partitions);
+ rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
+ num_intel_partitions);
#endif
if (amd_mtd) {
- rc = add_mtd_partitions(amd_mtd, nettel_amd_partitions,
- num_amd_partitions);
+ rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
+ num_amd_partitions);
}
#ifdef CONFIG_MTD_CFI_INTELEXT
@@ -419,7 +419,7 @@ static void __exit nettel_cleanup(void)
unregister_reboot_notifier(&nettel_notifier_block);
#endif
if (amd_mtd) {
- del_mtd_partitions(amd_mtd);
+ mtd_device_unregister(amd_mtd);
map_destroy(amd_mtd);
}
if (nettel_mmcrp) {
@@ -432,7 +432,7 @@ static void __exit nettel_cleanup(void)
}
#ifdef CONFIG_MTD_CFI_INTELEXT
if (intel_mtd) {
- del_mtd_partitions(intel_mtd);
+ mtd_device_unregister(intel_mtd);
map_destroy(intel_mtd);
}
if (nettel_intel_map.virt) {
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index 23fe1786770f..807ac2a2e686 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -175,7 +175,7 @@ void cleanup_oct5066(void)
int i;
for (i=0; i<2; i++) {
if (oct5066_mtd[i]) {
- del_mtd_device(oct5066_mtd[i]);
+ mtd_device_unregister(oct5066_mtd[i]);
map_destroy(oct5066_mtd[i]);
}
}
@@ -220,7 +220,7 @@ static int __init init_oct5066(void)
oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
if (oct5066_mtd[i]) {
oct5066_mtd[i]->owner = THIS_MODULE;
- add_mtd_device(oct5066_mtd[i]);
+ mtd_device_register(oct5066_mtd[i], NULL, 0);
}
}
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 48f4cf5cb9d1..1d005a3e9b41 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -313,7 +313,7 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto release;
mtd->owner = THIS_MODULE;
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
pci_set_drvdata(dev, mtd);
@@ -336,7 +336,7 @@ mtd_pci_remove(struct pci_dev *dev)
struct mtd_info *mtd = pci_get_drvdata(dev);
struct map_pci_info *map = mtd->priv;
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
map_destroy(mtd);
map->exit(dev, map);
kfree(map);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 33dc2829b01b..bbe168b65c26 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -630,7 +630,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
dev->pcmcia_map.copy_to = pcmcia_copy_to;
}
- if(add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
map_destroy(mtd);
dev->mtd_info = NULL;
dev_err(&dev->p_dev->dev,
@@ -669,7 +669,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
DEBUG(3, "link=0x%p", link);
if(dev->mtd_info) {
- del_mtd_device(dev->mtd_info);
+ mtd_device_unregister(dev->mtd_info);
dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
dev->mtd_info->index);
map_destroy(dev->mtd_info);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 1a9b94f0ee54..f64cee4a3bfb 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,10 +27,8 @@ struct physmap_flash_info {
struct mtd_info *mtd[MAX_RESOURCES];
struct mtd_info *cmtd;
struct map_info map[MAX_RESOURCES];
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
-#endif
};
static int physmap_flash_remove(struct platform_device *dev)
@@ -47,18 +45,9 @@ static int physmap_flash_remove(struct platform_device *dev)
physmap_data = dev->dev.platform_data;
if (info->cmtd) {
-#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts || physmap_data->nr_parts) {
- del_mtd_partitions(info->cmtd);
-
- if (info->nr_parts)
- kfree(info->parts);
- } else {
- del_mtd_device(info->cmtd);
- }
-#else
- del_mtd_device(info->cmtd);
-#endif
+ mtd_device_unregister(info->cmtd);
+ if (info->nr_parts)
+ kfree(info->parts);
if (info->cmtd != info->mtd[0])
mtd_concat_destroy(info->cmtd);
}
@@ -92,10 +81,8 @@ static const char *rom_probe_types[] = {
"qinfo_probe",
"map_rom",
NULL };
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
NULL };
-#endif
static int physmap_flash_probe(struct platform_device *dev)
{
@@ -188,24 +175,23 @@ static int physmap_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(info->cmtd, part_probe_types,
- &info->parts, 0);
+ &info->parts, 0);
if (err > 0) {
- add_mtd_partitions(info->cmtd, info->parts, err);
+ mtd_device_register(info->cmtd, info->parts, err);
info->nr_parts = err;
return 0;
}
if (physmap_data->nr_parts) {
printk(KERN_NOTICE "Using physmap partition information\n");
- add_mtd_partitions(info->cmtd, physmap_data->parts,
- physmap_data->nr_parts);
+ mtd_device_register(info->cmtd, physmap_data->parts,
+ physmap_data->nr_parts);
return 0;
}
-#endif
- add_mtd_device(info->cmtd);
+ mtd_device_register(info->cmtd, NULL, 0);
+
return 0;
err_out:
@@ -269,14 +255,12 @@ void physmap_configure(unsigned long addr, unsigned long size,
physmap_flash_data.set_vpp = set_vpp;
}
-#ifdef CONFIG_MTD_PARTITIONS
void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
{
physmap_flash_data.nr_parts = num_parts;
physmap_flash_data.parts = parts;
}
#endif
-#endif
static int __init physmap_init(void)
{
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c1d33464aee8..d251d1db129b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -34,16 +34,12 @@ struct of_flash_list {
struct of_flash {
struct mtd_info *cmtd;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
int list_size; /* number of elements in of_flash_list */
struct of_flash_list list[0];
};
-#ifdef CONFIG_MTD_PARTITIONS
#define OF_FLASH_PARTS(info) ((info)->parts)
-
static int parse_obsolete_partitions(struct platform_device *dev,
struct of_flash *info,
struct device_node *dp)
@@ -89,10 +85,6 @@ static int parse_obsolete_partitions(struct platform_device *dev,
return nr_parts;
}
-#else /* MTD_PARTITIONS */
-#define OF_FLASH_PARTS(info) (0)
-#define parse_partitions(info, dev) (0)
-#endif /* MTD_PARTITIONS */
static int of_flash_remove(struct platform_device *dev)
{
@@ -105,17 +97,14 @@ static int of_flash_remove(struct platform_device *dev)
dev_set_drvdata(&dev->dev, NULL);
if (info->cmtd != info->list[0].mtd) {
- del_mtd_device(info->cmtd);
+ mtd_device_unregister(info->cmtd);
mtd_concat_destroy(info->cmtd);
}
if (info->cmtd) {
- if (OF_FLASH_PARTS(info)) {
- del_mtd_partitions(info->cmtd);
+ if (OF_FLASH_PARTS(info))
kfree(OF_FLASH_PARTS(info));
- } else {
- del_mtd_device(info->cmtd);
- }
+ mtd_device_unregister(info->cmtd);
}
for (i = 0; i < info->list_size; i++) {
@@ -172,7 +161,6 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
}
}
-#ifdef CONFIG_MTD_PARTITIONS
/* When partitions are set we look for a linux,part-probe property which
specifies the list of partition probers to use. If none is given then the
default is used. These take precedence over other device tree
@@ -212,14 +200,11 @@ static void __devinit of_free_probes(const char **probes)
if (probes != part_probe_types_def)
kfree(probes);
}
-#endif
static struct of_device_id of_flash_match[];
static int __devinit of_flash_probe(struct platform_device *dev)
{
-#ifdef CONFIG_MTD_PARTITIONS
const char **part_probe_types;
-#endif
const struct of_device_id *match;
struct device_node *dp = dev->dev.of_node;
struct resource res;
@@ -346,7 +331,6 @@ static int __devinit of_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
-#ifdef CONFIG_MTD_PARTITIONS
part_probe_types = of_get_probes(dp);
err = parse_mtd_partitions(info->cmtd, part_probe_types,
&info->parts, 0);
@@ -356,13 +340,11 @@ static int __devinit of_flash_probe(struct platform_device *dev)
}
of_free_probes(part_probe_types);
-#ifdef CONFIG_MTD_OF_PARTS
if (err == 0) {
err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
if (err < 0)
goto err_out;
}
-#endif
if (err == 0) {
err = parse_obsolete_partitions(dev, info, dp);
@@ -370,11 +352,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
goto err_out;
}
- if (err > 0)
- add_mtd_partitions(info->cmtd, info->parts, err);
- else
-#endif
- add_mtd_device(info->cmtd);
+ mtd_device_register(info->cmtd, info->parts, err);
kfree(mtd_list);
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 76a76be5a7bd..9ca1eccba4bc 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -94,14 +94,11 @@ static int platram_remove(struct platform_device *pdev)
return 0;
if (info->mtd) {
-#ifdef CONFIG_MTD_PARTITIONS
+ mtd_device_unregister(info->mtd);
if (info->partitions) {
- del_mtd_partitions(info->mtd);
if (info->free_partitions)
kfree(info->partitions);
}
-#endif
- del_mtd_device(info->mtd);
map_destroy(info->mtd);
}
@@ -231,7 +228,6 @@ static int platram_probe(struct platform_device *pdev)
/* check to see if there are any available partitions, or whether
* to add this device whole */
-#ifdef CONFIG_MTD_PARTITIONS
if (!pdata->nr_partitions) {
/* try to probe using the supplied probe type */
if (pdata->probes) {
@@ -239,24 +235,22 @@ static int platram_probe(struct platform_device *pdev)
&info->partitions, 0);
info->free_partitions = 1;
if (err > 0)
- err = add_mtd_partitions(info->mtd,
+ err = mtd_device_register(info->mtd,
info->partitions, err);
}
}
/* use the static mapping */
else
- err = add_mtd_partitions(info->mtd, pdata->partitions,
- pdata->nr_partitions);
-#endif /* CONFIG_MTD_PARTITIONS */
-
- if (add_mtd_device(info->mtd)) {
- dev_err(&pdev->dev, "add_mtd_device() failed\n");
- err = -ENOMEM;
- }
-
+ err = mtd_device_register(info->mtd, pdata->partitions,
+ pdata->nr_partitions);
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err)
+ dev_err(&pdev->dev, "failed to register the entire device\n");
+
return err;
exit_free:
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 64aea6acd48e..744ca5cacc9b 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -173,7 +173,7 @@ static int __init init_msp_flash(void)
msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
if (msp_flash[i]) {
msp_flash[i]->owner = THIS_MODULE;
- add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt);
+ mtd_device_register(msp_flash[i], msp_parts[i], pcnt);
} else {
printk(KERN_ERR "map probe failed for flash\n");
ret = -ENXIO;
@@ -188,7 +188,7 @@ static int __init init_msp_flash(void)
cleanup_loop:
while (i--) {
- del_mtd_partitions(msp_flash[i]);
+ mtd_device_unregister(msp_flash[i]);
map_destroy(msp_flash[i]);
kfree(msp_maps[i].name);
iounmap(msp_maps[i].virt);
@@ -207,7 +207,7 @@ static void __exit cleanup_msp_flash(void)
int i;
for (i = 0; i < fcnt; i++) {
- del_mtd_partitions(msp_flash[i]);
+ mtd_device_unregister(msp_flash[i]);
map_destroy(msp_flash[i]);
iounmap((void *)msp_maps[i].virt);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d8ae634d347e..f59d62f74d44 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -104,23 +104,18 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
if (ret > 0) {
info->nr_parts = ret;
info->parts = parts;
}
-#endif
- if (info->nr_parts) {
- add_mtd_partitions(info->mtd, info->parts,
- info->nr_parts);
- } else {
+ if (!info->nr_parts)
printk("Registering %s as whole device\n",
info->map.name);
- add_mtd_device(info->mtd);
- }
+
+ mtd_device_register(info->mtd, info->parts, info->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
@@ -132,12 +127,7 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
platform_set_drvdata(dev, NULL);
-#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts)
- del_mtd_partitions(info->mtd);
- else
-#endif
- del_mtd_device(info->mtd);
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
iounmap(info->map.virt);
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 83ed64512c5e..761fb459d2c7 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -25,10 +25,8 @@
struct rbtx4939_flash_info {
struct mtd_info *mtd;
struct map_info map;
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
-#endif
};
static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -41,28 +39,18 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
platform_set_drvdata(dev, NULL);
if (info->mtd) {
-#ifdef CONFIG_MTD_PARTITIONS
struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
- if (info->nr_parts) {
- del_mtd_partitions(info->mtd);
+ if (info->nr_parts)
kfree(info->parts);
- } else if (pdata->nr_parts)
- del_mtd_partitions(info->mtd);
- else
- del_mtd_device(info->mtd);
-#else
- del_mtd_device(info->mtd);
-#endif
+ mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
return 0;
}
static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[] = { "cmdlinepart", NULL };
-#endif
static int rbtx4939_flash_probe(struct platform_device *dev)
{
@@ -120,23 +108,21 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(info->mtd, part_probe_types,
&info->parts, 0);
if (err > 0) {
- add_mtd_partitions(info->mtd, info->parts, err);
+ mtd_device_register(info->mtd, info->parts, err);
info->nr_parts = err;
return 0;
}
if (pdata->nr_parts) {
pr_notice("Using rbtx4939 partition information\n");
- add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
return 0;
}
-#endif
- add_mtd_device(info->mtd);
+ mtd_device_register(info->mtd, NULL, 0);
return 0;
err_out:
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 3e3ef53d4fd4..ed88225bf667 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -36,7 +36,7 @@ static int __init init_rpxlite(void)
mymtd = do_map_probe("cfi_probe", &rpxlite_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
- add_mtd_device(mymtd);
+ mtd_device_register(mymtd, NULL, 0);
return 0;
}
@@ -47,7 +47,7 @@ static int __init init_rpxlite(void)
static void __exit cleanup_rpxlite(void)
{
if (mymtd) {
- del_mtd_device(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (rpxlite_map.virt) {
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index da875908ea8e..a9b5e0e5c4c5 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -226,12 +226,7 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
int i;
if (info->mtd) {
- if (info->nr_parts == 0)
- del_mtd_device(info->mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- else
- del_mtd_partitions(info->mtd);
-#endif
+ mtd_device_unregister(info->mtd);
if (info->mtd != info->subdev[0].mtd)
mtd_concat_destroy(info->mtd);
}
@@ -363,28 +358,24 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
/*
* Partition selection stuff.
*/
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
if (nr_parts > 0) {
info->parts = parts;
part_type = "dynamic";
- } else
-#endif
- {
+ } else {
parts = plat->parts;
nr_parts = plat->nr_parts;
part_type = "static";
}
- if (nr_parts == 0) {
+ if (nr_parts == 0)
printk(KERN_NOTICE "SA1100 flash: no partition info "
"available, registering whole flash\n");
- add_mtd_device(info->mtd);
- } else {
+ else
printk(KERN_NOTICE "SA1100 flash: using %s partition "
"definition\n", part_type);
- add_mtd_partitions(info->mtd, parts, nr_parts);
- }
+
+ mtd_device_register(info->mtd, parts, nr_parts);
info->nr_parts = nr_parts;
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 04b2781fc627..556a2dfe94c5 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -182,7 +182,7 @@ static struct mtd_info *all_mtd;
static void cleanup_sbc_gxx(void)
{
if( all_mtd ) {
- del_mtd_partitions( all_mtd );
+ mtd_device_unregister(all_mtd);
map_destroy( all_mtd );
}
@@ -223,7 +223,7 @@ static int __init init_sbc_gxx(void)
all_mtd->owner = THIS_MODULE;
/* Create MTD devices for each partition. */
- add_mtd_partitions(all_mtd, partition_info, NUM_PARTITIONS );
+ mtd_device_register(all_mtd, partition_info, NUM_PARTITIONS);
return 0;
}
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 4d8aaaf4bb76..8fead8e46bce 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -266,10 +266,10 @@ static int __init init_sc520cdp(void)
/* Combine the two flash banks into a single MTD device & register it: */
merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1");
if(merged_mtd)
- add_mtd_device(merged_mtd);
+ mtd_device_register(merged_mtd, NULL, 0);
}
if(devices_found == 3) /* register the third (DIL-Flash) device */
- add_mtd_device(mymtd[2]);
+ mtd_device_register(mymtd[2], NULL, 0);
return(devices_found ? 0 : -ENXIO);
}
@@ -278,11 +278,11 @@ static void __exit cleanup_sc520cdp(void)
int i;
if (merged_mtd) {
- del_mtd_device(merged_mtd);
+ mtd_device_unregister(merged_mtd);
mtd_concat_destroy(merged_mtd);
}
if (mymtd[2])
- del_mtd_device(mymtd[2]);
+ mtd_device_unregister(mymtd[2]);
for (i = 0; i < NUM_FLASH_BANKS; i++) {
if (mymtd[i])
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 7e329f09a548..d88c8426bb0f 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -180,7 +180,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
scb2_mtd->owner = THIS_MODULE;
if (scb2_fixup_mtd(scb2_mtd) < 0) {
- del_mtd_device(scb2_mtd);
+ mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
iounmap(scb2_ioaddr);
if (!region_fail)
@@ -192,7 +192,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
(unsigned long long)scb2_mtd->size,
(unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
- add_mtd_device(scb2_mtd);
+ mtd_device_register(scb2_mtd, NULL, 0);
return 0;
}
@@ -207,7 +207,7 @@ scb2_flash_remove(struct pci_dev *dev)
if (scb2_mtd->lock)
scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
- del_mtd_device(scb2_mtd);
+ mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
iounmap(scb2_ioaddr);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 027e628a4f1d..f1c1f737d0d7 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -44,7 +44,6 @@ static struct resource docmem = {
static struct mtd_info *mymtd;
-#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition partition_info[] = {
{
.name = "DOCCS Boot kernel",
@@ -68,8 +67,6 @@ static struct mtd_partition partition_info[] = {
},
};
#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-#endif
-
static struct map_info scx200_docflash_map = {
.name = "NatSemi SCx200 DOCCS Flash",
@@ -198,24 +195,17 @@ static int __init init_scx200_docflash(void)
mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_PARTITIONS
partition_info[3].offset = mymtd->size-partition_info[3].size;
partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
- add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
-#else
- add_mtd_device(mymtd);
-#endif
+ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
+
return 0;
}
static void __exit cleanup_scx200_docflash(void)
{
if (mymtd) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(mymtd);
-#else
- del_mtd_device(mymtd);
-#endif
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (scx200_docflash_map.virt) {
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 0eb41d9c6786..cbf6bade9354 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -89,7 +89,7 @@ static int __init init_soleng_maps(void)
eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
if (eprom_mtd) {
eprom_mtd->owner = THIS_MODULE;
- add_mtd_device(eprom_mtd);
+ mtd_device_register(eprom_mtd, NULL, 0);
}
nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
@@ -104,9 +104,9 @@ static int __init init_soleng_maps(void)
#endif /* CONFIG_MTD_SUPERH_RESERVE */
if (nr_parts > 0)
- add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
+ mtd_device_register(flash_mtd, parsed_parts, nr_parts);
else
- add_mtd_device(flash_mtd);
+ mtd_device_register(flash_mtd, NULL, 0);
return 0;
}
@@ -114,14 +114,14 @@ static int __init init_soleng_maps(void)
static void __exit cleanup_soleng_maps(void)
{
if (eprom_mtd) {
- del_mtd_device(eprom_mtd);
+ mtd_device_unregister(eprom_mtd);
map_destroy(eprom_mtd);
}
if (parsed_parts)
- del_mtd_partitions(flash_mtd);
+ mtd_device_unregister(flash_mtd);
else
- del_mtd_device(flash_mtd);
+ mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
}
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 3f1cb328a574..2d66234f57cb 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -101,7 +101,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
up->mtd->owner = THIS_MODULE;
- add_mtd_device(up->mtd);
+ mtd_device_register(up->mtd, NULL, 0);
dev_set_drvdata(&op->dev, up);
@@ -126,7 +126,7 @@ static int __devexit uflash_remove(struct platform_device *op)
struct uflash_dev *up = dev_get_drvdata(&op->dev);
if (up->mtd) {
- del_mtd_device(up->mtd);
+ mtd_device_unregister(up->mtd);
map_destroy(up->mtd);
}
if (up->map.virt) {
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 0718dfb3ee64..d78587990e7e 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -62,7 +62,6 @@ static void __iomem *start_scan_addr;
* "struct map_desc *_io_desc" for the corresponding machine.
*/
-#ifdef CONFIG_MTD_PARTITIONS
/* Currently, TQM8xxL has up to 8MiB flash */
static unsigned long tqm8xxl_max_flash_size = 0x00800000;
@@ -107,7 +106,6 @@ static struct mtd_partition tqm8xxl_fs_partitions[] = {
//.size = MTDPART_SIZ_FULL,
}
};
-#endif
static int __init init_tqm_mtd(void)
{
@@ -188,7 +186,6 @@ static int __init init_tqm_mtd(void)
goto error_mem;
}
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Select Static partition definitions
*/
@@ -201,21 +198,14 @@ static int __init init_tqm_mtd(void)
part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
for(idx = 0; idx < num_banks ; idx++) {
- if (part_banks[idx].nums == 0) {
+ if (part_banks[idx].nums == 0)
printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
- add_mtd_device(mtd_banks[idx]);
- } else {
+ else
printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
idx, part_banks[idx].type);
- add_mtd_partitions(mtd_banks[idx], part_banks[idx].mtd_part,
- part_banks[idx].nums);
- }
+ mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part,
+ part_banks[idx].nums);
}
-#else
- printk(KERN_NOTICE "TQM flash: registering %d whole flash banks at once\n", num_banks);
- for(idx = 0 ; idx < num_banks ; idx++)
- add_mtd_device(mtd_banks[idx]);
-#endif
return 0;
error_mem:
for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
@@ -237,7 +227,7 @@ static void __exit cleanup_tqm_mtd(void)
for(idx = 0 ; idx < num_banks ; idx++) {
/* destroy mtd_info previously allocated */
if (mtd_banks[idx]) {
- del_mtd_partitions(mtd_banks[idx]);
+ mtd_device_unregister(mtd_banks[idx]);
map_destroy(mtd_banks[idx]);
}
/* release map_info not used anymore */
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index e02dfa9d4ddd..d1d671daf235 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -89,7 +89,7 @@ static int __init init_ts5500_map(void)
}
mymtd->owner = THIS_MODULE;
- add_mtd_partitions(mymtd, ts5500_partitions, NUM_PARTITIONS);
+ mtd_device_register(mymtd, ts5500_partitions, NUM_PARTITIONS);
return 0;
@@ -102,7 +102,7 @@ err2:
static void __exit cleanup_ts5500_map(void)
{
if (mymtd) {
- del_mtd_partitions(mymtd);
+ mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 77a8bfc02577..1de390e1c2fb 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -76,7 +76,7 @@ static void __exit cleanup_tsunami_flash(void)
struct mtd_info *mtd;
mtd = tsunami_flash_mtd;
if (mtd) {
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
map_destroy(mtd);
}
tsunami_flash_mtd = 0;
@@ -97,7 +97,7 @@ static int __init init_tsunami_flash(void)
}
if (tsunami_flash_mtd) {
tsunami_flash_mtd->owner = THIS_MODULE;
- add_mtd_device(tsunami_flash_mtd);
+ mtd_device_register(tsunami_flash_mtd, NULL, 0);
return 0;
}
return -ENXIO;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 35009294b435..6793074f3f40 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -89,11 +89,7 @@ static int __init uclinux_mtd_init(void)
mtd->priv = mapp;
uclinux_ram_mtdinfo = mtd;
-#ifdef CONFIG_MTD_PARTITIONS
- add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS);
-#else
- add_mtd_device(mtd);
-#endif
+ mtd_device_register(mtd, uclinux_romfs, NUM_PARTITIONS);
return(0);
}
@@ -103,11 +99,7 @@ static int __init uclinux_mtd_init(void)
static void __exit uclinux_mtd_cleanup(void)
{
if (uclinux_ram_mtdinfo) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(uclinux_ram_mtdinfo);
-#else
- del_mtd_device(uclinux_ram_mtdinfo);
-#endif
+ mtd_device_unregister(uclinux_ram_mtdinfo);
map_destroy(uclinux_ram_mtdinfo);
uclinux_ram_mtdinfo = NULL;
}
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index 6adaa6acc193..5e68de73eabc 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -138,7 +138,7 @@ static void __exit cleanup_vmax301(void)
for (i=0; i<2; i++) {
if (vmax_mtd[i]) {
- del_mtd_device(vmax_mtd[i]);
+ mtd_device_unregister(vmax_mtd[i]);
map_destroy(vmax_mtd[i]);
}
}
@@ -176,7 +176,7 @@ static int __init init_vmax301(void)
vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
if (vmax_mtd[i]) {
vmax_mtd[i]->owner = THIS_MODULE;
- add_mtd_device(vmax_mtd[i]);
+ mtd_device_register(vmax_mtd[i], NULL, 0);
}
}
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 4afc167731ef..3a04b078576a 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -563,7 +563,7 @@ static void vmu_queryblocks(struct mapleq *mq)
goto fail_cache_create;
part_cur->pcache = pcache;
- error = add_mtd_device(mtd_cur);
+ error = mtd_device_register(mtd_cur, NULL, 0);
if (error)
goto fail_mtd_register;
@@ -709,7 +709,7 @@ static void __devexit vmu_disconnect(struct maple_device *mdev)
for (x = 0; x < card->partitions; x++) {
mpart = ((card->mtd)[x]).priv;
mpart->mdev = NULL;
- del_mtd_device(&((card->mtd)[x]));
+ mtd_device_unregister(&((card->mtd)[x]));
kfree(((card->parts)[x]).name);
}
kfree(card->parts);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 933a2b6598b4..901ce968efae 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -132,17 +132,20 @@ static int __init init_sbc82xx_flash(void)
nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
&sbcmtd_parts[i], 0);
if (nr_parts > 0) {
- add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts);
+ mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
+ nr_parts);
continue;
}
/* No partitioning detected. Use default */
if (i == 2) {
- add_mtd_device(sbcmtd[i]);
+ mtd_device_register(sbcmtd[i], NULL, 0);
} else if (i == bigflash) {
- add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts));
+ mtd_device_register(sbcmtd[i], bigflash_parts,
+ ARRAY_SIZE(bigflash_parts));
} else {
- add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts));
+ mtd_device_register(sbcmtd[i], smallflash_parts,
+ ARRAY_SIZE(smallflash_parts));
}
}
return 0;
@@ -157,9 +160,9 @@ static void __exit cleanup_sbc82xx_flash(void)
continue;
if (i<2 || sbcmtd_parts[i])
- del_mtd_partitions(sbcmtd[i]);
+ mtd_device_unregister(sbcmtd[i]);
else
- del_mtd_device(sbcmtd[i]);
+ mtd_device_unregister(sbcmtd[i]);
kfree(sbcmtd_parts[i]);
map_destroy(sbcmtd[i]);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a534e1f0c348..ca385697446e 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -221,15 +221,33 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
kref_get(&dev->ref);
__module_get(dev->tr->owner);
- if (dev->mtd) {
- ret = dev->tr->open ? dev->tr->open(dev) : 0;
- __get_mtd_device(dev->mtd);
+ if (!dev->mtd)
+ goto unlock;
+
+ if (dev->tr->open) {
+ ret = dev->tr->open(dev);
+ if (ret)
+ goto error_put;
}
+ ret = __get_mtd_device(dev->mtd);
+ if (ret)
+ goto error_release;
+
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
+
+error_release:
+ if (dev->tr->release)
+ dev->tr->release(dev);
+error_put:
+ module_put(dev->tr->owner);
+ kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static int blktrans_release(struct gendisk *disk, fmode_t mode)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4c36ef66a46b..3f92731a5b9e 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -166,10 +166,23 @@ static int mtd_close(struct inode *inode, struct file *file)
return 0;
} /* mtd_close */
-/* FIXME: This _really_ needs to die. In 2.5, we should lock the
- userspace buffer down and use it directly with readv/writev.
-*/
-#define MAX_KMALLOC_SIZE 0x20000
+/* Back in June 2001, dwmw2 wrote:
+ *
+ * FIXME: This _really_ needs to die. In 2.5, we should lock the
+ * userspace buffer down and use it directly with readv/writev.
+ *
+ * The implementation below, using mtd_kmalloc_up_to, mitigates
+ * allocation failures when the system is low on memory or when memory
+ * is highly fragmented, at the cost of reduced transfer performance
+ * due to the smaller buffer size.
+ *
+ * A more complex but more memory-efficient implementation based on
+ * get_user_pages and iovecs to cover extents of those pages is a
+ * longer-term goal, as intimated by dwmw2 above. However, for the
+ * write case, this requires yet more complex head and tail transfer
+ * handling when those head and tail offsets and sizes are such that
+ * alignment requirements are not met in the NAND subdriver.
+ */
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
@@ -179,6 +192,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
size_t total_retlen=0;
int ret=0;
int len;
+ size_t size = count;
char *kbuf;
DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
@@ -189,23 +203,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
if (!count)
return 0;
- /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
- and pass them directly to the MTD functions */
-
- if (count > MAX_KMALLOC_SIZE)
- kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
- else
- kbuf=kmalloc(count, GFP_KERNEL);
-
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
if (!kbuf)
return -ENOMEM;
while (count) {
-
- if (count > MAX_KMALLOC_SIZE)
- len = MAX_KMALLOC_SIZE;
- else
- len = count;
+ len = min_t(size_t, count, size);
switch (mfi->mode) {
case MTD_MODE_OTP_FACTORY:
@@ -268,6 +271,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ size_t size = count;
char *kbuf;
size_t retlen;
size_t total_retlen=0;
@@ -285,20 +289,12 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
if (!count)
return 0;
- if (count > MAX_KMALLOC_SIZE)
- kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
- else
- kbuf=kmalloc(count, GFP_KERNEL);
-
+ kbuf = mtd_kmalloc_up_to(mtd, &size);
if (!kbuf)
return -ENOMEM;
while (count) {
-
- if (count > MAX_KMALLOC_SIZE)
- len = MAX_KMALLOC_SIZE;
- else
- len = count;
+ len = min_t(size_t, count, size);
if (copy_from_user(kbuf, buf, len)) {
kfree(kbuf);
@@ -512,7 +508,6 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
return 0;
}
-#ifdef CONFIG_MTD_PARTITIONS
static int mtd_blkpg_ioctl(struct mtd_info *mtd,
struct blkpg_ioctl_arg __user *arg)
{
@@ -548,8 +543,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
return -EINVAL;
}
}
-#endif
-
static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
@@ -941,7 +934,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
break;
}
-#ifdef CONFIG_MTD_PARTITIONS
case BLKPG:
{
ret = mtd_blkpg_ioctl(mtd,
@@ -955,7 +947,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
ret = 0;
break;
}
-#endif
default:
ret = -ENOTTY;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 5060e608ea5d..e601672a5305 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -319,7 +319,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- ops->retlen = 0;
+ ops->retlen = ops->oobretlen = 0;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
@@ -334,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
devops.len = subdev->size - to;
err = subdev->write_oob(subdev, to, &devops);
- ops->retlen += devops.retlen;
+ ops->retlen += devops.oobretlen;
if (err)
return err;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index da69bc8a5a7d..c510aff289a8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
+#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
@@ -37,6 +38,7 @@
#include <linux/gfp.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include "mtdcore.h"
/*
@@ -391,7 +393,7 @@ fail_locked:
* if the requested device does not appear to be present in the list.
*/
-int del_mtd_device (struct mtd_info *mtd)
+int del_mtd_device(struct mtd_info *mtd)
{
int ret;
struct mtd_notifier *not;
@@ -427,6 +429,50 @@ out_error:
}
/**
+ * mtd_device_register - register an MTD device.
+ *
+ * @master: the MTD device to register
+ * @parts: the partitions to register - only valid if nr_parts > 0
+ * @nr_parts: the number of partitions in parts. If zero then the full MTD
+ * device is registered
+ *
+ * Register an MTD device with the system and, optionally, a number of
+ * partitions. If nr_parts is 0 then the whole device is registered, otherwise
+ * only the partitions are registered. To register both the full device *and*
+ * the partitions, call mtd_device_register() twice, once with nr_parts == 0
+ * and once with nr_parts equal to the number of partitions.
+ */
+int mtd_device_register(struct mtd_info *master,
+ const struct mtd_partition *parts,
+ int nr_parts)
+{
+ return parts ? add_mtd_partitions(master, parts, nr_parts) :
+ add_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_register);
+
+/**
+ * mtd_device_unregister - unregister an existing MTD device.
+ *
+ * @master: the MTD device to unregister. This will unregister both the master
+ * and any partitions if registered.
+ */
+int mtd_device_unregister(struct mtd_info *master)
+{
+ int err;
+
+ err = del_mtd_partitions(master);
+ if (err)
+ return err;
+
+ if (!device_is_registered(&master->dev))
+ return 0;
+
+ return del_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_unregister);
+
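As an illustration of the calling convention documented above, a minimal map-driver init/exit pair converted to the new API might look like the sketch below. This is a hypothetical example, not part of this patch: my_map, my_probes, my_mtd and my_parts are assumed names.

/* Hypothetical example driver -- not part of this patch. */
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>

static struct map_info my_map;		/* assumed to be filled in by board code */
static const char *my_probes[] = { "cmdlinepart", "RedBoot", NULL };
static struct mtd_info *my_mtd;
static struct mtd_partition *my_parts;

static int __init my_init(void)
{
	int nr_parts;

	my_mtd = do_map_probe("cfi_probe", &my_map);
	if (!my_mtd)
		return -ENXIO;
	my_mtd->owner = THIS_MODULE;

	/* Register parsed partitions if any, otherwise the whole device. */
	nr_parts = parse_mtd_partitions(my_mtd, my_probes, &my_parts, 0);
	if (nr_parts <= 0) {
		my_parts = NULL;
		nr_parts = 0;
	}
	return mtd_device_register(my_mtd, my_parts, nr_parts);
}

static void __exit my_exit(void)
{
	/* One call removes the master and/or any registered partitions. */
	mtd_device_unregister(my_mtd);
	map_destroy(my_mtd);
}

This mirrors the per-driver conversions above: the old add_mtd_device()/add_mtd_partitions() branching collapses into a single mtd_device_register() call, and every cleanup path uses mtd_device_unregister() regardless of how the device was registered.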
+/**
* register_mtd_user - register a 'user' of MTD devices.
* @new: pointer to notifier info structure
*
@@ -443,7 +489,7 @@ void register_mtd_user (struct mtd_notifier *new)
list_add(&new->list, &mtd_notifiers);
- __module_get(THIS_MODULE);
+ __module_get(THIS_MODULE);
mtd_for_each_device(mtd)
new->add(mtd);
@@ -532,7 +578,6 @@ int __get_mtd_device(struct mtd_info *mtd)
return -ENODEV;
if (mtd->get_device) {
-
err = mtd->get_device(mtd);
if (err) {
@@ -570,21 +615,13 @@ struct mtd_info *get_mtd_device_nm(const char *name)
if (!mtd)
goto out_unlock;
- if (!try_module_get(mtd->owner))
+ err = __get_mtd_device(mtd);
+ if (err)
goto out_unlock;
- if (mtd->get_device) {
- err = mtd->get_device(mtd);
- if (err)
- goto out_put;
- }
-
- mtd->usecount++;
mutex_unlock(&mtd_table_mutex);
return mtd;
-out_put:
- module_put(mtd->owner);
out_unlock:
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
@@ -638,8 +675,54 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
return ret;
}
-EXPORT_SYMBOL_GPL(add_mtd_device);
-EXPORT_SYMBOL_GPL(del_mtd_device);
+/**
+ * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
+ * @mtd: the MTD device whose minimal I/O unit ("mtd->writesize") the buffer
+ * is aligned to
+ * @size: A pointer to the ideal or maximum size of the allocation. Points
+ * to the actual allocation size on success.
+ *
+ * This routine attempts to allocate a contiguous kernel buffer up to
+ * the specified size, backing off the size of the request exponentially
+ * until the request succeeds or until the allocation size falls below
+ * the system page size. This attempts to make sure it does not adversely
+ * impact system performance, so when allocating more than one page, we
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
+ *
+ * Note, this function also makes sure that the allocated buffer is aligned to
+ * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
+ *
+ * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
+ * to handle smaller (i.e. degraded) buffer allocations under low- or
+ * fragmented-memory situations where such reduced allocations, from a
+ * requested ideal, are allowed.
+ *
+ * Returns a pointer to the allocated buffer on success; otherwise, NULL.
+ */
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
+{
+ gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
+ __GFP_NORETRY | __GFP_NO_KSWAPD;
+ size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
+ void *kbuf;
+
+ *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
+
+ while (*size > min_alloc) {
+ kbuf = kmalloc(*size, flags);
+ if (kbuf)
+ return kbuf;
+
+ *size >>= 1;
+ *size = ALIGN(*size, mtd->writesize);
+ }
+
+ /*
+ * For the last resort allocation allow 'kmalloc()' to do all sorts of
+ * things (write-back, dropping caches, etc) by using GFP_KERNEL.
+ */
+ return kmalloc(*size, GFP_KERNEL);
+}
+
EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
EXPORT_SYMBOL_GPL(__get_mtd_device);
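mtd_kmalloc_up_to() is meant to be handed the caller's ideal buffer size and, on success, reports back how much memory was actually obtained. A hedged sketch of the calling convention; the erase-block-sized ideal and the helper name are examples, not taken from this change:

	/* Illustrative only: allocate the largest I/O buffer the system will give us. */
	static void *example_alloc_io_buffer(struct mtd_info *mtd, size_t *retlen)
	{
		size_t size = mtd->erasesize;		/* ideal size: one erase block */
		void *buf = mtd_kmalloc_up_to(mtd, &size);

		if (!buf)
			return NULL;

		/*
		 * 'size' may have been reduced from the ideal (halved repeatedly,
		 * roughly down to a single page) and is kept aligned to
		 * mtd->writesize, so callers must work in 'size'-sized chunks
		 * rather than assuming the ideal size was granted.
		 */
		*retlen = size;
		return buf;
	}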
@@ -648,6 +731,7 @@ EXPORT_SYMBOL_GPL(__put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
+EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
#ifdef CONFIG_PROC_FS
@@ -656,44 +740,32 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
static struct proc_dir_entry *proc_mtd;
-static inline int mtd_proc_info(char *buf, struct mtd_info *this)
-{
- return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
- (unsigned long long)this->size,
- this->erasesize, this->name);
-}
-
-static int mtd_read_proc (char *page, char **start, off_t off, int count,
- int *eof, void *data_unused)
+static int mtd_proc_show(struct seq_file *m, void *v)
{
struct mtd_info *mtd;
- int len, l;
- off_t begin = 0;
+ seq_puts(m, "dev: size erasesize name\n");
mutex_lock(&mtd_table_mutex);
-
- len = sprintf(page, "dev: size erasesize name\n");
mtd_for_each_device(mtd) {
- l = mtd_proc_info(page + len, mtd);
- len += l;
- if (len+begin > off+count)
- goto done;
- if (len+begin < off) {
- begin += len;
- len = 0;
- }
- }
-
- *eof = 1;
-
-done:
+ seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
+ mtd->index, (unsigned long long)mtd->size,
+ mtd->erasesize, mtd->name);
+ }
mutex_unlock(&mtd_table_mutex);
- if (off >= len+begin)
- return 0;
- *start = page + (off-begin);
- return ((count < begin+len-off) ? count : begin+len-off);
+ return 0;
+}
+
+static int mtd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtd_proc_show, NULL);
}
+static const struct file_operations mtd_proc_ops = {
+ .open = mtd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif /* CONFIG_PROC_FS */
/*====================================================================*/
@@ -734,8 +806,7 @@ static int __init init_mtd(void)
goto err_bdi3;
#ifdef CONFIG_PROC_FS
- if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
- proc_mtd->read_proc = mtd_read_proc;
+ proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
#endif /* CONFIG_PROC_FS */
return 0;
@@ -753,7 +824,7 @@ err_reg:
static void __exit cleanup_mtd(void)
{
#ifdef CONFIG_PROC_FS
- if (proc_mtd)
+ if (proc_mtd)
remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
class_unregister(&mtd_class);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 6a64fdebc898..0ed6126b4c1f 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -10,6 +10,12 @@
extern struct mutex mtd_table_mutex;
extern struct mtd_info *__mtd_next_device(int i);
+extern int add_mtd_device(struct mtd_info *mtd);
+extern int del_mtd_device(struct mtd_info *mtd);
+extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
+ int);
+extern int del_mtd_partitions(struct mtd_info *);
+
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
(mtd) != NULL; \
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 0a4760174782..630be3e7da04 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -31,6 +31,8 @@
#include <linux/mtd/partitions.h>
#include <linux/err.h>
+#include "mtdcore.h"
+
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
@@ -376,7 +378,6 @@ int del_mtd_partitions(struct mtd_info *master)
return err;
}
-EXPORT_SYMBOL(del_mtd_partitions);
static struct mtd_part *allocate_partition(struct mtd_info *master,
const struct mtd_partition *part, int partno,
@@ -671,7 +672,6 @@ int add_mtd_partitions(struct mtd_info *master,
return 0;
}
-EXPORT_SYMBOL(add_mtd_partitions);
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
@@ -722,11 +722,8 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
parser = get_partition_parser(*types);
if (!parser && !request_module("%s", *types))
parser = get_partition_parser(*types);
- if (!parser) {
- printk(KERN_NOTICE "%s partition parsing not available\n",
- *types);
+ if (!parser)
continue;
- }
ret = (*parser->parse_fn)(master, pparts, origin);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fed215c4cfa1..fd7885327611 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1450,7 +1450,13 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
}
oinfo = mtd->ecclayout;
- if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) {
+ if (!oinfo) {
+ printk(KERN_ERR "%s: mtd%d does not have OOB\n",
+ MTDSWAP_PREFIX, mtd->index);
+ return;
+ }
+
+ if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
printk(KERN_ERR "%s: Not enough free bytes in OOB, "
"%d available, %zu needed.\n",
MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index edec457d361d..4c3425235adc 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -92,7 +92,7 @@ config MTD_NAND_EDB7312
config MTD_NAND_H1900
tristate "iPAQ H1900 flash"
- depends on ARCH_PXA && MTD_PARTITIONS
+ depends on ARCH_PXA
help
This enables the driver for the iPAQ h1900 flash.
@@ -419,7 +419,6 @@ config MTD_NAND_TMIO
config MTD_NAND_NANDSIM
tristate "Support for NAND Flash Simulator"
- depends on MTD_PARTITIONS
help
The simulator may simulate various NAND flash chips for the
MTD nand layer.
@@ -513,7 +512,7 @@ config MTD_NAND_SOCRATES
config MTD_NAND_NUC900
tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
- depends on ARCH_W90X900 && MTD_PARTITIONS
+ depends on ARCH_W90X900
help
This enables the driver for the NAND Flash on evaluation boards based
on w90p910 / NUC9xx.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 8691e0482ed2..eb40ea829ab2 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -120,7 +120,7 @@ static void alauda_delete(struct kref *kref)
struct alauda *al = container_of(kref, struct alauda, kref);
if (al->mtd) {
- del_mtd_device(al->mtd);
+ mtd_device_unregister(al->mtd);
kfree(al->mtd);
}
usb_put_dev(al->dev);
@@ -592,7 +592,7 @@ static int alauda_init_media(struct alauda *al)
mtd->priv = al;
mtd->owner = THIS_MODULE;
- err = add_mtd_device(mtd);
+ err = mtd_device_register(mtd, NULL, 0);
if (err) {
err = -ENFILE;
goto error;
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index bc65bf71e1a2..78017eb9318e 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -235,8 +235,8 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
}
/* Register the partitions */
- add_mtd_partitions(ams_delta_mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ mtd_device_register(ams_delta_mtd, partition_info,
+ ARRAY_SIZE(partition_info));
goto out;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 950646aa4c4b..b300705d41cb 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -30,6 +30,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/io.h>
@@ -494,11 +495,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
struct resource *regs;
struct resource *mem;
int res;
-
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_partitions = 0;
-#endif
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
@@ -656,7 +654,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
goto err_scan_tail;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "atmel_nand";
num_partitions = parse_mtd_partitions(mtd, part_probes,
@@ -672,17 +669,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
goto err_no_partitions;
}
- res = add_mtd_partitions(mtd, partitions, num_partitions);
-#else
- res = add_mtd_device(mtd);
-#endif
-
+ res = mtd_device_register(mtd, partitions, num_partitions);
if (!res)
return res;
-#ifdef CONFIG_MTD_PARTITIONS
err_no_partitions:
-#endif
nand_release(mtd);
err_scan_tail:
err_scan_ident:
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 5d513b54a7d7..e7767eef4505 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -581,7 +581,8 @@ static int __init au1xxx_nand_init(void)
}
/* Register the partitions */
- add_mtd_partitions(au1550_mtd, partition_info, ARRAY_SIZE(partition_info));
+ mtd_device_register(au1550_mtd, partition_info,
+ ARRAY_SIZE(partition_info));
return 0;
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index 0911cf03db80..eddc9a224985 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -185,20 +185,20 @@ static int __init autcpu12_init(void)
/* Register the partitions */
switch (autcpu12_mtd->size) {
case SZ_16M:
- add_mtd_partitions(autcpu12_mtd, partition_info16k,
- NUM_PARTITIONS16K);
+ mtd_device_register(autcpu12_mtd, partition_info16k,
+ NUM_PARTITIONS16K);
break;
case SZ_32M:
- add_mtd_partitions(autcpu12_mtd, partition_info32k,
- NUM_PARTITIONS32K);
+ mtd_device_register(autcpu12_mtd, partition_info32k,
+ NUM_PARTITIONS32K);
break;
case SZ_64M:
- add_mtd_partitions(autcpu12_mtd, partition_info64k,
- NUM_PARTITIONS64K);
+ mtd_device_register(autcpu12_mtd, partition_info64k,
+ NUM_PARTITIONS64K);
break;
case SZ_128M:
- add_mtd_partitions(autcpu12_mtd, partition_info128k,
- NUM_PARTITIONS128K);
+ mtd_device_register(autcpu12_mtd, partition_info128k,
+ NUM_PARTITIONS128K);
break;
default:
printk("Unsupported SmartMedia device\n");
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index dfe262c726fb..9ec280738a9a 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -52,9 +52,7 @@
static const __devinitconst char gBanner[] = KERN_INFO \
"BCM UMI MTD NAND Driver: 1.00\n";
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
#if NAND_ECC_BCH
static uint8_t scan_ff_pattern[] = { 0xff };
@@ -509,7 +507,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
kfree(board_mtd);
return -EIO;
}
- add_mtd_partitions(board_mtd, partition_info, nr_partitions);
+ mtd_device_register(board_mtd, partition_info, nr_partitions);
}
/* Return happy */
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 79947bea4d57..dd899cb5d366 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -659,15 +659,10 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
{
struct mtd_info *mtd = &info->mtd;
-
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts = info->platform->partitions;
int nr = info->platform->nr_partitions;
- return add_mtd_partitions(mtd, parts, nr);
-#else
- return add_mtd_device(mtd);
-#endif
+ return mtd_device_register(mtd, parts, nr);
}
static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e06c8983978e..87ebb4e5b0c3 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -90,9 +90,7 @@ static unsigned int numtimings;
static int timing[3];
module_param_array(timing, int, &numtimings, 0644);
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
-#endif
/* Hrm. Why isn't this already conditional on something in the struct device? */
#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
@@ -632,10 +630,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
struct cafe_priv *cafe;
uint32_t ctrl;
int err = 0;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
int nr_parts;
-#endif
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -804,9 +800,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, mtd);
/* We register the whole device first, separate from the partitions */
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "cafe_nand";
#endif
@@ -814,9 +809,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
if (nr_parts > 0) {
cafe->parts = parts;
dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
- add_mtd_partitions(mtd, parts, nr_parts);
+ mtd_device_register(mtd, parts, nr_parts);
}
-#endif
goto out;
out_irq:
@@ -838,7 +832,6 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct cafe_priv *cafe = mtd->priv;
- del_mtd_device(mtd);
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 6e6495278258..6fc043a30d1e 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -238,7 +238,7 @@ static int __init cmx270_init(void)
/* Register the partitions */
pr_notice("Using %s partition definition\n", part_type);
- ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
+ ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
if (ret)
goto err_scan;
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 71c35a0b9826..f59ad1f2d5db 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -277,22 +277,15 @@ static int is_geode(void)
return 0;
}
-
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
-
static int __init cs553x_init(void)
{
int err = -ENXIO;
int i;
uint64_t val;
-
-#ifdef CONFIG_MTD_PARTITIONS
int mtd_parts_nb = 0;
struct mtd_partition *mtd_parts = NULL;
-#endif
/* If the CPU isn't a Geode GX or LX, abort */
if (!is_geode())
@@ -324,17 +317,11 @@ static int __init cs553x_init(void)
if (cs553x_mtd[i]) {
/* If any devices registered, return success. Else the last error. */
-#ifdef CONFIG_MTD_PARTITIONS
mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0) {
+ if (mtd_parts_nb > 0)
printk(KERN_NOTICE "Using command line partition definition\n");
- add_mtd_partitions(cs553x_mtd[i], mtd_parts, mtd_parts_nb);
- } else {
- add_mtd_device(cs553x_mtd[i]);
- }
-#else
- add_mtd_device(cs553x_mtd[i]);
-#endif
+ mtd_device_register(cs553x_mtd[i], mtd_parts,
+ mtd_parts_nb);
err = 0;
}
}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index aff3468867ac..1f34951ae1a7 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -530,6 +530,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
int ret;
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ struct mtd_partition *mtd_parts = NULL;
+ int mtd_parts_nb = 0;
/* insist on board-specific configuration */
if (!pdata)
@@ -749,41 +751,33 @@ syndrome_done:
if (ret < 0)
goto err_scan;
- if (mtd_has_partitions()) {
- struct mtd_partition *mtd_parts = NULL;
- int mtd_parts_nb = 0;
+ if (mtd_has_cmdlinepart()) {
+ static const char *probes[] __initconst = {
+ "cmdlinepart", NULL
+ };
- if (mtd_has_cmdlinepart()) {
- static const char *probes[] __initconst =
- { "cmdlinepart", NULL };
-
- mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
- &mtd_parts, 0);
- }
-
- if (mtd_parts_nb <= 0) {
- mtd_parts = pdata->parts;
- mtd_parts_nb = pdata->nr_parts;
- }
+ mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
+ &mtd_parts, 0);
+ }
- /* Register any partitions */
- if (mtd_parts_nb > 0) {
- ret = add_mtd_partitions(&info->mtd,
- mtd_parts, mtd_parts_nb);
- if (ret == 0)
- info->partitioned = true;
- }
+ if (mtd_parts_nb <= 0) {
+ mtd_parts = pdata->parts;
+ mtd_parts_nb = pdata->nr_parts;
+ }
- } else if (pdata->nr_parts) {
- dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
- pdata->nr_parts, info->mtd.name);
+ /* Register any partitions */
+ if (mtd_parts_nb > 0) {
+ ret = mtd_device_register(&info->mtd, mtd_parts,
+ mtd_parts_nb);
+ if (ret == 0)
+ info->partitioned = true;
}
/* If there's no partition info, just package the whole chip
* as a single MTD device.
*/
if (!info->partitioned)
- ret = add_mtd_device(&info->mtd) ? -ENODEV : 0;
+ ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
if (ret < 0)
goto err_scan;
@@ -824,10 +818,7 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
struct davinci_nand_info *info = platform_get_drvdata(pdev);
int status;
- if (mtd_has_partitions() && info->partitioned)
- status = del_mtd_partitions(&info->mtd);
- else
- status = del_mtd_device(&info->mtd);
+ status = mtd_device_unregister(&info->mtd);
spin_lock_irq(&davinci_nand_lock);
if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 4633f094c510..d5276218945f 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -44,16 +45,16 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
/* We define a macro here that combines all interrupts this driver uses into
* a single constant value, for convenience. */
-#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \
- INTR_STATUS0__ECC_TRANSACTION_DONE | \
- INTR_STATUS0__ECC_ERR | \
- INTR_STATUS0__PROGRAM_FAIL | \
- INTR_STATUS0__LOAD_COMP | \
- INTR_STATUS0__PROGRAM_COMP | \
- INTR_STATUS0__TIME_OUT | \
- INTR_STATUS0__ERASE_FAIL | \
- INTR_STATUS0__RST_COMP | \
- INTR_STATUS0__ERASE_COMP)
+#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
+ INTR_STATUS__ECC_TRANSACTION_DONE | \
+ INTR_STATUS__ECC_ERR | \
+ INTR_STATUS__PROGRAM_FAIL | \
+ INTR_STATUS__LOAD_COMP | \
+ INTR_STATUS__PROGRAM_COMP | \
+ INTR_STATUS__TIME_OUT | \
+ INTR_STATUS__ERASE_FAIL | \
+ INTR_STATUS__RST_COMP | \
+ INTR_STATUS__ERASE_COMP)
/* indicates whether or not the internal value for the flash bank
 * is valid */
@@ -95,30 +96,6 @@ static const struct pci_device_id denali_pci_ids[] = {
{ /* end: all zeroes */ }
};
-
-/* these are static lookup tables that give us easy access to
- * registers in the NAND controller.
- */
-static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1,
- INTR_STATUS2,
- INTR_STATUS3};
-
-static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
- DEVICE_RESET__BANK1,
- DEVICE_RESET__BANK2,
- DEVICE_RESET__BANK3};
-
-static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
- INTR_STATUS1__TIME_OUT,
- INTR_STATUS2__TIME_OUT,
- INTR_STATUS3__TIME_OUT};
-
-static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
- INTR_STATUS1__RST_COMP,
- INTR_STATUS2__RST_COMP,
- INTR_STATUS3__RST_COMP};
-
/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -180,19 +157,17 @@ static void read_status(struct denali_nand_info *denali)
static void reset_bank(struct denali_nand_info *denali)
{
uint32_t irq_status = 0;
- uint32_t irq_mask = reset_complete[denali->flash_bank] |
- operation_timeout[denali->flash_bank];
- int bank = 0;
+ uint32_t irq_mask = INTR_STATUS__RST_COMP |
+ INTR_STATUS__TIME_OUT;
clear_interrupts(denali);
- bank = device_reset_banks[denali->flash_bank];
- iowrite32(bank, denali->flash_reg + DEVICE_RESET);
+ iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
irq_status = wait_for_irq(denali, irq_mask);
- if (irq_status & operation_timeout[denali->flash_bank])
- dev_err(&denali->dev->dev, "reset bank failed.\n");
+ if (irq_status & INTR_STATUS__TIME_OUT)
+ dev_err(denali->dev, "reset bank failed.\n");
}
/* Reset the flash controller */
@@ -200,29 +175,28 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
uint32_t i;
- dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(reset_complete[i] | operation_timeout[i],
- denali->flash_reg + intr_status_addresses[i]);
+ for (i = 0 ; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
- iowrite32(device_reset_banks[i],
- denali->flash_reg + DEVICE_RESET);
+ for (i = 0 ; i < denali->max_banks; i++) {
+ iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
while (!(ioread32(denali->flash_reg +
- intr_status_addresses[i]) &
- (reset_complete[i] | operation_timeout[i])))
+ INTR_STATUS(i)) &
+ (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
cpu_relax();
- if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
- operation_timeout[i])
- dev_dbg(&denali->dev->dev,
+ if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
+ INTR_STATUS__TIME_OUT)
+ dev_dbg(denali->dev,
"NAND Reset operation timed out on bank %d\n", i);
}
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(reset_complete[i] | operation_timeout[i],
- denali->flash_reg + intr_status_addresses[i]);
+ for (i = 0; i < denali->max_banks; i++)
+ iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+ denali->flash_reg + INTR_STATUS(i));
return PASS;
}
@@ -254,7 +228,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
uint16_t acc_clks;
uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
- dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
en_lo = CEIL_DIV(Trp[mode], CLK_X);
@@ -291,7 +265,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
acc_clks++;
if ((data_invalid - acc_clks * CLK_X) < 2)
- dev_warn(&denali->dev->dev, "%s, Line %d: Warning!\n",
+ dev_warn(denali->dev, "%s, Line %d: Warning!\n",
__FILE__, __LINE__);
addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
@@ -419,7 +393,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
#endif
break;
default:
- dev_warn(&denali->dev->dev,
+ dev_warn(denali->dev,
"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
"Will use default parameter values instead.\n",
device_id);
@@ -431,17 +405,17 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
*/
static void find_valid_banks(struct denali_nand_info *denali)
{
- uint32_t id[LLD_MAX_FLASH_BANKS];
+ uint32_t id[denali->max_banks];
int i;
denali->total_used_banks = 1;
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
+ for (i = 0; i < denali->max_banks; i++) {
index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
index_addr_read_data(denali,
(uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
- dev_dbg(&denali->dev->dev,
+ dev_dbg(denali->dev,
"Return 1st ID for bank[%d]: %x\n", i, id[i]);
if (i == 0) {
@@ -461,16 +435,27 @@ static void find_valid_banks(struct denali_nand_info *denali)
* Multichip support is not enabled.
*/
if (denali->total_used_banks != 1) {
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"Sorry, Intel CE4100 only supports "
"a single NAND device.\n");
BUG();
}
}
- dev_dbg(&denali->dev->dev,
+ dev_dbg(denali->dev,
"denali->total_used_banks: %d\n", denali->total_used_banks);
}
+/*
+ * Use the configuration feature register to determine the maximum number of
+ * banks that the hardware supports.
+ */
+static void detect_max_banks(struct denali_nand_info *denali)
+{
+ uint32_t features = ioread32(denali->flash_reg + FEATURES);
+
+ denali->max_banks = 2 << (features & FEATURES__N_BANKS);
+}
+
static void detect_partition_feature(struct denali_nand_info *denali)
{
/* For MRST platform, denali->fwblks represent the
@@ -480,15 +465,15 @@ static void detect_partition_feature(struct denali_nand_info *denali)
* blocks it can't touch.
* */
if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
- if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
- PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
+ if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
+ PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
denali->fwblks =
- ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MIN_VALUE) *
+ ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
+ MIN_MAX_BANK__MIN_VALUE) *
denali->blksperchip)
+
- (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
- MIN_BLK_ADDR_1__VALUE);
+ (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
+ MIN_BLK_ADDR__VALUE);
} else
denali->fwblks = SPECTRA_START_BLOCK;
} else
@@ -501,7 +486,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
uint32_t id_bytes[5], addr;
uint8_t i, maf_id, device_id;
- dev_dbg(&denali->dev->dev,
+ dev_dbg(denali->dev,
"%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
@@ -530,7 +515,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
get_hynix_nand_para(denali, device_id);
}
- dev_info(&denali->dev->dev,
+ dev_info(denali->dev,
"Dump timing register values:"
"acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
"we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
@@ -560,7 +545,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
static void denali_set_intr_modes(struct denali_nand_info *denali,
uint16_t INT_ENABLE)
{
- dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
+ dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
if (INT_ENABLE)
@@ -580,6 +565,7 @@ static inline bool is_flash_bank_valid(int flash_bank)
static void denali_irq_init(struct denali_nand_info *denali)
{
uint32_t int_mask = 0;
+ int i;
/* Disable global interrupts */
denali_set_intr_modes(denali, false);
@@ -587,10 +573,8 @@ static void denali_irq_init(struct denali_nand_info *denali)
int_mask = DENALI_IRQ_ALL;
/* Clear all status bits */
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS0);
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS1);
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS2);
- iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS3);
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
denali_irq_enable(denali, int_mask);
}
@@ -604,10 +588,10 @@ static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
static void denali_irq_enable(struct denali_nand_info *denali,
uint32_t int_mask)
{
- iowrite32(int_mask, denali->flash_reg + INTR_EN0);
- iowrite32(int_mask, denali->flash_reg + INTR_EN1);
- iowrite32(int_mask, denali->flash_reg + INTR_EN2);
- iowrite32(int_mask, denali->flash_reg + INTR_EN3);
+ int i;
+
+ for (i = 0; i < denali->max_banks; ++i)
+ iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
}
/* This function only returns when an interrupt that this driver cares about
@@ -624,7 +608,7 @@ static inline void clear_interrupt(struct denali_nand_info *denali,
{
uint32_t intr_status_reg = 0;
- intr_status_reg = intr_status_addresses[denali->flash_bank];
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
}
@@ -645,7 +629,7 @@ static uint32_t read_interrupt_status(struct denali_nand_info *denali)
{
uint32_t intr_status_reg = 0;
- intr_status_reg = intr_status_addresses[denali->flash_bank];
+ intr_status_reg = INTR_STATUS(denali->flash_bank);
return ioread32(denali->flash_reg + intr_status_reg);
}
@@ -754,7 +738,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
irq_mask = 0;
if (op == DENALI_READ)
- irq_mask = INTR_STATUS0__LOAD_COMP;
+ irq_mask = INTR_STATUS__LOAD_COMP;
else if (op == DENALI_WRITE)
irq_mask = 0;
else
@@ -800,7 +784,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0) {
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"cmd, page, addr on timeout "
"(0x%x, 0x%x, 0x%x)\n",
cmd, denali->page, addr);
@@ -861,8 +845,8 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
- INTR_STATUS0__PROGRAM_FAIL;
+ uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
int status = 0;
denali->page = page;
@@ -875,11 +859,11 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0) {
- dev_err(&denali->dev->dev, "OOB write failed\n");
+ dev_err(denali->dev, "OOB write failed\n");
status = -EIO;
}
} else {
- dev_err(&denali->dev->dev, "unable to send pipeline command\n");
+ dev_err(denali->dev, "unable to send pipeline command\n");
status = -EIO;
}
return status;
@@ -889,7 +873,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- uint32_t irq_mask = INTR_STATUS0__LOAD_COMP,
+ uint32_t irq_mask = INTR_STATUS__LOAD_COMP,
irq_status = 0, addr = 0x0, cmd = 0x0;
denali->page = page;
@@ -904,7 +888,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0)
- dev_err(&denali->dev->dev, "page on OOB timeout %d\n",
+ dev_err(denali->dev, "page on OOB timeout %d\n",
denali->page);
/* We set the device back to MAIN_ACCESS here as I observed
@@ -944,7 +928,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
{
bool check_erased_page = false;
- if (irq_status & INTR_STATUS0__ECC_ERR) {
+ if (irq_status & INTR_STATUS__ECC_ERR) {
/* read the ECC errors. we'll ignore them for now */
uint32_t err_address = 0, err_correction_info = 0;
uint32_t err_byte = 0, err_sector = 0, err_device = 0;
@@ -995,7 +979,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
* for a while for this interrupt
* */
while (!(read_interrupt_status(denali) &
- INTR_STATUS0__ECC_TRANSACTION_DONE))
+ INTR_STATUS__ECC_TRANSACTION_DONE))
cpu_relax();
clear_interrupts(denali);
denali_set_intr_modes(denali, true);
@@ -1045,14 +1029,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, bool raw_xfer)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- struct pci_dev *pci_dev = denali->dev;
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__PROGRAM_FAIL;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
+ INTR_STATUS__PROGRAM_FAIL;
/* if it is a raw xfer, we want to disable ecc, and send
* the spare area.
@@ -1071,7 +1054,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize);
}
- pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
clear_interrupts(denali);
denali_enable_dma(denali, true);
@@ -1082,16 +1065,16 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
irq_status = wait_for_irq(denali, irq_mask);
if (irq_status == 0) {
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"timeout on write_page (type = %d)\n",
raw_xfer);
denali->status =
- (irq_status & INTR_STATUS0__PROGRAM_FAIL) ?
+ (irq_status & INTR_STATUS__PROGRAM_FAIL) ?
NAND_STATUS_FAIL : PASS;
}
denali_enable_dma(denali, false);
- pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
}
/* NAND core entry points */
@@ -1139,18 +1122,17 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- struct pci_dev *pci_dev = denali->dev;
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR;
+ uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
+ INTR_STATUS__ECC_ERR;
bool check_erased_page = false;
if (page != denali->page) {
- dev_err(&denali->dev->dev, "IN %s: page %d is not"
+ dev_err(denali->dev, "IN %s: page %d is not"
" equal to denali->page %d, investigate!!",
__func__, page, denali->page);
BUG();
@@ -1159,7 +1141,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
setup_ecc_for_xfer(denali, true, false);
denali_enable_dma(denali, true);
- pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
clear_interrupts(denali);
denali_setup_dma(denali, DENALI_READ);
@@ -1167,7 +1149,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
- pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
memcpy(buf, denali->buf.buf, mtd->writesize);
@@ -1192,16 +1174,15 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- struct pci_dev *pci_dev = denali->dev;
dma_addr_t addr = denali->buf.dma_buf;
size_t size = denali->mtd.writesize + denali->mtd.oobsize;
uint32_t irq_status = 0;
- uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
+ uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
if (page != denali->page) {
- dev_err(&denali->dev->dev, "IN %s: page %d is not"
+ dev_err(denali->dev, "IN %s: page %d is not"
" equal to denali->page %d, investigate!!",
__func__, page, denali->page);
BUG();
@@ -1210,7 +1191,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
setup_ecc_for_xfer(denali, false, true);
denali_enable_dma(denali, true);
- pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
clear_interrupts(denali);
denali_setup_dma(denali, DENALI_READ);
@@ -1218,7 +1199,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* wait for operation to complete */
irq_status = wait_for_irq(denali, irq_mask);
- pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
denali_enable_dma(denali, false);
@@ -1271,10 +1252,10 @@ static void denali_erase(struct mtd_info *mtd, int page)
index_addr(denali, (uint32_t)cmd, 0x1);
/* wait for erase to complete or failure to occur */
- irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
- INTR_STATUS0__ERASE_FAIL);
+ irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
+ INTR_STATUS__ERASE_FAIL);
- denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ?
+ denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
NAND_STATUS_FAIL : PASS;
}
@@ -1330,7 +1311,7 @@ static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
uint8_t *ecc_code)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"denali_ecc_calculate called unexpectedly\n");
BUG();
return -EIO;
@@ -1340,7 +1321,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"denali_ecc_correct called unexpectedly\n");
BUG();
return -EIO;
@@ -1349,7 +1330,7 @@ static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- dev_err(&denali->dev->dev,
+ dev_err(denali->dev,
"denali_ecc_hwctl called unexpectedly\n");
BUG();
}
@@ -1375,6 +1356,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
/* Should set value for these registers when init */
iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
iowrite32(1, denali->flash_reg + ECC_ENABLE);
+ detect_max_banks(denali);
denali_nand_timing_set(denali);
denali_irq_init(denali);
}
@@ -1484,24 +1466,22 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
/* Is 32-bit DMA supported? */
- ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
-
+ ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
if (ret) {
printk(KERN_ERR "Spectra: no usable DMA configuration\n");
goto failed_enable_dev;
}
- denali->buf.dma_buf =
- pci_map_single(dev, denali->buf.buf,
- DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
+ DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
+ if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
goto failed_enable_dev;
}
pci_set_master(dev);
- denali->dev = dev;
+ denali->dev = &dev->dev;
denali->mtd.dev.parent = &dev->dev;
ret = pci_request_regions(dev, DENALI_NAND_NAME);
@@ -1554,7 +1534,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* scan for NAND devices attached to the controller
* this is the first stage in a two step process to register
* with the nand subsystem */
- if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
+ if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
ret = -ENXIO;
goto failed_req_irq;
}
@@ -1664,7 +1644,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto failed_req_irq;
}
- ret = add_mtd_device(&denali->mtd);
+ ret = mtd_device_register(&denali->mtd, NULL, 0);
if (ret) {
dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
ret);
@@ -1681,8 +1661,8 @@ failed_remap_reg:
failed_req_regions:
pci_release_regions(dev);
failed_dma_map:
- pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
failed_enable_dev:
pci_disable_device(dev);
failed_alloc_memery:
@@ -1696,7 +1676,7 @@ static void denali_pci_remove(struct pci_dev *dev)
struct denali_nand_info *denali = pci_get_drvdata(dev);
nand_release(&denali->mtd);
- del_mtd_device(&denali->mtd);
+ mtd_device_unregister(&denali->mtd);
denali_irq_cleanup(dev->irq, denali);
@@ -1704,8 +1684,8 @@ static void denali_pci_remove(struct pci_dev *dev)
iounmap(denali->flash_mem);
pci_release_regions(dev);
pci_disable_device(dev);
- pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
pci_set_drvdata(dev, NULL);
kfree(denali);
}
@@ -1721,8 +1701,7 @@ static struct pci_driver denali_pci_driver = {
static int __devinit denali_init(void)
{
- printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n",
- __DATE__, __TIME__);
+ printk(KERN_INFO "Spectra MTD driver\n");
return pci_register_driver(&denali_pci_driver);
}
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 3918bcb1561e..fabb9d56b39e 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -211,185 +211,46 @@
#define TRANSFER_MODE 0x400
#define TRANSFER_MODE__VALUE 0x0003
-#define INTR_STATUS0 0x410
-#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS0__ECC_ERR 0x0002
-#define INTR_STATUS0__DMA_CMD_COMP 0x0004
-#define INTR_STATUS0__TIME_OUT 0x0008
-#define INTR_STATUS0__PROGRAM_FAIL 0x0010
-#define INTR_STATUS0__ERASE_FAIL 0x0020
-#define INTR_STATUS0__LOAD_COMP 0x0040
-#define INTR_STATUS0__PROGRAM_COMP 0x0080
-#define INTR_STATUS0__ERASE_COMP 0x0100
-#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS0__LOCKED_BLK 0x0400
-#define INTR_STATUS0__UNSUP_CMD 0x0800
-#define INTR_STATUS0__INT_ACT 0x1000
-#define INTR_STATUS0__RST_COMP 0x2000
-#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS0__PAGE_XFER_INC 0x8000
-
-#define INTR_EN0 0x420
-#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN0__ECC_ERR 0x0002
-#define INTR_EN0__DMA_CMD_COMP 0x0004
-#define INTR_EN0__TIME_OUT 0x0008
-#define INTR_EN0__PROGRAM_FAIL 0x0010
-#define INTR_EN0__ERASE_FAIL 0x0020
-#define INTR_EN0__LOAD_COMP 0x0040
-#define INTR_EN0__PROGRAM_COMP 0x0080
-#define INTR_EN0__ERASE_COMP 0x0100
-#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN0__LOCKED_BLK 0x0400
-#define INTR_EN0__UNSUP_CMD 0x0800
-#define INTR_EN0__INT_ACT 0x1000
-#define INTR_EN0__RST_COMP 0x2000
-#define INTR_EN0__PIPE_CMD_ERR 0x4000
-#define INTR_EN0__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT0 0x430
-#define PAGE_CNT0__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR0 0x440
-#define ERR_PAGE_ADDR0__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR0 0x450
-#define ERR_BLOCK_ADDR0__VALUE 0xffff
-
-#define INTR_STATUS1 0x460
-#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS1__ECC_ERR 0x0002
-#define INTR_STATUS1__DMA_CMD_COMP 0x0004
-#define INTR_STATUS1__TIME_OUT 0x0008
-#define INTR_STATUS1__PROGRAM_FAIL 0x0010
-#define INTR_STATUS1__ERASE_FAIL 0x0020
-#define INTR_STATUS1__LOAD_COMP 0x0040
-#define INTR_STATUS1__PROGRAM_COMP 0x0080
-#define INTR_STATUS1__ERASE_COMP 0x0100
-#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS1__LOCKED_BLK 0x0400
-#define INTR_STATUS1__UNSUP_CMD 0x0800
-#define INTR_STATUS1__INT_ACT 0x1000
-#define INTR_STATUS1__RST_COMP 0x2000
-#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS1__PAGE_XFER_INC 0x8000
-
-#define INTR_EN1 0x470
-#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN1__ECC_ERR 0x0002
-#define INTR_EN1__DMA_CMD_COMP 0x0004
-#define INTR_EN1__TIME_OUT 0x0008
-#define INTR_EN1__PROGRAM_FAIL 0x0010
-#define INTR_EN1__ERASE_FAIL 0x0020
-#define INTR_EN1__LOAD_COMP 0x0040
-#define INTR_EN1__PROGRAM_COMP 0x0080
-#define INTR_EN1__ERASE_COMP 0x0100
-#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN1__LOCKED_BLK 0x0400
-#define INTR_EN1__UNSUP_CMD 0x0800
-#define INTR_EN1__INT_ACT 0x1000
-#define INTR_EN1__RST_COMP 0x2000
-#define INTR_EN1__PIPE_CMD_ERR 0x4000
-#define INTR_EN1__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT1 0x480
-#define PAGE_CNT1__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR1 0x490
-#define ERR_PAGE_ADDR1__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR1 0x4a0
-#define ERR_BLOCK_ADDR1__VALUE 0xffff
-
-#define INTR_STATUS2 0x4b0
-#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS2__ECC_ERR 0x0002
-#define INTR_STATUS2__DMA_CMD_COMP 0x0004
-#define INTR_STATUS2__TIME_OUT 0x0008
-#define INTR_STATUS2__PROGRAM_FAIL 0x0010
-#define INTR_STATUS2__ERASE_FAIL 0x0020
-#define INTR_STATUS2__LOAD_COMP 0x0040
-#define INTR_STATUS2__PROGRAM_COMP 0x0080
-#define INTR_STATUS2__ERASE_COMP 0x0100
-#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS2__LOCKED_BLK 0x0400
-#define INTR_STATUS2__UNSUP_CMD 0x0800
-#define INTR_STATUS2__INT_ACT 0x1000
-#define INTR_STATUS2__RST_COMP 0x2000
-#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS2__PAGE_XFER_INC 0x8000
-
-#define INTR_EN2 0x4c0
-#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN2__ECC_ERR 0x0002
-#define INTR_EN2__DMA_CMD_COMP 0x0004
-#define INTR_EN2__TIME_OUT 0x0008
-#define INTR_EN2__PROGRAM_FAIL 0x0010
-#define INTR_EN2__ERASE_FAIL 0x0020
-#define INTR_EN2__LOAD_COMP 0x0040
-#define INTR_EN2__PROGRAM_COMP 0x0080
-#define INTR_EN2__ERASE_COMP 0x0100
-#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN2__LOCKED_BLK 0x0400
-#define INTR_EN2__UNSUP_CMD 0x0800
-#define INTR_EN2__INT_ACT 0x1000
-#define INTR_EN2__RST_COMP 0x2000
-#define INTR_EN2__PIPE_CMD_ERR 0x4000
-#define INTR_EN2__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT2 0x4d0
-#define PAGE_CNT2__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR2 0x4e0
-#define ERR_PAGE_ADDR2__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR2 0x4f0
-#define ERR_BLOCK_ADDR2__VALUE 0xffff
-
-#define INTR_STATUS3 0x500
-#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS3__ECC_ERR 0x0002
-#define INTR_STATUS3__DMA_CMD_COMP 0x0004
-#define INTR_STATUS3__TIME_OUT 0x0008
-#define INTR_STATUS3__PROGRAM_FAIL 0x0010
-#define INTR_STATUS3__ERASE_FAIL 0x0020
-#define INTR_STATUS3__LOAD_COMP 0x0040
-#define INTR_STATUS3__PROGRAM_COMP 0x0080
-#define INTR_STATUS3__ERASE_COMP 0x0100
-#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS3__LOCKED_BLK 0x0400
-#define INTR_STATUS3__UNSUP_CMD 0x0800
-#define INTR_STATUS3__INT_ACT 0x1000
-#define INTR_STATUS3__RST_COMP 0x2000
-#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS3__PAGE_XFER_INC 0x8000
-
-#define INTR_EN3 0x510
-#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN3__ECC_ERR 0x0002
-#define INTR_EN3__DMA_CMD_COMP 0x0004
-#define INTR_EN3__TIME_OUT 0x0008
-#define INTR_EN3__PROGRAM_FAIL 0x0010
-#define INTR_EN3__ERASE_FAIL 0x0020
-#define INTR_EN3__LOAD_COMP 0x0040
-#define INTR_EN3__PROGRAM_COMP 0x0080
-#define INTR_EN3__ERASE_COMP 0x0100
-#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN3__LOCKED_BLK 0x0400
-#define INTR_EN3__UNSUP_CMD 0x0800
-#define INTR_EN3__INT_ACT 0x1000
-#define INTR_EN3__RST_COMP 0x2000
-#define INTR_EN3__PIPE_CMD_ERR 0x4000
-#define INTR_EN3__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT3 0x520
-#define PAGE_CNT3__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR3 0x530
-#define ERR_PAGE_ADDR3__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR3 0x540
-#define ERR_BLOCK_ADDR3__VALUE 0xffff
+#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50))
+#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50))
+
+#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS__ECC_ERR 0x0002
+#define INTR_STATUS__DMA_CMD_COMP 0x0004
+#define INTR_STATUS__TIME_OUT 0x0008
+#define INTR_STATUS__PROGRAM_FAIL 0x0010
+#define INTR_STATUS__ERASE_FAIL 0x0020
+#define INTR_STATUS__LOAD_COMP 0x0040
+#define INTR_STATUS__PROGRAM_COMP 0x0080
+#define INTR_STATUS__ERASE_COMP 0x0100
+#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS__LOCKED_BLK 0x0400
+#define INTR_STATUS__UNSUP_CMD 0x0800
+#define INTR_STATUS__INT_ACT 0x1000
+#define INTR_STATUS__RST_COMP 0x2000
+#define INTR_STATUS__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS__PAGE_XFER_INC 0x8000
+
+#define INTR_EN__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN__ECC_ERR 0x0002
+#define INTR_EN__DMA_CMD_COMP 0x0004
+#define INTR_EN__TIME_OUT 0x0008
+#define INTR_EN__PROGRAM_FAIL 0x0010
+#define INTR_EN__ERASE_FAIL 0x0020
+#define INTR_EN__LOAD_COMP 0x0040
+#define INTR_EN__PROGRAM_COMP 0x0080
+#define INTR_EN__ERASE_COMP 0x0100
+#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN__LOCKED_BLK 0x0400
+#define INTR_EN__UNSUP_CMD 0x0800
+#define INTR_EN__INT_ACT 0x1000
+#define INTR_EN__RST_COMP 0x2000
+#define INTR_EN__PIPE_CMD_ERR 0x4000
+#define INTR_EN__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50))
+#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50))
+#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50))
#define DATA_INTR 0x550
#define DATA_INTR__WRITE_SPACE_AV 0x0001
@@ -484,141 +345,23 @@
#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
-#define PERM_SRC_ID_0 0x830
-#define PERM_SRC_ID_0__SRCID 0x00ff
-#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_0 0x840
-#define MIN_BLK_ADDR_0__VALUE 0xffff
-
-#define MAX_BLK_ADDR_0 0x850
-#define MAX_BLK_ADDR_0__VALUE 0xffff
-
-#define MIN_MAX_BANK_0 0x860
-#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_1 0x870
-#define PERM_SRC_ID_1__SRCID 0x00ff
-#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_1 0x880
-#define MIN_BLK_ADDR_1__VALUE 0xffff
-
-#define MAX_BLK_ADDR_1 0x890
-#define MAX_BLK_ADDR_1__VALUE 0xffff
-
-#define MIN_MAX_BANK_1 0x8a0
-#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_2 0x8b0
-#define PERM_SRC_ID_2__SRCID 0x00ff
-#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_2 0x8c0
-#define MIN_BLK_ADDR_2__VALUE 0xffff
-
-#define MAX_BLK_ADDR_2 0x8d0
-#define MAX_BLK_ADDR_2__VALUE 0xffff
-
-#define MIN_MAX_BANK_2 0x8e0
-#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_3 0x8f0
-#define PERM_SRC_ID_3__SRCID 0x00ff
-#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_3 0x900
-#define MIN_BLK_ADDR_3__VALUE 0xffff
-
-#define MAX_BLK_ADDR_3 0x910
-#define MAX_BLK_ADDR_3__VALUE 0xffff
-
-#define MIN_MAX_BANK_3 0x920
-#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_4 0x930
-#define PERM_SRC_ID_4__SRCID 0x00ff
-#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_4 0x940
-#define MIN_BLK_ADDR_4__VALUE 0xffff
-
-#define MAX_BLK_ADDR_4 0x950
-#define MAX_BLK_ADDR_4__VALUE 0xffff
-
-#define MIN_MAX_BANK_4 0x960
-#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_5 0x970
-#define PERM_SRC_ID_5__SRCID 0x00ff
-#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_5 0x980
-#define MIN_BLK_ADDR_5__VALUE 0xffff
-
-#define MAX_BLK_ADDR_5 0x990
-#define MAX_BLK_ADDR_5__VALUE 0xffff
-
-#define MIN_MAX_BANK_5 0x9a0
-#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_6 0x9b0
-#define PERM_SRC_ID_6__SRCID 0x00ff
-#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_6 0x9c0
-#define MIN_BLK_ADDR_6__VALUE 0xffff
-
-#define MAX_BLK_ADDR_6 0x9d0
-#define MAX_BLK_ADDR_6__VALUE 0xffff
-
-#define MIN_MAX_BANK_6 0x9e0
-#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_7 0x9f0
-#define PERM_SRC_ID_7__SRCID 0x00ff
-#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
+#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40))
+#define PERM_SRC_ID__SRCID 0x00ff
+#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID__READ_ACTIVE 0x4000
+#define PERM_SRC_ID__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40))
+#define MIN_BLK_ADDR__VALUE 0xffff
+
+#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40))
+#define MAX_BLK_ADDR__VALUE 0xffff
+
+#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40))
+#define MIN_MAX_BANK__MIN_VALUE 0x0003
+#define MIN_MAX_BANK__MAX_VALUE 0x000c
-#define MIN_BLK_ADDR_7 0xa00
-#define MIN_BLK_ADDR_7__VALUE 0xffff
-
-#define MAX_BLK_ADDR_7 0xa10
-#define MAX_BLK_ADDR_7__VALUE 0xffff
-
-#define MIN_MAX_BANK_7 0xa20
-#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
/* ffsdefs.h */
#define CLEAR 0 /*use this to clear a field instead of "fail"*/
@@ -711,7 +454,6 @@
#define READ_WRITE_ENABLE_HIGH_COUNT 22
#define ECC_SECTOR_SIZE 512
-#define LLD_MAX_FLASH_BANKS 4
#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
@@ -732,7 +474,7 @@ struct denali_nand_info {
int status;
int platform;
struct nand_buf buf;
- struct pci_dev *dev;
+ struct device *dev;
int total_used_banks;
uint32_t block; /* stored for future use */
uint16_t page;
@@ -751,6 +493,7 @@ struct denali_nand_info {
uint32_t totalblks;
uint32_t blksperchip;
uint32_t bbtskipbytes;
+ uint32_t max_banks;
};
#endif /*_LLD_NAND_*/
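The per-bank register blocks that the old header spelled out one by one sit at a fixed stride, which is all the new INTR_STATUS()/INTR_EN() (0x50 bytes per bank) and PERM_SRC_ID()/MIN_BLK_ADDR()/MAX_BLK_ADDR()/MIN_MAX_BANK() (0x40 bytes per bank) macros encode; the bank count itself is now read from the FEATURES register by detect_max_banks() rather than being capped by the removed LLD_MAX_FLASH_BANKS constant. A small stand-alone sketch of the resulting addressing; the FEATURES__N_BANKS mask and the sample FEATURES value are assumptions for illustration only:

	#include <stdio.h>

	#define FEATURES__N_BANKS	0x0003			/* assumed field mask */
	#define INTR_STATUS(__bank)	(0x410 + ((__bank) * 0x50))
	#define INTR_EN(__bank)		(0x420 + ((__bank) * 0x50))

	int main(void)
	{
		unsigned int features = 0x1;			/* example readout */
		unsigned int max_banks = 2 << (features & FEATURES__N_BANKS);
		unsigned int i;

		/* Reproduces the per-bank offsets the old INTR_STATUS0..3 names hard-coded. */
		for (i = 0; i < max_banks; i++)
			printf("bank %u: INTR_STATUS 0x%03x, INTR_EN 0x%03x\n",
			       i, INTR_STATUS(i), INTR_EN(i));
		return 0;
	}

With features = 0x1 this prints four banks at 0x410/0x420, 0x460/0x470, 0x4b0/0x4c0 and 0x500/0x510 -- the same addresses the removed INTR_STATUS0..3 / INTR_EN0..3 defines used.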
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 657b9f4b6f9b..7837728d02ff 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1360,11 +1360,9 @@ static int __init nftl_scan_bbt(struct mtd_info *mtd)
At least as nand_bbt.c is currently written. */
if ((ret = nand_scan_bbt(mtd, NULL)))
return ret;
- add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
+ mtd_device_register(mtd, NULL, 0);
if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
-#endif
+ mtd_device_register(mtd, parts, numparts);
return 0;
}
@@ -1419,11 +1417,9 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
autopartitioning, but I want to give it more thought. */
if (!numparts)
return -EIO;
- add_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
+ mtd_device_register(mtd, NULL, 0);
if (!no_autopart)
- add_mtd_partitions(mtd, parts, numparts);
-#endif
+ mtd_device_register(mtd, parts, numparts);
return 0;
}
@@ -1678,9 +1674,9 @@ static int __init doc_probe(unsigned long physadr)
/* DBB note: i believe nand_release is necessary here, as
buffers may have been allocated in nand_base. Check with
Thomas. FIX ME! */
- /* nand_release will call del_mtd_device, but we haven't yet
- added it. This is handled without incident by
- del_mtd_device, as far as I can tell. */
+ /* nand_release will call mtd_device_unregister, but we
+ haven't yet added it. This is handled without incident by
+ mtd_device_unregister, as far as I can tell. */
nand_release(mtd);
kfree(mtd);
goto fail;
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 86366bfba9f8..8400d0f6dada 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -55,7 +55,6 @@ static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Define static partitions for flash device
*/
@@ -67,8 +66,6 @@ static struct mtd_partition partition_info[] = {
#define NUM_PARTITIONS 1
-#endif
-
/*
* hardware specific access to control-lines
*
@@ -101,9 +98,7 @@ static int ep7312_device_ready(struct mtd_info *mtd)
return 1;
}
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
@@ -162,14 +157,12 @@ static int __init ep7312_init(void)
kfree(ep7312_mtd);
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
ep7312_mtd->name = "edb7312-nand";
mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
-#endif
if (mtd_parts_nb == 0) {
mtd_parts = partition_info;
mtd_parts_nb = NUM_PARTITIONS;
@@ -178,7 +171,7 @@ static int __init ep7312_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ep7312_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 537e380b8dcb..0bb254c7d2b1 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -841,12 +841,9 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
struct fsl_elbc_mtd *priv;
struct resource res;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
-
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probe_types[]
= { "cmdlinepart", "RedBoot", NULL };
struct mtd_partition *parts;
-#endif
int ret;
int bank;
struct device *dev;
@@ -935,26 +932,19 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
if (ret)
goto err;
-#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
if (ret < 0)
goto err;
-#ifdef CONFIG_MTD_OF_PARTS
if (ret == 0) {
ret = of_mtd_parse_partitions(priv->dev, node, &parts);
if (ret < 0)
goto err;
}
-#endif
- if (ret > 0)
- add_mtd_partitions(&priv->mtd, parts, ret);
- else
-#endif
- add_mtd_device(&priv->mtd);
+ mtd_device_register(&priv->mtd, parts, ret);
printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
(unsigned long long)res.start, priv->bank);
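
The fsl_elbc probe above also keeps the usual partition-discovery order under the new API: command-line/RedBoot tables are tried first, the device tree second, and whatever count results (possibly zero) is handed straight to mtd_device_register(). A hedged sketch of that flow, with hypothetical example_* names; parse_mtd_partitions() and of_mtd_parse_partitions() are the existing helpers already used in the hunk:

static int example_probe_partitions(struct mtd_info *mtd,
				    struct device *dev,
				    struct device_node *node)
{
	static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
	struct mtd_partition *parts = NULL;
	int nr;

	/* command line / RedBoot take precedence, as in the hunk above */
	nr = parse_mtd_partitions(mtd, probes, &parts, 0);
	if (nr < 0)
		return nr;
	if (nr == 0)		/* then fall back to the device tree */
		nr = of_mtd_parse_partitions(dev, node, &parts);
	if (nr < 0)
		return nr;

	/* nr may still be 0 here; the device is then registered whole */
	return mtd_device_register(mtd, parts, nr);
}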
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 073ee026a17c..23752fd5bc59 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -33,10 +33,7 @@ struct fsl_upm_nand {
struct mtd_info mtd;
struct nand_chip chip;
int last_ctrl;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
-
struct fsl_upm upm;
uint8_t upm_addr_offset;
uint8_t upm_cmd_offset;
@@ -161,9 +158,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
{
int ret;
struct device_node *flash_np;
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_types[] = { "cmdlinepart", NULL, };
-#endif
fun->chip.IO_ADDR_R = fun->io_base;
fun->chip.IO_ADDR_W = fun->io_base;
@@ -197,7 +192,6 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
if (ret)
goto err;
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
@@ -207,11 +201,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
goto err;
}
#endif
- if (ret > 0)
- ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
- else
-#endif
- ret = add_mtd_device(&fun->mtd);
+ ret = mtd_device_register(&fun->mtd, fun->parts, ret);
err:
of_node_put(flash_np);
return ret;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 0d45ef3883e8..e9b275ac381c 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -120,8 +120,6 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
}
};
-
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Default partition tables to be used if the partition information not
* provided through platform data.
@@ -182,7 +180,6 @@ static struct mtd_partition partition_info_128KB_blk[] = {
#ifdef CONFIG_MTD_CMDLINE_PARTS
const char *part_probes[] = { "cmdlinepart", NULL };
#endif
-#endif
/**
* struct fsmc_nand_data - structure for FSMC NAND device state
@@ -719,7 +716,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* platform data,
* default partition information present in driver.
*/
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
/*
* Check if partition info passed via command line
@@ -777,19 +773,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
}
#endif
- if (host->partitions) {
- ret = add_mtd_partitions(&host->mtd, host->partitions,
- host->nr_partitions);
- if (ret)
- goto err_probe;
- }
-#else
- dev_info(&pdev->dev, "Registering %s as whole device\n", mtd->name);
- if (!add_mtd_device(mtd)) {
- ret = -ENXIO;
+ ret = mtd_device_register(&host->mtd, host->partitions,
+ host->nr_partitions);
+ if (ret)
goto err_probe;
- }
-#endif
platform_set_drvdata(pdev, host);
dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
@@ -835,11 +822,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (host) {
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(&host->mtd);
-#else
- del_mtd_device(&host->mtd);
-#endif
+ mtd_device_unregister(&host->mtd);
clk_disable(host->clk);
clk_put(host->clk);
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 0cde618bcc1e..2c2060b2800e 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -316,8 +316,8 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
- add_mtd_partitions(&gpiomtd->mtd_info, gpiomtd->plat.parts,
- gpiomtd->plat.num_parts);
+ mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
platform_set_drvdata(dev, gpiomtd);
return 0;
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index f8ce79b446ed..02a03e67109c 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -38,7 +38,6 @@ static struct mtd_info *h1910_nand_mtd = NULL;
* Module stuff
*/
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Define static partitions for flash device
*/
@@ -50,8 +49,6 @@ static struct mtd_partition partition_info[] = {
#define NUM_PARTITIONS 1
-#endif
-
/*
* hardware specific access to control-lines
*
@@ -154,7 +151,7 @@ static int __init h1910_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index cea38a5d4ac5..6e813daed068 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -299,10 +299,8 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partition_info;
int num_partitions = 0;
-#endif
nand = kzalloc(sizeof(*nand), GFP_KERNEL);
if (!nand) {
@@ -375,7 +373,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
goto err_gpio_free;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
num_partitions = parse_mtd_partitions(mtd, part_probes,
&partition_info, 0);
@@ -384,12 +381,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
num_partitions = pdata->num_partitions;
partition_info = pdata->partitions;
}
-
- if (num_partitions > 0)
- ret = add_mtd_partitions(mtd, partition_info, num_partitions);
- else
-#endif
- ret = add_mtd_device(mtd);
+ ret = mtd_device_register(mtd, partition_info, num_partitions);
if (ret) {
dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 0b81b5b499d1..2f7c930872f9 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,9 +131,7 @@ struct mpc5121_nfc_prv {
static void mpc5121_nfc_done(struct mtd_info *mtd);
-#ifdef CONFIG_MTD_PARTITIONS
static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
-#endif
/* Read NFC register */
static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
@@ -658,9 +656,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
struct mpc5121_nfc_prv *prv;
struct resource res;
struct mtd_info *mtd;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
struct nand_chip *chip;
unsigned long regs_paddr, regs_size;
const __be32 *chips_no;
@@ -841,7 +837,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
dev_set_drvdata(dev, mtd);
/* Register device in MTD */
-#ifdef CONFIG_MTD_PARTITIONS
retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
if (retval == 0)
@@ -854,12 +849,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
goto error;
}
- if (retval > 0)
- retval = add_mtd_partitions(mtd, parts, retval);
- else
-#endif
- retval = add_mtd_device(mtd);
-
+ retval = mtd_device_register(mtd, parts, retval);
if (retval) {
dev_err(dev, "Error adding MTD device!\n");
devm_free_irq(dev, prv->irq, mtd);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 42a95fb41504..90df34c4d26c 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -56,8 +56,14 @@
#define NFC_V1_V2_WRPROT (host->regs + 0x12)
#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
-#define NFC_V21_UNLOCKSTART_BLKADDR (host->regs + 0x20)
-#define NFC_V21_UNLOCKEND_BLKADDR (host->regs + 0x22)
+#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
+#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
+#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
+#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
+#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
+#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
+#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
+#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
@@ -152,6 +158,7 @@ struct mxc_nand_host {
int clk_act;
int irq;
int eccsize;
+ int active_cs;
struct completion op_completion;
@@ -236,9 +243,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
-#endif
static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
{
@@ -445,7 +450,7 @@ static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
for (i = 0; i < bufs; i++) {
/* NANDFC buffer 0 is used for page read/write */
- writew(i, NFC_V1_V2_BUF_ADDR);
+ writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
writew(ops, NFC_V1_V2_CONFIG2);
@@ -470,7 +475,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
struct nand_chip *this = &host->nand;
/* NANDFC buffer 0 is used for device ID output */
- writew(0x0, NFC_V1_V2_BUF_ADDR);
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
writew(NFC_ID, NFC_V1_V2_CONFIG2);
@@ -505,7 +510,7 @@ static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
uint32_t store;
uint16_t ret;
- writew(0x0, NFC_V1_V2_BUF_ADDR);
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
/*
* The device status is stored in main_area0. To
@@ -686,24 +691,24 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- switch (chip) {
- case -1:
+ if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
clk_disable(host->clk);
host->clk_act = 0;
}
- break;
- case 0:
+ return;
+ }
+
+ if (!host->clk_act) {
/* Enable the NFC clock */
- if (!host->clk_act) {
- clk_enable(host->clk);
- host->clk_act = 1;
- }
- break;
+ clk_enable(host->clk);
+ host->clk_act = 1;
+ }
- default:
- break;
+ if (nfc_is_v21()) {
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
}
}
@@ -834,8 +839,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
/* Blocks to be unlocked */
if (nfc_is_v21()) {
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
} else if (nfc_is_v1()) {
writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
@@ -1200,7 +1211,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
irq_control_v1_v2(host, 1);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1, NULL)) {
+ if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
err = -ENXIO;
goto escan;
}
@@ -1220,18 +1231,15 @@ static int __init mxcnd_probe(struct platform_device *pdev)
}
/* Register the partitions */
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts =
parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
if (nr_parts > 0)
- add_mtd_partitions(mtd, host->parts, nr_parts);
+ mtd_device_register(mtd, host->parts, nr_parts);
else if (pdata->parts)
- add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
- else
-#endif
- {
+ mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ else {
pr_info("Registering %s as whole device\n", mtd->name);
- add_mtd_device(mtd);
+ mtd_device_register(mtd, NULL, 0);
}
platform_set_drvdata(pdev, host);
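
Beyond the registration cleanup, the mxc_nand hunks add multi-chip-select support for NFC v2.1: nand_scan_ident() now probes up to four chips, the unlock window is programmed per chip select, and the selected chip is encoded in the upper nibble of the buffer-address register. An illustrative helper (hypothetical name, relying on the driver's own NFC_V1_V2_BUF_ADDR macro and struct mxc_nand_host from the hunks above):

/* Upper nibble = active chip select, lower nibble = internal RAM buffer. */
static void example_select_buffer(struct mxc_nand_host *host, int buf)
{
	writew((host->active_cs << 4) | buf, NFC_V1_V2_BUF_ADDR);
}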
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index c54a4cbac6bc..a46e9bb847bd 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -47,10 +47,7 @@
#include <linux/bitops.h>
#include <linux/leds.h>
#include <linux/io.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
-#endif
/* Define default oob placement schemes for large and small page devices */
static struct nand_ecclayout nand_oob_8 = {
@@ -976,9 +973,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0);
out:
- /* de-select the NAND device */
- chip->select_chip(mtd, -1);
-
nand_release_device(mtd);
return ret;
@@ -1046,9 +1040,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0x1);
out:
- /* de-select the NAND device */
- chip->select_chip(mtd, -1);
-
nand_release_device(mtd);
return ret;
@@ -3112,6 +3103,8 @@ ident_done:
chip->chip_shift += 32 - 1;
}
+ chip->badblockbits = 8;
+
/* Set the bad block position */
if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
@@ -3539,12 +3532,7 @@ void nand_release(struct mtd_info *mtd)
if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
-#ifdef CONFIG_MTD_PARTITIONS
- /* Deregister partitions */
- del_mtd_partitions(mtd);
-#endif
- /* Deregister the device */
- del_mtd_device(mtd);
+ mtd_device_unregister(mtd);
/* Free bad block table memory */
kfree(chip->bbt);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index af46428286fe..ccbeaa1e4a8e 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1276,20 +1276,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
* while scanning a device for factory marked good / bad blocks. */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-static struct nand_bbt_descr smallpage_flashbased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = NAND_SMALL_BADBLOCK_POS,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr largepage_flashbased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = NAND_LARGE_BADBLOCK_POS,
- .len = 2,
- .pattern = scan_ff_pattern
-};
-
static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
static struct nand_bbt_descr agand_flashbased = {
@@ -1355,10 +1341,6 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
* this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
* passed to this function.
*
- * TODO: Handle other flags, replace other static structs
- * (e.g. handle NAND_BBT_FLASH for flash-based BBT,
- * replace smallpage_flashbased)
- *
*/
static int nand_create_default_bbt_descr(struct nand_chip *this)
{
@@ -1422,15 +1404,14 @@ int nand_default_bbt(struct mtd_info *mtd)
this->bbt_md = &bbt_mirror_descr;
}
}
- if (!this->badblock_pattern) {
- this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
- }
} else {
this->bbt_td = NULL;
this->bbt_md = NULL;
- if (!this->badblock_pattern)
- nand_create_default_bbt_descr(this);
}
+
+ if (!this->badblock_pattern)
+ nand_create_default_bbt_descr(this);
+
return nand_scan_bbt(mtd, this->badblock_pattern);
}
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 893d95bfea48..357e8c5252a8 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2383,7 +2383,9 @@ static int __init ns_init_module(void)
goto err_exit;
/* Register NAND partitions */
- if ((retval = add_mtd_partitions(nsmtd, &nand->partitions[0], nand->nbparts)) != 0)
+ retval = mtd_device_register(nsmtd, &nand->partitions[0],
+ nand->nbparts);
+ if (retval != 0)
goto err_exit;
return 0;
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index bbe6d451290d..ea2dea8a9c88 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -33,6 +33,7 @@
#include <linux/of_platform.h>
#include <asm/io.h>
+#define NDFC_MAX_CS 4
struct ndfc_controller {
struct platform_device *ofdev;
@@ -41,17 +42,16 @@ struct ndfc_controller {
struct nand_chip chip;
int chip_select;
struct nand_hw_control ndfc_control;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
};
-static struct ndfc_controller ndfc_ctrl;
+static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
static void ndfc_select_chip(struct mtd_info *mtd, int chip)
{
uint32_t ccr;
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *nchip = mtd->priv;
+ struct ndfc_controller *ndfc = nchip->priv;
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
if (chip >= 0) {
@@ -64,7 +64,8 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
if (cmd == NAND_CMD_NONE)
return;
@@ -77,7 +78,8 @@ static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
static int ndfc_ready(struct mtd_info *mtd)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
}
@@ -85,7 +87,8 @@ static int ndfc_ready(struct mtd_info *mtd)
static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
{
uint32_t ccr;
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
ccr |= NDFC_CCR_RESET_ECC;
@@ -96,7 +99,8 @@ static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
static int ndfc_calculate_ecc(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_code)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t ecc;
uint8_t *p = (uint8_t *)&ecc;
@@ -119,7 +123,8 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd,
*/
static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
@@ -128,7 +133,8 @@ static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
@@ -137,7 +143,8 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = chip->priv;
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
@@ -152,13 +159,11 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
static int ndfc_chip_init(struct ndfc_controller *ndfc,
struct device_node *node)
{
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_types[] = { "cmdlinepart", NULL };
#else
static const char *part_types[] = { NULL };
#endif
-#endif
struct device_node *flash_np;
struct nand_chip *chip = &ndfc->chip;
int ret;
@@ -179,6 +184,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
+ chip->priv = ndfc;
ndfc->mtd.priv = chip;
ndfc->mtd.owner = THIS_MODULE;
@@ -198,25 +204,18 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (ret)
goto err;
-#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
if (ret < 0)
goto err;
-#ifdef CONFIG_MTD_OF_PARTS
if (ret == 0) {
ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
&ndfc->parts);
if (ret < 0)
goto err;
}
-#endif
- if (ret > 0)
- ret = add_mtd_partitions(&ndfc->mtd, ndfc->parts, ret);
- else
-#endif
- ret = add_mtd_device(&ndfc->mtd);
+ ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
err:
of_node_put(flash_np);
@@ -227,15 +226,10 @@ err:
static int __devinit ndfc_probe(struct platform_device *ofdev)
{
- struct ndfc_controller *ndfc = &ndfc_ctrl;
+ struct ndfc_controller *ndfc;
const __be32 *reg;
u32 ccr;
- int err, len;
-
- spin_lock_init(&ndfc->ndfc_control.lock);
- init_waitqueue_head(&ndfc->ndfc_control.wq);
- ndfc->ofdev = ofdev;
- dev_set_drvdata(&ofdev->dev, ndfc);
+ int err, len, cs;
/* Read the reg property to get the chip select */
reg = of_get_property(ofdev->dev.of_node, "reg", &len);
@@ -243,7 +237,20 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
return -ENOENT;
}
- ndfc->chip_select = be32_to_cpu(reg[0]);
+
+ cs = be32_to_cpu(reg[0]);
+ if (cs >= NDFC_MAX_CS) {
+ dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
+ return -EINVAL;
+ }
+
+ ndfc = &ndfc_ctrl[cs];
+ ndfc->chip_select = cs;
+
+ spin_lock_init(&ndfc->ndfc_control.lock);
+ init_waitqueue_head(&ndfc->ndfc_control.wq);
+ ndfc->ofdev = ofdev;
+ dev_set_drvdata(&ofdev->dev, ndfc);
ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
if (!ndfc->ndfcbase) {
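
The ndfc hunks replace the single global controller with an array indexed by the chip select read from the "reg" property, and every accessor now recovers its controller through the nand_chip instead of the global. A sketch of that lookup path, with a hypothetical helper name:

static struct ndfc_controller *example_ndfc(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;	/* set up in ndfc_chip_init() */

	return chip->priv;			/* points at ndfc_ctrl[cs] */
}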
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index a045a4a581b6..b6a5c86ab31e 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -158,12 +158,7 @@ static int nomadik_nand_probe(struct platform_device *pdev)
goto err_unmap;
}
-#ifdef CONFIG_MTD_PARTITIONS
- add_mtd_partitions(&host->mtd, pdata->parts, pdata->nparts);
-#else
- pr_info("Registering %s as whole device\n", mtd->name);
- add_mtd_device(mtd);
-#endif
+ mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
platform_set_drvdata(pdev, host);
return 0;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 6eddf7361ed7..9c30a0b03171 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -321,8 +321,8 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev)
goto fail3;
}
- add_mtd_partitions(&(nuc900_nand->mtd), partitions,
- ARRAY_SIZE(partitions));
+ mtd_device_register(&(nuc900_nand->mtd), partitions,
+ ARRAY_SIZE(partitions));
platform_set_drvdata(pdev, nuc900_nand);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index da9a351c9d79..0db2c0e7656a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -94,9 +94,7 @@
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
@@ -263,11 +261,10 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
- omap_read_buf16(mtd, buf, len);
+ omap_read_buf16(mtd, (u_char *)p, len);
else
- omap_read_buf8(mtd, buf, len);
+ omap_read_buf8(mtd, (u_char *)p, len);
} else {
- p = (u32 *) buf;
do {
r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
r_count = r_count >> 2;
@@ -293,7 +290,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
struct omap_nand_info, mtd);
uint32_t w_count = 0;
int i = 0, ret = 0;
- u16 *p;
+ u16 *p = (u16 *)buf;
unsigned long tim, limit;
/* take care of subpage writes */
@@ -309,11 +306,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
- omap_write_buf16(mtd, buf, len);
+ omap_write_buf16(mtd, (u_char *)p, len);
else
- omap_write_buf8(mtd, buf, len);
+ omap_write_buf8(mtd, (u_char *)p, len);
} else {
- p = (u16 *) buf;
while (len) {
w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
w_count = w_count >> 1;
@@ -1073,9 +1069,9 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
/* DIP switches on some boards change between 8 and 16 bit
* bus widths for flash. Try the other width if the first try fails.
*/
- if (nand_scan(&info->mtd, 1)) {
+ if (nand_scan_ident(&info->mtd, 1, NULL)) {
info->nand.options ^= NAND_BUSWIDTH_16;
- if (nand_scan(&info->mtd, 1)) {
+ if (nand_scan_ident(&info->mtd, 1, NULL)) {
err = -ENXIO;
goto out_release_mem_region;
}
@@ -1101,15 +1097,19 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.ecc.layout = &omap_oobinfo;
}
-#ifdef CONFIG_MTD_PARTITIONS
+ /* second phase scan */
+ if (nand_scan_tail(&info->mtd)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
- add_mtd_partitions(&info->mtd, info->parts, err);
+ mtd_device_register(&info->mtd, info->parts, err);
else if (pdata->parts)
- add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- add_mtd_device(&info->mtd);
+ mtd_device_register(&info->mtd, NULL, 0);
platform_set_drvdata(pdev, &info->mtd);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index da6e75343052..7794d0680f91 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,9 +21,7 @@
#include <mach/hardware.h>
#include <plat/orion_nand.h>
-#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
@@ -83,10 +81,8 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *io_base;
int ret = 0;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_part = 0;
-#endif
nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!nc) {
@@ -136,7 +132,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
goto no_dev;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
mtd->name = "orion_nand";
num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
@@ -147,14 +142,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
partitions = board->parts;
}
- if (partitions && num_part > 0)
- ret = add_mtd_partitions(mtd, partitions, num_part);
- else
- ret = add_mtd_device(mtd);
-#else
- ret = add_mtd_device(mtd);
-#endif
-
+ ret = mtd_device_register(mtd, partitions, num_part);
if (ret) {
nand_release(mtd);
goto no_dev;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 20bfe5f15afd..b1aa41b8a4eb 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -163,7 +163,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
goto out_lpc;
}
- if (add_mtd_device(pasemi_nand_mtd)) {
+ if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
err = -ENODEV;
goto out_lpc;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index caf5a736340a..633c04bf76f6 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,10 +21,8 @@ struct plat_nand_data {
struct nand_chip chip;
struct mtd_info mtd;
void __iomem *io_base;
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
-#endif
};
/*
@@ -101,13 +99,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
goto out;
}
-#ifdef CONFIG_MTD_PARTITIONS
if (pdata->chip.part_probe_types) {
err = parse_mtd_partitions(&data->mtd,
pdata->chip.part_probe_types,
&data->parts, 0);
if (err > 0) {
- add_mtd_partitions(&data->mtd, data->parts, err);
+ mtd_device_register(&data->mtd, data->parts, err);
return 0;
}
}
@@ -115,11 +112,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
- err = add_mtd_partitions(&data->mtd, data->parts,
+ err = mtd_device_register(&data->mtd, data->parts,
pdata->chip.nr_partitions);
} else
-#endif
- err = add_mtd_device(&data->mtd);
+ err = mtd_device_register(&data->mtd, NULL, 0);
if (!err)
return err;
@@ -149,10 +145,8 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&data->mtd);
-#ifdef CONFIG_MTD_PARTITIONS
if (data->parts && data->parts != pdata->chip.partitions)
kfree(data->parts);
-#endif
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
iounmap(data->io_base);
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index cc8658431851..3bbb796b451c 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -73,7 +73,6 @@ __setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
#endif
-#ifdef CONFIG_MTD_PARTITIONS
/*
* Define static partitions for flash devices
*/
@@ -101,7 +100,6 @@ static struct mtd_partition partition_info_evb[] = {
#define NUM_PARTITIONS 1
extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
-#endif
/*
* hardware specific access to control-lines
@@ -189,10 +187,8 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo)
}
#endif
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
const char *part_probes_evb[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
@@ -284,14 +280,13 @@ static int __init ppchameleonevb_init(void)
this->chip_delay = NAND_SMALL_DELAY_US;
#endif
-#ifdef CONFIG_MTD_PARTITIONS
ppchameleon_mtd->name = "ppchameleon-nand";
mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
-#endif
+
if (mtd_parts_nb == 0) {
if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
mtd_parts = partition_info_me;
@@ -303,7 +298,7 @@ static int __init ppchameleonevb_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
nand_evb_init:
/****************************
@@ -385,14 +380,14 @@ static int __init ppchameleonevb_init(void)
iounmap(ppchameleon_fio_base);
return -ENXIO;
}
-#ifdef CONFIG_MTD_PARTITIONS
+
ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
if (mtd_parts_nb > 0)
part_type = "command line";
else
mtd_parts_nb = 0;
-#endif
+
if (mtd_parts_nb == 0) {
mtd_parts = partition_info_evb;
mtd_parts_nb = NUM_PARTITIONS;
@@ -401,7 +396,7 @@ static int __init ppchameleonevb_init(void)
/* Register the partitions */
printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
+ mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index ff0701276d65..1fb3b3a80581 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1119,10 +1119,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
clk_put(info->clk);
if (mtd) {
- del_mtd_device(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(mtd);
-#endif
+ mtd_device_unregister(mtd);
kfree(mtd);
}
return 0;
@@ -1149,7 +1146,6 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
return -ENODEV;
}
-#ifdef CONFIG_MTD_PARTITIONS
if (mtd_has_cmdlinepart()) {
const char *probes[] = { "cmdlinepart", NULL };
struct mtd_partition *parts;
@@ -1158,13 +1154,10 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
if (nr_parts)
- return add_mtd_partitions(info->mtd, parts, nr_parts);
+ return mtd_device_register(info->mtd, parts, nr_parts);
}
- return add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
-#else
- return 0;
-#endif
+ return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
}
#ifdef CONFIG_PM
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 67440b5beef8..c9f9127ff770 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -580,7 +580,8 @@ static int __init rtc_from4_init(void)
#endif
/* Register the partitions */
- ret = add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS);
+ ret = mtd_device_register(rtc_from4_mtd, partition_info,
+ NUM_PARTITIONS);
if (ret)
goto err_3;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 33d832dddfdd..4405468f196b 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -55,7 +55,7 @@ static int hardware_ecc = 0;
#endif
#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
-static int clock_stop = 1;
+static const int clock_stop = 1;
#else
static const int clock_stop = 0;
#endif
@@ -96,6 +96,12 @@ enum s3c_cpu_type {
TYPE_S3C2440,
};
+enum s3c_nand_clk_state {
+ CLOCK_DISABLE = 0,
+ CLOCK_ENABLE,
+ CLOCK_SUSPEND,
+};
+
/* overview of the s3c2410 nand state */
/**
@@ -111,6 +117,7 @@ enum s3c_cpu_type {
* @mtd_count: The number of MTDs created from this controller.
* @save_sel: The contents of @sel_reg to be saved over suspend.
* @clk_rate: The clock rate from @clk.
+ * @clk_state: The current clock state.
* @cpu_type: The exact type of this controller.
*/
struct s3c2410_nand_info {
@@ -129,6 +136,7 @@ struct s3c2410_nand_info {
int mtd_count;
unsigned long save_sel;
unsigned long clk_rate;
+ enum s3c_nand_clk_state clk_state;
enum s3c_cpu_type cpu_type;
@@ -159,11 +167,33 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
return dev->dev.platform_data;
}
-static inline int allow_clk_stop(struct s3c2410_nand_info *info)
+static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
{
return clock_stop;
}
+/**
+ * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
+ * @info: The controller instance.
+ * @new_state: State to which clock should be set.
+ */
+static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
+ enum s3c_nand_clk_state new_state)
+{
+ if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
+ return;
+
+ if (info->clk_state == CLOCK_ENABLE) {
+ if (new_state != CLOCK_ENABLE)
+ clk_disable(info->clk);
+ } else {
+ if (new_state == CLOCK_ENABLE)
+ clk_enable(info->clk);
+ }
+
+ info->clk_state = new_state;
+}
+
/* timing calculations */
#define NS_IN_KHZ 1000000
@@ -333,8 +363,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
nmtd = this->priv;
info = nmtd->info;
- if (chip != -1 && allow_clk_stop(info))
- clk_enable(info->clk);
+ if (chip != -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
cur = readl(info->sel_reg);
@@ -356,8 +386,8 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
writel(cur, info->sel_reg);
- if (chip == -1 && allow_clk_stop(info))
- clk_disable(info->clk);
+ if (chip == -1)
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
/* s3c2410_nand_hwcontrol
@@ -694,8 +724,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
/* free the common resources */
if (info->clk != NULL && !IS_ERR(info->clk)) {
- if (!allow_clk_stop(info))
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
clk_put(info->clk);
}
@@ -715,7 +744,6 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_MTD_PARTITIONS
const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
@@ -725,7 +753,7 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
int nr_part = 0;
if (set == NULL)
- return add_mtd_device(&mtd->mtd);
+ return mtd_device_register(&mtd->mtd, NULL, 0);
mtd->mtd.name = set->name;
nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
@@ -735,19 +763,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
part_info = set->partitions;
}
- if (nr_part > 0 && part_info)
- return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
-
- return add_mtd_device(&mtd->mtd);
-}
-#else
-static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *mtd,
- struct s3c2410_nand_set *set)
-{
- return add_mtd_device(&mtd->mtd);
+ return mtd_device_register(&mtd->mtd, part_info, nr_part);
}
-#endif
/**
* s3c2410_nand_init_chip - initialise a single instance of an chip
@@ -947,7 +964,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
goto exit_error;
}
- clk_enable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
/* allocate and map the resource */
@@ -1026,9 +1043,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
goto exit_error;
}
- if (allow_clk_stop(info)) {
+ if (allow_clk_suspend(info)) {
dev_info(&pdev->dev, "clock idle support enabled\n");
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
pr_debug("initialised ok\n");
@@ -1059,8 +1076,7 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
writel(info->save_sel | info->sel_bit, info->sel_reg);
- if (!allow_clk_stop(info))
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
}
return 0;
@@ -1072,7 +1088,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
unsigned long sel;
if (info) {
- clk_enable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
s3c2410_nand_inithw(info);
/* Restore the state of the nFCE line. */
@@ -1082,8 +1098,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
sel |= info->save_sel & info->sel_bit;
writel(sel, info->sel_reg);
- if (allow_clk_stop(info))
- clk_disable(info->clk);
+ s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
return 0;
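
The s3c2410 hunks route all clock handling through s3c2410_nand_clk_set_state(), so the #ifdef-driven allow_clk_stop() checks no longer appear at the call sites. An illustrative recap of the state mapping, plus a hypothetical deselect helper that is not part of the patch:

/*
 *   CLOCK_ENABLE  - clock running (chip selected, probe, resume)
 *   CLOCK_SUSPEND - clock gated only when clock-stop support is built in
 *   CLOCK_DISABLE - clock gated unconditionally (remove, suspend)
 */
static void example_deselect(struct s3c2410_nand_info *info)
{
	/* idle the clock where the configuration allows it */
	s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}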
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 81bbb5ee148d..93b1f74321c2 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -867,7 +867,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
if (ret)
goto err;
- add_mtd_partitions(flctl_mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
return 0;
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 54ec7542a7b7..19e24ed089ea 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,9 +103,7 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
return readb(sharpsl->io + ECCCNTR) != 0;
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Main initialization routine
@@ -113,10 +111,8 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *sharpsl_partition_info;
int nr_partitions;
-#endif
struct resource *r;
int err = 0;
struct sharpsl_nand *sharpsl;
@@ -188,18 +184,14 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
/* Register the partitions */
sharpsl->mtd.name = "sharpsl-nand";
-#ifdef CONFIG_MTD_PARTITIONS
nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
if (nr_partitions <= 0) {
nr_partitions = data->nr_partitions;
sharpsl_partition_info = data->partitions;
}
- if (nr_partitions > 0)
- err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions);
- else
-#endif
- err = add_mtd_device(&sharpsl->mtd);
+ err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info,
+ nr_partitions);
if (err)
goto err_add;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 57cc80cd01a3..b6332e83b289 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -139,7 +139,7 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
if (ret)
return ret;
- return add_mtd_device(mtd);
+ return mtd_device_register(mtd, NULL, 0);
}
EXPORT_SYMBOL_GPL(sm_register_device);
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a853548986f0..ca2d0555729e 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -155,9 +155,7 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
return 1;
}
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
/*
* Probe for the NAND device.
@@ -168,11 +166,8 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int res;
-
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *partitions = NULL;
int num_partitions = 0;
-#endif
/* Allocate memory for the device structure (and zero it) */
host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -230,7 +225,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
goto out;
}
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
num_partitions = parse_mtd_partitions(mtd, part_probes,
&partitions, 0);
@@ -240,7 +234,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
}
#endif
-#ifdef CONFIG_MTD_OF_PARTS
if (num_partitions == 0) {
num_partitions = of_mtd_parse_partitions(&ofdev->dev,
ofdev->dev.of_node,
@@ -250,19 +243,12 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
goto release;
}
}
-#endif
- if (partitions && (num_partitions > 0))
- res = add_mtd_partitions(mtd, partitions, num_partitions);
- else
-#endif
- res = add_mtd_device(mtd);
+ res = mtd_device_register(mtd, partitions, num_partitions);
if (!res)
return res;
-#ifdef CONFIG_MTD_PARTITIONS
release:
-#endif
nand_release(mtd);
out:
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
index 0cc6d0acb8fe..bef76cd7c24c 100644
--- a/drivers/mtd/nand/spia.c
+++ b/drivers/mtd/nand/spia.c
@@ -149,7 +149,7 @@ static int __init spia_init(void)
}
/* Register the partitions */
- add_mtd_partitions(spia_mtd, partition_info, NUM_PARTITIONS);
+ mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);
/* Return happy */
return 0;
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index c004e474631b..11e8371b5683 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -381,10 +381,8 @@ static int tmio_probe(struct platform_device *dev)
struct tmio_nand *tmio;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
int nbparts = 0;
-#endif
int retval;
if (data == NULL)
@@ -463,7 +461,6 @@ static int tmio_probe(struct platform_device *dev)
goto err_scan;
}
/* Register the partitions */
-#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
#endif
@@ -472,12 +469,7 @@ static int tmio_probe(struct platform_device *dev)
nbparts = data->num_partitions;
}
- if (nbparts)
- retval = add_mtd_partitions(mtd, parts, nbparts);
- else
-#endif
- retval = add_mtd_device(mtd);
-
+ retval = mtd_device_register(mtd, parts, nbparts);
if (!retval)
return retval;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index ca270a4881a4..bfba4e39a6c5 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,9 +74,7 @@ struct txx9ndfmc_drvdata {
unsigned char hold; /* in gbusclock */
unsigned char spw; /* in gbusclock */
struct nand_hw_control hw_control;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
-#endif
};
static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -289,9 +287,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
-#ifdef CONFIG_MTD_PARTITIONS
static const char *probes[] = { "cmdlinepart", NULL };
-#endif
int hold, spw;
int i;
struct txx9ndfmc_drvdata *drvdata;
@@ -337,9 +333,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
struct txx9ndfmc_priv *txx9_priv;
struct nand_chip *chip;
struct mtd_info *mtd;
-#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
-#endif
if (!(plat->ch_mask & (1 << i)))
continue;
@@ -399,13 +393,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
}
mtd->name = txx9_priv->mtdname;
-#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(mtd, probes,
&drvdata->parts[i], 0);
- if (nr_parts > 0)
- add_mtd_partitions(mtd, drvdata->parts[i], nr_parts);
-#endif
- add_mtd_device(mtd);
+ mtd_device_register(mtd, drvdata->parts[i], nr_parts);
drvdata->mtds[i] = mtd;
}
@@ -431,9 +421,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
txx9_priv = chip->priv;
nand_release(mtd);
-#ifdef CONFIG_MTD_PARTITIONS
kfree(drvdata->parts[i]);
-#endif
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 4f426195f8db..772ad2966619 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -1,7 +1,6 @@
menuconfig MTD_ONENAND
tristate "OneNAND Device Support"
depends on MTD
- select MTD_PARTITIONS
help
This enables support for accessing all type of OneNAND flash
devices. For further information see
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index ac08750748a3..2d70d354d846 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -30,9 +30,7 @@
*/
#define DRIVER_NAME "onenand-flash"
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
struct onenand_info {
struct mtd_info mtd;
@@ -75,15 +73,13 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
goto out_iounmap;
}
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
- add_mtd_partitions(&info->mtd, info->parts, err);
+ mtd_device_register(&info->mtd, info->parts, err);
else if (err <= 0 && pdata && pdata->parts)
- add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- err = add_mtd_device(&info->mtd);
+ err = mtd_device_register(&info->mtd, NULL, 0);
platform_set_drvdata(pdev, info);
@@ -108,11 +104,7 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (info) {
- if (info->parts)
- del_mtd_partitions(&info->mtd);
- else
- del_mtd_device(&info->mtd);
-
+ mtd_device_unregister(&info->mtd);
onenand_release(&info->mtd);
release_mem_region(res->start, size);
iounmap(info->onenand.base);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 1fcb41adab07..a916dec29215 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -67,9 +67,7 @@ struct omap2_onenand {
struct regulator *regulator;
};
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
@@ -755,15 +753,13 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
-#ifdef CONFIG_MTD_PARTITIONS
r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
if (r > 0)
- r = add_mtd_partitions(&c->mtd, c->parts, r);
+ r = mtd_device_register(&c->mtd, c->parts, r);
else if (pdata->parts != NULL)
- r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
+ r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- r = add_mtd_device(&c->mtd);
+ r = mtd_device_register(&c->mtd, NULL, 0);
if (r)
goto err_release_onenand;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 56a8b2005bda..ac9e959802a7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -65,11 +65,11 @@ MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
" : 2 -> 1st Block lock"
" : 3 -> BOTH OTP Block and 1st Block lock");
-/**
- * onenand_oob_128 - oob info for Flex-Onenand with 4KB page
- * For now, we expose only 64 out of 80 ecc bytes
+/*
+ * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
*/
-static struct nand_ecclayout onenand_oob_128 = {
+static struct nand_ecclayout flexonenand_oob_128 = {
.eccbytes = 64,
.eccpos = {
6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
@@ -86,6 +86,35 @@ static struct nand_ecclayout onenand_oob_128 = {
}
};
+/*
+ * onenand_oob_128 - oob info for OneNAND with 4KB page
+ *
+ * Based on specification:
+ * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
+ *
+ * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
+ *
+ * oobfree uses the spare area fields marked as
+ * "Managed by internal ECC logic for Logical Sector Number area"
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 119
+ },
+ .oobfree = {
+ {2, 3}, {18, 3}, {34, 3}, {50, 3},
+ {66, 3}, {82, 3}, {98, 3}, {114, 3}
+ }
+};
+
/**
* onenand_oob_64 - oob info for large (2KB) page
*/
@@ -2424,7 +2453,7 @@ static int onenand_block_by_block_erase(struct mtd_info *mtd,
len -= block_size;
addr += block_size;
- if (addr == region_end) {
+ if (region && addr == region_end) {
if (!len)
break;
region++;
@@ -4018,8 +4047,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
*/
switch (mtd->oobsize) {
case 128:
- this->ecclayout = &onenand_oob_128;
- mtd->subpage_sft = 0;
+ if (FLEXONENAND(this)) {
+ this->ecclayout = &flexonenand_oob_128;
+ mtd->subpage_sft = 0;
+ } else {
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 2;
+ }
break;
case 64:
this->ecclayout = &onenand_oob_64;
@@ -4108,12 +4142,8 @@ void onenand_release(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
-#ifdef CONFIG_MTD_PARTITIONS
/* Deregister partitions */
- del_mtd_partitions (mtd);
-#endif
- /* Deregister the device */
- del_mtd_device (mtd);
+ mtd_device_unregister(mtd);
/* Free bad block table memory, if allocated */
if (this->bbm) {
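
The new onenand_oob_128 layout above is paired with mtd->subpage_sft = 2 for non-Flex 4KB-page parts, while Flex-OneNAND keeps subpage_sft = 0. As a hypothetical illustration (not from the patch), the minimum programmable unit becomes writesize >> subpage_sft, i.e. 4096 / 4 = 1024 bytes:

static unsigned int example_subpage_size(struct mtd_info *mtd)
{
	/* 4KB page with subpage_sft == 2 -> 1KB subpage writes */
	return mtd->writesize >> mtd->subpage_sft;
}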
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index 5ef3bd547772..85399e3accda 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -539,7 +539,8 @@ static int __init onenand_sim_init(void)
return -ENXIO;
}
- add_mtd_partitions(&info->mtd, info->parts, ARRAY_SIZE(os_partitions));
+ mtd_device_register(&info->mtd, info->parts,
+ ARRAY_SIZE(os_partitions));
return 0;
}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index a4c74a9ba430..3306b5b3c736 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -147,9 +147,7 @@ struct s3c_onenand {
struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
-#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
-#endif
};
#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -159,9 +157,7 @@ struct s3c_onenand {
static struct s3c_onenand *onenand;
-#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
-#endif
static inline int s3c_read_reg(int offset)
{
@@ -1021,15 +1017,13 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
-#ifdef CONFIG_MTD_PARTITIONS
err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
if (err > 0)
- add_mtd_partitions(mtd, onenand->parts, err);
+ mtd_device_register(mtd, onenand->parts, err);
else if (err <= 0 && pdata && pdata->parts)
- add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+ mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
else
-#endif
- err = add_mtd_device(mtd);
+ err = mtd_device_register(mtd, NULL, 0);
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 9aa81584c8a2..941bc3c05d6e 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -365,7 +365,7 @@ static int gluebi_create(struct ubi_device_info *di,
vi->vol_id);
mutex_unlock(&devices_mutex);
- if (add_mtd_device(mtd)) {
+ if (mtd_device_register(mtd, NULL, 0)) {
err_msg("cannot add MTD device");
kfree(mtd->name);
kfree(gluebi);
@@ -407,7 +407,7 @@ static int gluebi_remove(struct ubi_volume_info *vi)
return err;
mtd = &gluebi->mtd;
- err = del_mtd_device(mtd);
+ err = mtd_device_unregister(mtd);
if (err) {
err_msg("cannot remove fake MTD device %d, UBI device %d, "
"volume %d, error %d", mtd->index, gluebi->ubi_num,
@@ -524,7 +524,7 @@ static void __exit ubi_gluebi_exit(void)
int err;
struct mtd_info *mtd = &gluebi->mtd;
- err = del_mtd_device(mtd);
+ err = mtd_device_unregister(mtd);
if (err)
err_msg("error %d while removing gluebi MTD device %d, "
"UBI device %d, volume %d - ignoring", err,
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index d84f6e8903a5..5b732988d493 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -412,7 +412,7 @@ el2_open(struct net_device *dev)
outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
outb_p(0x00, E33G_IDCFR);
msleep(1);
- free_irq(*irqp, el2_probe_interrupt);
+ free_irq(*irqp, &seen);
if (!seen)
continue;
@@ -422,6 +422,7 @@ el2_open(struct net_device *dev)
continue;
if (retval < 0)
goto err_disable;
+ break;
} while (*++irqp);
if (*irqp == 0) {
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 5f25889e27ef..44b28b2d7003 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -185,7 +185,7 @@ static int max_interrupt_work = 10;
static int nopnp;
#endif
-static int el3_common_init(struct net_device *dev);
+static int __devinit el3_common_init(struct net_device *dev);
static void el3_common_remove(struct net_device *dev);
static ushort id_read_eeprom(int index);
static ushort read_eeprom(int ioaddr, int index);
@@ -395,7 +395,7 @@ static struct isa_driver el3_isa_driver = {
static int isa_registered;
#ifdef CONFIG_PNP
-static const struct pnp_device_id el3_pnp_ids[] __devinitconst = {
+static struct pnp_device_id el3_pnp_ids[] = {
{ .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
{ .id = "TCM5091" }, /* 3Com Etherlink III */
{ .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
@@ -478,7 +478,7 @@ static int pnp_registered;
#endif /* CONFIG_PNP */
#ifdef CONFIG_EISA
-static const struct eisa_device_id el3_eisa_ids[] __devinitconst = {
+static struct eisa_device_id el3_eisa_ids[] = {
{ "TCM5090" },
{ "TCM5091" },
{ "TCM5092" },
@@ -508,7 +508,7 @@ static int eisa_registered;
#ifdef CONFIG_MCA
static int el3_mca_probe(struct device *dev);
-static const short el3_mca_adapter_ids[] __devinitconst = {
+static short el3_mca_adapter_ids[] __initdata = {
0x627c,
0x627d,
0x62db,
@@ -517,7 +517,7 @@ static const short el3_mca_adapter_ids[] __devinitconst = {
0x0000
};
-static const char *const el3_mca_adapter_names[] __devinitconst = {
+static char *el3_mca_adapter_names[] __initdata = {
"3Com 3c529 EtherLink III (10base2)",
"3Com 3c529 EtherLink III (10baseT)",
"3Com 3c529 EtherLink III (test mode)",
@@ -601,7 +601,7 @@ static void el3_common_remove (struct net_device *dev)
}
#ifdef CONFIG_MCA
-static int __devinit el3_mca_probe(struct device *device)
+static int __init el3_mca_probe(struct device *device)
{
/* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
* heavily modified by Chris Beauregard
@@ -671,7 +671,7 @@ static int __devinit el3_mca_probe(struct device *device)
#endif /* CONFIG_MCA */
#ifdef CONFIG_EISA
-static int __devinit el3_eisa_probe (struct device *device)
+static int __init el3_eisa_probe (struct device *device)
{
short i;
int ioaddr, irq, if_port;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 99f43d275442..8cc22568ebd3 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -901,14 +901,14 @@ static const struct dev_pm_ops vortex_pm_ops = {
#endif /* !CONFIG_PM */
#ifdef CONFIG_EISA
-static const struct eisa_device_id vortex_eisa_ids[] __devinitconst = {
+static struct eisa_device_id vortex_eisa_ids[] = {
{ "TCM5920", CH_3C592 },
{ "TCM5970", CH_3C597 },
{ "" }
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
-static int __devinit vortex_eisa_probe(struct device *device)
+static int __init vortex_eisa_probe(struct device *device)
{
void __iomem *ioaddr;
struct eisa_device *edev;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 0c9217f48b72..7b3e23f38913 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -50,7 +50,7 @@ static const char version[] =
#ifdef __arm__
static void write_rreg(u_long base, u_int reg, u_int val)
{
- __asm__(
+ asm volatile(
"str%?h %1, [%2] @ NET_RAP\n\t"
"str%?h %0, [%2, #-4] @ NET_RDP"
:
@@ -60,7 +60,7 @@ static void write_rreg(u_long base, u_int reg, u_int val)
static inline unsigned short read_rreg(u_long base_addr, u_int reg)
{
unsigned short v;
- __asm__(
+ asm volatile(
"str%?h %1, [%2] @ NET_RAP\n\t"
"ldr%?h %0, [%2, #-4] @ NET_RDP"
: "=r" (v)
@@ -70,7 +70,7 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
static inline void write_ireg(u_long base, u_int reg, u_int val)
{
- __asm__(
+ asm volatile(
"str%?h %1, [%2] @ NET_RAP\n\t"
"str%?h %0, [%2, #8] @ NET_IDP"
:
@@ -80,7 +80,7 @@ static inline void write_ireg(u_long base, u_int reg, u_int val)
static inline unsigned short read_ireg(u_long base_addr, u_int reg)
{
u_short v;
- __asm__(
+ asm volatile(
"str%?h %1, [%2] @ NAT_RAP\n\t"
"ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
: "=r" (v)
@@ -91,47 +91,48 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
-static inline void
+static void
am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
{
offset = ISAMEM_BASE + (offset << 1);
length = (length + 1) & ~1;
if ((int)buf & 2) {
- __asm__ __volatile__("str%?h %2, [%0], #4"
+ asm volatile("str%?h %2, [%0], #4"
: "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
buf += 2;
length -= 2;
}
while (length > 8) {
- unsigned int tmp, tmp2;
- __asm__ __volatile__(
- "ldm%?ia %1!, {%2, %3}\n\t"
+ register unsigned int tmp asm("r2"), tmp2 asm("r3");
+ asm volatile(
+ "ldm%?ia %0!, {%1, %2}"
+ : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
+ length -= 8;
+ asm volatile(
+ "str%?h %1, [%0], #4\n\t"
+ "mov%? %1, %1, lsr #16\n\t"
+ "str%?h %1, [%0], #4\n\t"
"str%?h %2, [%0], #4\n\t"
"mov%? %2, %2, lsr #16\n\t"
- "str%?h %2, [%0], #4\n\t"
- "str%?h %3, [%0], #4\n\t"
- "mov%? %3, %3, lsr #16\n\t"
- "str%?h %3, [%0], #4"
- : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2)
- : "0" (offset), "1" (buf));
- length -= 8;
+ "str%?h %2, [%0], #4"
+ : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
}
while (length > 0) {
- __asm__ __volatile__("str%?h %2, [%0], #4"
+ asm volatile("str%?h %2, [%0], #4"
: "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
buf += 2;
length -= 2;
}
}
-static inline void
+static void
am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
{
offset = ISAMEM_BASE + (offset << 1);
length = (length + 1) & ~1;
if ((int)buf & 2) {
unsigned int tmp;
- __asm__ __volatile__(
+ asm volatile(
"ldr%?h %2, [%0], #4\n\t"
"str%?b %2, [%1], #1\n\t"
"mov%? %2, %2, lsr #8\n\t"
@@ -140,12 +141,12 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
length -= 2;
}
while (length > 8) {
- unsigned int tmp, tmp2, tmp3;
- __asm__ __volatile__(
+ register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
+ asm volatile(
"ldr%?h %2, [%0], #4\n\t"
+ "ldr%?h %4, [%0], #4\n\t"
"ldr%?h %3, [%0], #4\n\t"
- "orr%? %2, %2, %3, lsl #16\n\t"
- "ldr%?h %3, [%0], #4\n\t"
+ "orr%? %2, %2, %4, lsl #16\n\t"
"ldr%?h %4, [%0], #4\n\t"
"orr%? %3, %3, %4, lsl #16\n\t"
"stm%?ia %1!, {%2, %3}"
@@ -155,7 +156,7 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
}
while (length > 0) {
unsigned int tmp;
- __asm__ __volatile__(
+ asm volatile(
"ldr%?h %2, [%0], #4\n\t"
"str%?b %2, [%1], #1\n\t"
"mov%? %2, %2, lsr #8\n\t"
@@ -196,6 +197,42 @@ am79c961_ramtest(struct net_device *dev, unsigned int val)
return errorcount;
}
+static void am79c961_mc_hash(char *addr, u16 *hash)
+{
+ if (addr[0] & 0x01) {
+ int idx, bit;
+ u32 crc;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+
+ idx = crc >> 30;
+ bit = (crc >> 26) & 15;
+
+ hash[idx] |= 1 << bit;
+ }
+}
+
+static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
+{
+ unsigned int mode = MODE_PORT_10BT;
+
+ if (dev->flags & IFF_PROMISC) {
+ mode |= MODE_PROMISC;
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else if (dev->flags & IFF_ALLMULTI) {
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(hash, 0, 4 * sizeof(*hash));
+
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, hash);
+ }
+
+ return mode;
+}
+
static void
am79c961_init_for_open(struct net_device *dev)
{
@@ -203,6 +240,7 @@ am79c961_init_for_open(struct net_device *dev)
unsigned long flags;
unsigned char *p;
u_int hdr_addr, first_free_addr;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
int i;
/*
@@ -218,16 +256,12 @@ am79c961_init_for_open(struct net_device *dev)
write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
for (i = LADRL; i <= LADRH; i++)
- write_rreg (dev->base_addr, i, 0);
+ write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
- i = MODE_PORT_10BT;
- if (dev->flags & IFF_PROMISC)
- i |= MODE_PROMISC;
-
- write_rreg (dev->base_addr, MODE, i);
+ write_rreg (dev->base_addr, MODE, mode);
write_rreg (dev->base_addr, POLLINT, 0);
write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
@@ -340,21 +374,6 @@ am79c961_close(struct net_device *dev)
return 0;
}
-static void am79c961_mc_hash(char *addr, unsigned short *hash)
-{
- if (addr[0] & 0x01) {
- int idx, bit;
- u32 crc;
-
- crc = ether_crc_le(ETH_ALEN, addr);
-
- idx = crc >> 30;
- bit = (crc >> 26) & 15;
-
- hash[idx] |= 1 << bit;
- }
-}
-
/*
* Set or clear promiscuous/multicast mode filter for this adapter.
*/
@@ -362,24 +381,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
{
struct dev_priv *priv = netdev_priv(dev);
unsigned long flags;
- unsigned short multi_hash[4], mode;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
int i, stopped;
- mode = MODE_PORT_10BT;
-
- if (dev->flags & IFF_PROMISC) {
- mode |= MODE_PROMISC;
- } else if (dev->flags & IFF_ALLMULTI) {
- memset(multi_hash, 0xff, sizeof(multi_hash));
- } else {
- struct netdev_hw_addr *ha;
-
- memset(multi_hash, 0x00, sizeof(multi_hash));
-
- netdev_for_each_mc_addr(ha, dev)
- am79c961_mc_hash(ha->addr, multi_hash);
- }
-
spin_lock_irqsave(&priv->chip_lock, flags);
stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 5a77001b6d10..0b46b8ea0e80 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -283,10 +283,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
skb = dev_alloc_skb(length + 2);
if (likely(skb != NULL)) {
+ struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
skb_reserve(skb, 2);
- dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+ dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
length, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
+ dma_sync_single_for_device(dev->dev.parent,
+ rxd->buf_addr, length,
+ DMA_FROM_DEVICE);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
@@ -348,6 +352,7 @@ poll_some_more:
static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ep93xx_priv *ep = netdev_priv(dev);
+ struct ep93xx_tdesc *txd;
int entry;
if (unlikely(skb->len > MAX_PKT_SIZE)) {
@@ -359,11 +364,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
entry = ep->tx_pointer;
ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
- ep->descs->tdesc[entry].tdesc1 =
- TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+ txd = &ep->descs->tdesc[entry];
+
+ txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+ dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
+ DMA_TO_DEVICE);
skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
- dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
- skb->len, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb(skb);
spin_lock_irq(&ep->tx_pending_lock);
@@ -457,89 +465,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
static void ep93xx_free_buffers(struct ep93xx_priv *ep)
{
+ struct device *dev = ep->dev->dev.parent;
int i;
- for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
dma_addr_t d;
d = ep->descs->rdesc[i].buf_addr;
if (d)
- dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
if (ep->rx_buf[i] != NULL)
- free_page((unsigned long)ep->rx_buf[i]);
+ kfree(ep->rx_buf[i]);
}
- for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
dma_addr_t d;
d = ep->descs->tdesc[i].buf_addr;
if (d)
- dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
if (ep->tx_buf[i] != NULL)
- free_page((unsigned long)ep->tx_buf[i]);
+ kfree(ep->tx_buf[i]);
}
- dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
+ dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
ep->descs_dma_addr);
}
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
+ struct device *dev = ep->dev->dev.parent;
int i;
- ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
- &ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
+ ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
+ &ep->descs_dma_addr, GFP_KERNEL);
if (ep->descs == NULL)
return 1;
- for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
- void *page;
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ void *buf;
dma_addr_t d;
- page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
- if (page == NULL)
+ buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+ if (buf == NULL)
goto err;
- d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(NULL, d)) {
- free_page((unsigned long)page);
+ d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, d)) {
+ kfree(buf);
goto err;
}
- ep->rx_buf[i] = page;
+ ep->rx_buf[i] = buf;
ep->descs->rdesc[i].buf_addr = d;
ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
- ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
- ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
- ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
}
- for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
- void *page;
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+ void *buf;
dma_addr_t d;
- page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
- if (page == NULL)
+ buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+ if (buf == NULL)
goto err;
- d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(NULL, d)) {
- free_page((unsigned long)page);
+ d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, d)) {
+ kfree(buf);
goto err;
}
- ep->tx_buf[i] = page;
+ ep->tx_buf[i] = buf;
ep->descs->tdesc[i].buf_addr = d;
-
- ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
- ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
}
return 0;
@@ -829,6 +828,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
}
ep = netdev_priv(dev);
ep->dev = dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 68d45ba2d9b9..6c019e148546 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -52,13 +52,13 @@ MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:bfin_mac");
#if defined(CONFIG_BFIN_MAC_USE_L1)
-# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size)
-# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr)
+# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
+# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
#else
-# define bfin_mac_alloc(dma_handle, size) \
- dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
-# define bfin_mac_free(dma_handle, ptr) \
- dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
+# define bfin_mac_alloc(dma_handle, size, num) \
+ dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
+# define bfin_mac_free(dma_handle, ptr, num) \
+ dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
#endif
#define PKT_BUF_SZ 1580
@@ -95,7 +95,7 @@ static void desc_list_free(void)
t = t->next;
}
}
- bfin_mac_free(dma_handle, tx_desc);
+ bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
}
if (rx_desc) {
@@ -109,7 +109,7 @@ static void desc_list_free(void)
r = r->next;
}
}
- bfin_mac_free(dma_handle, rx_desc);
+ bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
}
}
@@ -126,13 +126,13 @@ static int desc_list_init(void)
#endif
tx_desc = bfin_mac_alloc(&dma_handle,
- sizeof(struct net_dma_desc_tx) *
+ sizeof(struct net_dma_desc_tx),
CONFIG_BFIN_TX_DESC_NUM);
if (tx_desc == NULL)
goto init_error;
rx_desc = bfin_mac_alloc(&dma_handle,
- sizeof(struct net_dma_desc_rx) *
+ sizeof(struct net_dma_desc_rx),
CONFIG_BFIN_RX_DESC_NUM);
if (rx_desc == NULL)
goto init_error;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6141667c5fb7..eafe44a528ac 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -113,9 +113,11 @@ MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
-MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on failover event (alias of num_unsol_na)");
+MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
+ "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
-MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on failover event (alias of num_grat_arp)");
+MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
+ "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
@@ -127,7 +129,7 @@ module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
"0 for off, 1 for on (default)");
module_param(mode, charp, 0);
-MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
+MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
"1 for active-backup, 2 for balance-xor, "
"3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
"6 for balance-alb");
@@ -142,27 +144,35 @@ MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
"2 for only on active slave "
"failure");
module_param(lacp_rate, charp, 0);
-MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
- "(slow/fast)");
+MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
+ "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
-MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic: stable (0, default), bandwidth (1), count (2)");
+MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
+ "0 for stable (default), 1 for bandwidth, "
+ "2 for count");
module_param(xmit_hash_policy, charp, 0);
-MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)"
- ", 1 for layer 3+4");
+MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
+ "0 for layer 2 (default), 1 for layer 3+4, "
+ "2 for layer 2+3");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
-MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
+MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
+ "0 for none (default), 1 for active, "
+ "2 for backup, 3 for all");
module_param(fail_over_mac, charp, 0);
-MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow");
+MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
+ "the same MAC; 0 for none (default), "
+ "1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
- "by setting active flag for all slaves. "
+ "by setting active flag for all slaves; "
"0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
-MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link failure");
+MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
+ "link failure");
/*----------------------------- Global variables ----------------------------*/
@@ -378,6 +388,8 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
return next;
}
+#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
+
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@@ -390,6 +402,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
{
skb->dev = slave_dev;
skb->priority = 1;
+
+ skb->queue_mapping = bond_queue_mapping(skb);
+
if (unlikely(netpoll_tx_running(slave_dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
else
@@ -1282,6 +1297,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
goto out;
np->dev = slave->dev;
+ strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
err = __netpoll_setup(np);
if (err) {
kfree(np);
@@ -4196,6 +4212,7 @@ static inline int bond_slave_override(struct bonding *bond,
return res;
}
+
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
{
/*
@@ -4206,6 +4223,11 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
*/
u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+ /*
+ * Save the original txq to restore before passing to the driver
+ */
+ bond_queue_mapping(skb) = skb->queue_mapping;
+
if (unlikely(txq >= dev->real_num_tx_queues)) {
do {
txq -= dev->real_num_tx_queues;
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 73c7e03617ec..3df0c0f8b8bf 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -167,8 +167,8 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
#endif
-static unsigned int ldisc_receive(struct tty_struct *tty,
- const u8 *data, char *flags, int count)
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+ char *flags, int count)
{
struct sk_buff *skb = NULL;
struct ser_device *ser;
@@ -215,8 +215,6 @@ static unsigned int ldisc_receive(struct tty_struct *tty,
} else
++ser->dev->stats.rx_dropped;
update_tty_status(ser);
-
- return count;
}
static int handle_tx(struct ser_device *ser)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index d4990568baee..17678117ed69 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -923,7 +923,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
mem_size = resource_size(mem);
if (!request_mem_region(mem->start, mem_size, pdev->name)) {
err = -EBUSY;
- goto failed_req;
+ goto failed_get;
}
base = ioremap(mem->start, mem_size);
@@ -977,9 +977,8 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
iounmap(base);
failed_map:
release_mem_region(mem->start, mem_size);
- failed_req:
- clk_put(clk);
failed_get:
+ clk_put(clk);
failed_clock:
return err;
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 75622d54581f..1b49df6b2470 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -425,17 +425,16 @@ static void slc_setup(struct net_device *dev)
* in parallel
*/
-static unsigned int slcan_receive_buf(struct tty_struct *tty,
+static void slcan_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
- int bytes = count;
if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
- return -ENODEV;
+ return;
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -444,8 +443,6 @@ static unsigned int slcan_receive_buf(struct tty_struct *tty,
}
slcan_unesc(sl, *cp++);
}
-
- return count;
}
/************************************
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 807b6bb200eb..dcc4a170b0f3 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1772,7 +1772,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
/* obtain emac clock from kernel */
emac_clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(emac_clk)) {
- printk(KERN_ERR "DaVinci EMAC: Failed to get EMAC clock\n");
+ dev_err(&pdev->dev, "failed to get EMAC clock\n");
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
@@ -1780,9 +1780,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct emac_priv));
if (!ndev) {
- printk(KERN_ERR "DaVinci EMAC: Error allocating net_device\n");
- clk_put(emac_clk);
- return -ENOMEM;
+ dev_err(&pdev->dev, "error allocating net_device\n");
+ rc = -ENOMEM;
+ goto free_clk;
}
platform_set_drvdata(pdev, ndev);
@@ -1795,8 +1795,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (!pdata) {
- printk(KERN_ERR "DaVinci EMAC: No platform data\n");
- return -ENODEV;
+ dev_err(&pdev->dev, "no platform data\n");
+ rc = -ENODEV;
+ goto probe_quit;
}
/* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1814,7 +1815,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
/* Get EMAC platform data */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(emac_dev, "DaVinci EMAC: Error getting res\n");
+ dev_err(&pdev->dev, "error getting res\n");
rc = -ENOENT;
goto probe_quit;
}
@@ -1822,14 +1823,14 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
size = res->end - res->start + 1;
if (!request_mem_region(res->start, size, ndev->name)) {
- dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n");
+ dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
rc = -ENXIO;
goto probe_quit;
}
priv->remap_addr = ioremap(res->start, size);
if (!priv->remap_addr) {
- dev_err(emac_dev, "Unable to map IO\n");
+ dev_err(&pdev->dev, "unable to map IO\n");
rc = -ENOMEM;
release_mem_region(res->start, size);
goto probe_quit;
@@ -1863,7 +1864,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->dma = cpdma_ctlr_create(&dma_params);
if (!priv->dma) {
- dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
+ dev_err(&pdev->dev, "error initializing DMA\n");
rc = -ENOMEM;
goto no_dma;
}
@@ -1879,7 +1880,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
- dev_err(emac_dev, "DaVinci EMAC: Error getting irq res\n");
+ dev_err(&pdev->dev, "error getting irq res\n");
rc = -ENOENT;
goto no_irq_res;
}
@@ -1888,8 +1889,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(priv->mac_addr)) {
/* Use random MAC if none passed */
random_ether_addr(priv->mac_addr);
- printk(KERN_WARNING "%s: using random MAC addr: %pM\n",
- __func__, priv->mac_addr);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
}
ndev->netdev_ops = &emac_netdev_ops;
@@ -1902,7 +1903,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
if (rc) {
- dev_err(emac_dev, "DaVinci EMAC: Error in register_netdev\n");
+ dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
goto netdev_reg_err;
}
@@ -1929,8 +1930,9 @@ no_dma:
iounmap(priv->remap_addr);
probe_quit:
- clk_put(emac_clk);
free_netdev(ndev);
+free_clk:
+ clk_put(emac_clk);
return rc;
}
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 17654059922d..8b0084d17c8c 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -331,18 +331,18 @@ static struct {
"DE422",\
""}
-static const char* const depca_signature[] __devinitconst = DEPCA_SIGNATURE;
+static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
enum depca_type {
DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
};
-static const char depca_string[] = "depca";
+static char depca_string[] = "depca";
static int depca_device_remove (struct device *device);
#ifdef CONFIG_EISA
-static const struct eisa_device_id depca_eisa_ids[] __devinitconst = {
+static struct eisa_device_id depca_eisa_ids[] = {
{ "DEC4220", de422 },
{ "" }
};
@@ -367,19 +367,19 @@ static struct eisa_driver depca_eisa_driver = {
#define DE210_ID 0x628d
#define DE212_ID 0x6def
-static const short depca_mca_adapter_ids[] __devinitconst = {
+static short depca_mca_adapter_ids[] = {
DE210_ID,
DE212_ID,
0x0000
};
-static const char *depca_mca_adapter_name[] = {
+static char *depca_mca_adapter_name[] = {
"DEC EtherWORKS MC Adapter (DE210)",
"DEC EtherWORKS MC Adapter (DE212)",
NULL
};
-static const enum depca_type depca_mca_adapter_type[] = {
+static enum depca_type depca_mca_adapter_type[] = {
de210,
de212,
0
@@ -541,9 +541,10 @@ static void SetMulticastFilter(struct net_device *dev);
static int load_packet(struct net_device *dev, struct sk_buff *skb);
static void depca_dbg_open(struct net_device *dev);
-static const u_char de1xx_irq[] __devinitconst = { 2, 3, 4, 5, 7, 9, 0 };
-static const u_char de2xx_irq[] __devinitconst = { 5, 9, 10, 11, 15, 0 };
-static const u_char de422_irq[] __devinitconst = { 5, 9, 10, 11, 0 };
+static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
+static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
+static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
+static u_char *depca_irq;
static int irq;
static int io;
@@ -579,7 +580,7 @@ static const struct net_device_ops depca_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit depca_hw_init (struct net_device *dev, struct device *device)
+static int __init depca_hw_init (struct net_device *dev, struct device *device)
{
struct depca_private *lp;
int i, j, offset, netRAM, mem_len, status = 0;
@@ -747,7 +748,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
if (dev->irq < 2) {
unsigned char irqnum;
unsigned long irq_mask, delay;
- const u_char *depca_irq;
irq_mask = probe_irq_on();
@@ -770,7 +770,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
break;
default:
- depca_irq = NULL;
break; /* Not reached */
}
@@ -1303,7 +1302,7 @@ static void SetMulticastFilter(struct net_device *dev)
}
}
-static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
+static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
{
int status = 0;
@@ -1334,7 +1333,7 @@ static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
/*
** Microchannel bus I/O device probe
*/
-static int __devinit depca_mca_probe(struct device *device)
+static int __init depca_mca_probe(struct device *device)
{
unsigned char pos[2];
unsigned char where;
@@ -1458,7 +1457,7 @@ static int __devinit depca_mca_probe(struct device *device)
** ISA bus I/O device probe
*/
-static void __devinit depca_platform_probe (void)
+static void __init depca_platform_probe (void)
{
int i;
struct platform_device *pldev;
@@ -1498,7 +1497,7 @@ static void __devinit depca_platform_probe (void)
}
}
-static enum depca_type __devinit depca_shmem_probe (ulong *mem_start)
+static enum depca_type __init depca_shmem_probe (ulong *mem_start)
{
u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
enum depca_type adapter = unknown;
@@ -1559,7 +1558,7 @@ static int __devinit depca_isa_probe (struct platform_device *device)
*/
#ifdef CONFIG_EISA
-static int __devinit depca_eisa_probe (struct device *device)
+static int __init depca_eisa_probe (struct device *device)
{
enum depca_type adapter = unknown;
struct eisa_device *edev;
@@ -1630,7 +1629,7 @@ static int __devexit depca_device_remove (struct device *device)
** and Boot (readb) ROM. This will also give us a clue to the network RAM
** base address.
*/
-static int __devinit DepcaSignature(char *name, u_long base_addr)
+static int __init DepcaSignature(char *name, u_long base_addr)
{
u_int i, j, k;
void __iomem *ptr;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index c445457b66d5..23179dbcedd2 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -346,7 +346,7 @@ parse_eeprom (struct net_device *dev)
if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
/* Check CRC */
crc = ~ether_crc_le (256 - 4, sromdata);
- if (psrom->crc != crc) {
+ if (psrom->crc != cpu_to_le32(crc)) {
printk (KERN_ERR "%s: EEPROM data CRC error.\n",
dev->name);
return -1;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index fbaff3584bd4..ee597e676ee5 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1157,9 +1157,6 @@ dm9000_open(struct net_device *dev)
irqflags |= IRQF_SHARED;
- if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
- return -EAGAIN;
-
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needs by DM9000B */
@@ -1168,6 +1165,9 @@ dm9000_open(struct net_device *dev)
dm9000_reset(db);
dm9000_init_dm9000(dev);
+ if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
+ return -EAGAIN;
+
/* Init driver variable */
db->dbug_cnt = 0;
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 7a84e45487e8..7583a9572bcc 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -105,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
goto out_ep;
fep->fcc.mem = (void __iomem *)cpm2_immr;
- fpi->dpram_offset = cpm_dpalloc(128, 8);
+ fpi->dpram_offset = cpm_dpalloc(128, 32);
if (IS_ERR_VALUE(fpi->dpram_offset)) {
ret = fpi->dpram_offset;
goto out_fcccp;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ff60b23a5b74..2dfcc8047847 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -10,7 +10,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
* Copyright 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -476,9 +476,6 @@ static const struct net_device_ops gfar_netdev_ops = {
#endif
};
-unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
-unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
-
void lock_rx_qs(struct gfar_private *priv)
{
int i = 0x0;
@@ -868,28 +865,28 @@ static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
rqfar--;
rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
- ftp_rqfpr[rqfar] = rqfpr;
- ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar--;
rqfcr = RQFCR_CMP_NOMATCH;
- ftp_rqfpr[rqfar] = rqfpr;
- ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar--;
rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
rqfpr = class;
- ftp_rqfcr[rqfar] = rqfcr;
- ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar--;
rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
rqfpr = class;
- ftp_rqfcr[rqfar] = rqfcr;
- ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
return rqfar;
@@ -904,8 +901,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
/* Default rule */
rqfcr = RQFCR_CMP_MATCH;
- ftp_rqfcr[rqfar] = rqfcr;
- ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
@@ -921,8 +918,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
/* Rest are masked rules */
rqfcr = RQFCR_CMP_NOMATCH;
for (i = 0; i < rqfar; i++) {
- ftp_rqfcr[i] = rqfcr;
- ftp_rqfpr[i] = rqfpr;
+ priv->ftp_rqfcr[i] = rqfcr;
+ priv->ftp_rqfpr[i] = rqfpr;
gfar_write_filer(priv, i, rqfcr, rqfpr);
}
}
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index fc86f5195445..ba36dc7a3435 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -1107,10 +1107,12 @@ struct gfar_private {
/* HW time stamping enabled flag */
int hwts_rx_en;
int hwts_tx_en;
+
+ /*Filer table*/
+ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+ unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
};
-extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
-extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
static inline int gfar_has_errata(struct gfar_private *priv,
enum gfar_errata err)
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 493d743839d9..239e3330495f 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
+ * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
*
* This software may be used and distributed according to
* the terms of the GNU Public License, Version 2, incorporated herein
@@ -609,15 +609,15 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L2DA) {
fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -626,16 +626,16 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
if (ethflow & RXH_IP_SRC) {
fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -643,8 +643,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & (RXH_IP_DST)) {
fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -652,8 +652,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L3_PROTO) {
fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -661,8 +661,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_0_1) {
fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -670,8 +670,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_2_3) {
fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
RQFCR_AND | RQFCR_HASHTBL_0;
- ftp_rqfpr[priv->cur_filer_idx] = fpr;
- ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
@@ -705,12 +705,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
}
for (i = 0; i < MAX_FILER_IDX + 1; i++) {
- local_rqfpr[j] = ftp_rqfpr[i];
- local_rqfcr[j] = ftp_rqfcr[i];
+ local_rqfpr[j] = priv->ftp_rqfpr[i];
+ local_rqfcr[j] = priv->ftp_rqfcr[i];
j--;
- if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
+ if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
RQFCR_CLE |RQFCR_AND)) &&
- (ftp_rqfpr[i] == cmp_rqfpr))
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
break;
}
@@ -724,20 +724,22 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
* if it was already programmed, we need to overwrite these rules
*/
for (l = i+1; l < MAX_FILER_IDX; l++) {
- if ((ftp_rqfcr[l] & RQFCR_CLE) &&
- !(ftp_rqfcr[l] & RQFCR_AND)) {
- ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+ if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
- ftp_rqfpr[l] = FPR_FILER_MASK;
- gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
+ priv->ftp_rqfpr[l] = FPR_FILER_MASK;
+ gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
+ priv->ftp_rqfpr[l]);
break;
}
- if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
+ if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ (priv->ftp_rqfcr[l] & RQFCR_AND))
continue;
else {
- local_rqfpr[j] = ftp_rqfpr[l];
- local_rqfcr[j] = ftp_rqfcr[l];
+ local_rqfpr[j] = priv->ftp_rqfpr[l];
+ local_rqfcr[j] = priv->ftp_rqfcr[l];
j--;
}
}
@@ -750,8 +752,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
/* Write back the popped out rules again */
for (k = j+1; k < MAX_FILER_IDX; k++) {
- ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
- ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+ priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+ priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
gfar_write_filer(priv, priv->cur_filer_idx,
local_rqfcr[k], local_rqfpr[k]);
if (!priv->cur_filer_idx)
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 992089639ea4..3e5d0b6b6516 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -456,7 +456,7 @@ out:
* a block of 6pack data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing.
*/
-static unsigned int sixpack_receive_buf(struct tty_struct *tty,
+static void sixpack_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct sixpack *sp;
@@ -464,11 +464,11 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
int count1;
if (!count)
- return 0;
+ return;
sp = sp_get(tty);
if (!sp)
- return -ENODEV;
+ return;
memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
@@ -487,8 +487,6 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
sp_put(sp);
tty_unthrottle(tty);
-
- return count1;
}
/*
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 0e4f23531140..4c628393c8b1 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -923,14 +923,13 @@ static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
* a block of data has been received, which can now be decapsulated
* and sent on to the AX.25 layer for further processing.
*/
-static unsigned int mkiss_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct mkiss *ax = mkiss_get(tty);
- int bytes = count;
if (!ax)
- return -ENODEV;
+ return;
/*
* Argh! mtu change time! - costs us the packet part received
@@ -940,7 +939,7 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
ax_changedmtu(ax);
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp != NULL && *fp++) {
if (!test_and_set_bit(AXF_ERROR, &ax->flags))
ax->dev->stats.rx_errors++;
@@ -953,8 +952,6 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
mkiss_put(ax);
tty_unthrottle(tty);
-
- return count;
}
/*
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index c52a1df5d922..c3ecb118c1df 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -188,14 +188,14 @@ struct hp100_private {
* variables
*/
#ifdef CONFIG_ISA
-static const char *const hp100_isa_tbl[] __devinitconst = {
+static const char *hp100_isa_tbl[] = {
"HWPF150", /* HP J2573 rev A */
"HWP1950", /* HP J2573 */
};
#endif
#ifdef CONFIG_EISA
-static const struct eisa_device_id hp100_eisa_tbl[] __devinitconst = {
+static struct eisa_device_id hp100_eisa_tbl[] = {
{ "HWPF180" }, /* HP J2577 rev A */
{ "HWP1920" }, /* HP 27248B */
{ "HWP1940" }, /* HP J2577 */
@@ -336,7 +336,7 @@ static __devinit const char *hp100_read_id(int ioaddr)
}
#ifdef CONFIG_ISA
-static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
+static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
{
const char *sig;
int i;
@@ -372,7 +372,7 @@ static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
* EISA and PCI are handled by device infrastructure.
*/
-static int __devinit hp100_isa_probe(struct net_device *dev, int addr)
+static int __init hp100_isa_probe(struct net_device *dev, int addr)
{
int err = -ENODEV;
@@ -396,7 +396,7 @@ static int __devinit hp100_isa_probe(struct net_device *dev, int addr)
#endif /* CONFIG_ISA */
#if !defined(MODULE) && defined(CONFIG_ISA)
-struct net_device * __devinit hp100_probe(int unit)
+struct net_device * __init hp100_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
int err;
@@ -1580,12 +1580,12 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */
lp->txrcommit++;
- spin_unlock_irqrestore(&lp->lock, flags);
- /* Update statistics */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return NETDEV_TX_OK;
drop:
@@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d)
}
#ifdef CONFIG_EISA
-static int __devinit hp100_eisa_probe (struct device *gendev)
+static int __init hp100_eisa_probe (struct device *gendev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
struct eisa_device *edev = to_eisa_device(gendev);
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index b6060f7538df..a900d5bf2948 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -135,7 +135,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
}
/* Initialise a single lance board at the given DIO device */
-static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
+static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
{
unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
struct hplance_private *lp;
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 136d7544cc33..a7d6cad32953 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -895,12 +895,12 @@ static int ibmlana_irq;
static int ibmlana_io;
static int startslot; /* counts through slots when probing multiple devices */
-static const short ibmlana_adapter_ids[] __devinitconst = {
+static short ibmlana_adapter_ids[] __initdata = {
IBM_LANA_ID,
0x0000
};
-static const char *const ibmlana_adapter_names[] __devinitconst = {
+static char *ibmlana_adapter_names[] __devinitdata = {
"IBM LAN Adapter/A",
NULL
};
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 18fccf913635..2c28621eb30b 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2373,6 +2373,9 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
}
#endif /* CONFIG_PCI_IOV */
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+ /* i350 cannot do RSS and SR-IOV at the same time */
+ if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
+ adapter->rss_queues = 1;
/*
* if rss_queues > 4 or vfs are going to be allocated with rss_queues
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 035861d8acb1..3352b2443e58 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -216,23 +216,23 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
* usbserial: urb-complete-interrupt / softint
*/
-static unsigned int irtty_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct sir_dev *dev;
struct sirtty_cb *priv = tty->disc_data;
int i;
- IRDA_ASSERT(priv != NULL, return -ENODEV;);
- IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EINVAL;);
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
if (unlikely(count==0)) /* yes, this happens */
- return 0;
+ return;
dev = priv->dev;
if (!dev) {
IRDA_WARNING("%s(), not ready yet!\n", __func__);
- return -ENODEV;
+ return;
}
for (i = 0; i < count; i++) {
@@ -242,13 +242,11 @@ static unsigned int irtty_receive_buf(struct tty_struct *tty,
if (fp && *fp++) {
IRDA_DEBUG(0, "Framing or parity error!\n");
sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
- return -EINVAL;
+ return;
}
}
sirdev_receive(dev, cp, count);
-
- return count;
}
/*
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 69b5707db369..8800e1fe4129 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -222,19 +222,19 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
/* Probing */
-static int smsc_ircc_look_for_chips(void);
-static const struct smsc_chip * smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
-static int smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_fdc(unsigned short cfg_base);
-static int smsc_superio_lpc(unsigned short cfg_base);
+static int __init smsc_ircc_look_for_chips(void);
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
+static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_fdc(unsigned short cfg_base);
+static int __init smsc_superio_lpc(unsigned short cfg_base);
#ifdef CONFIG_PCI
-static int preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
-static int preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static void preconfigure_ali_port(struct pci_dev *dev,
+static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
+static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static void __init preconfigure_ali_port(struct pci_dev *dev,
unsigned short port);
-static int preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static int smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
unsigned short ircc_fir,
unsigned short ircc_sir,
unsigned char ircc_dma,
@@ -366,7 +366,7 @@ static inline void register_bank(int iobase, int bank)
}
/* PNP hotplug support */
-static const struct pnp_device_id smsc_ircc_pnp_table[] __devinitconst = {
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
{ .id = "SMCf010", .driver_data = 0 },
/* and presumably others */
{ }
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
* Try to open driver instance
*
*/
-static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
{
struct smsc_ircc_cb *self;
struct net_device *dev;
@@ -2273,7 +2273,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
}
-static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
+static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
{
IRDA_DEBUG(1, "%s\n", __func__);
@@ -2281,7 +2281,7 @@ static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
return inb(cfg_base) != reg ? -1 : 0;
}
-static const struct smsc_chip * __devinit smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
{
u8 devid, xdevid, rev;
@@ -2406,7 +2406,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
#ifdef CONFIG_PCI
#define PCIID_VENDOR_INTEL 0x8086
#define PCIID_VENDOR_ALI 0x10b9
-static const struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitconst = {
+static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
/*
* Subsystems needing entries:
* 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
@@ -2532,7 +2532,7 @@ static const struct smsc_ircc_subsystem_configuration subsystem_configurations[]
* (FIR port, SIR port, FIR DMA, FIR IRQ)
* through the chip configuration port.
*/
-static int __devinit preconfigure_smsc_chip(struct
+static int __init preconfigure_smsc_chip(struct
smsc_ircc_subsystem_configuration
*conf)
{
@@ -2633,7 +2633,7 @@ static int __devinit preconfigure_smsc_chip(struct
* or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
* They all work the same way!
*/
-static int __devinit preconfigure_through_82801(struct pci_dev *dev,
+static int __init preconfigure_through_82801(struct pci_dev *dev,
struct
smsc_ircc_subsystem_configuration
*conf)
@@ -2786,7 +2786,7 @@ static int __devinit preconfigure_through_82801(struct pci_dev *dev,
* This is based on reverse-engineering since ALi does not
* provide any data sheet for the 1533 chip.
*/
-static void __devinit preconfigure_ali_port(struct pci_dev *dev,
+static void __init preconfigure_ali_port(struct pci_dev *dev,
unsigned short port)
{
unsigned char reg;
@@ -2824,7 +2824,7 @@ static void __devinit preconfigure_ali_port(struct pci_dev *dev,
IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
}
-static int __devinit preconfigure_through_ali(struct pci_dev *dev,
+static int __init preconfigure_through_ali(struct pci_dev *dev,
struct
smsc_ircc_subsystem_configuration
*conf)
@@ -2837,7 +2837,7 @@ static int __devinit preconfigure_through_ali(struct pci_dev *dev,
return preconfigure_smsc_chip(conf);
}
-static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
unsigned short ircc_fir,
unsigned short ircc_sir,
unsigned char ircc_dma,
@@ -2849,7 +2849,7 @@ static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
int ret = 0;
for_each_pci_dev(dev) {
- const struct smsc_ircc_subsystem_configuration *conf;
+ struct smsc_ircc_subsystem_configuration *conf;
/*
* Cache the subsystem vendor/device:
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 4d40626b3bfa..fc12ac0d9f2e 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -661,7 +661,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
/* check the status */
if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
- struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);
if (skb) {
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index e8984b0ca521..243ed2aee88e 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -80,20 +80,17 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
#define NE3210_DEBUG 0x0
-static const unsigned char irq_map[] __devinitconst =
- { 15, 12, 11, 10, 9, 7, 5, 3 };
-static const unsigned int shmem_map[] __devinitconst =
- { 0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0 };
-static const char *const ifmap[] __devinitconst =
- { "UTP", "?", "BNC", "AUI" };
-static const int ifmap_val[] __devinitconst = {
+static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
+static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
+static int ifmap_val[] __initdata = {
IF_PORT_10BASET,
IF_PORT_UNKNOWN,
IF_PORT_10BASE2,
IF_PORT_AUI,
};
-static int __devinit ne3210_eisa_probe (struct device *device)
+static int __init ne3210_eisa_probe (struct device *device)
{
unsigned long ioaddr, phys_mem;
int i, retval, port_index;
@@ -316,7 +313,7 @@ static void ne3210_block_output(struct net_device *dev, int count,
memcpy_toio(shmem, buf, count);
}
-static const struct eisa_device_id ne3210_ids[] __devinitconst = {
+static struct eisa_device_id ne3210_ids[] = {
{ "EGL0101" },
{ "NVL1801" },
{ "" },
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b644383017f9..c0788a31ff0f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1965,11 +1965,11 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netxen_tso_check(netdev, tx_ring, first_desc, skb);
- netxen_nic_update_cmd_producer(adapter, tx_ring);
-
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+
return NETDEV_TX_OK;
drop_packet:
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 392a6c4b72e5..a70244306c94 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -58,6 +58,7 @@ config BROADCOM_PHY
config BCM63XX_PHY
tristate "Drivers for Broadcom 63xx SOCs internal PHY"
+ depends on BCM63XX
---help---
Currently supports the 6348 and 6358 PHYs.
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b0c9522bb535..2cd8dc5847b4 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -543,11 +543,20 @@ static void recalibrate(struct dp83640_clock *clock)
/* time stamping methods */
-static void decode_evnt(struct dp83640_private *dp83640,
- struct phy_txts *phy_txts, u16 ests)
+static int decode_evnt(struct dp83640_private *dp83640,
+ void *data, u16 ests)
{
+ struct phy_txts *phy_txts;
struct ptp_clock_event event;
int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
+ u16 ext_status = 0;
+
+ if (ests & MULT_EVNT) {
+ ext_status = *(u16 *) data;
+ data += sizeof(ext_status);
+ }
+
+ phy_txts = data;
switch (words) { /* fall through in every case */
case 3:
@@ -565,6 +574,9 @@ static void decode_evnt(struct dp83640_private *dp83640,
event.timestamp = phy2txts(&dp83640->edata);
ptp_clock_event(dp83640->clock->ptp_clock, &event);
+
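+ /* Return the number of bytes consumed so the caller can skip
+ * past this event message within the status frame. */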
+ words = ext_status ? words + 2 : words + 1;
+ return words * sizeof(u16);
}
static void decode_rxts(struct dp83640_private *dp83640,
@@ -643,9 +655,7 @@ static void decode_status_frame(struct dp83640_private *dp83640,
} else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
- phy_txts = (struct phy_txts *) ptr;
- decode_evnt(dp83640, phy_txts, ests);
- size = sizeof(*phy_txts);
+ size = decode_evnt(dp83640, ptr, ests);
} else {
size = 0;
@@ -1034,8 +1044,8 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
if (is_status_frame(skb, type)) {
decode_status_frame(dp83640, skb);
- /* Let the stack drop this frame. */
- return false;
+ kfree_skb(skb);
+ return true;
}
SKB_PTP_TYPE(skb) = type;
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 53872d7d7382..c554a397e558 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -348,7 +348,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
unsigned long flags;
if (!ap)
- return -ENODEV;
+ return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_async_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -356,8 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
tasklet_schedule(&ap->tsk);
ap_put(ap);
tty_unthrottle(tty);
-
- return count;
}
static void
@@ -525,7 +523,7 @@ static void ppp_async_process(unsigned long arg)
#define PUT_BYTE(ap, buf, c, islcp) do { \
if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
*buf++ = PPP_ESCAPE; \
- *buf++ = c ^ 0x20; \
+ *buf++ = c ^ PPP_TRANS; \
} else \
*buf++ = c; \
} while (0)
@@ -898,7 +896,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
sp = skb_put(skb, n);
memcpy(sp, buf, n);
if (ap->state & SC_ESCAPE) {
- sp[0] ^= 0x20;
+ sp[0] ^= PPP_TRANS;
ap->state &= ~SC_ESCAPE;
}
}
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 0815790a5cf9..2573f525f11c 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -381,7 +381,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -389,7 +389,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
unsigned long flags;
if (!ap)
- return -ENODEV;
+ return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_sync_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -397,8 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
tasklet_schedule(&ap->tsk);
sp_put(ap);
tty_unthrottle(tty);
-
- return count;
}
static void
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 89f7540d90f9..5f597ca592bb 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1273,7 +1273,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += length;
stats->tx_packets++;
dev->trans_start = jiffies;
if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e9656616f2a2..a5d9fbf9d816 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1406,6 +1406,7 @@ qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
for (loop = 0; loop < que->no_ops; loop++) {
QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
+ addr = que->read_addr;
for (i = 0; i < cnt; i++) {
QLCNIC_RD_DUMP_REG(addr, base, &data);
*buffer++ = cpu_to_le32(data);
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 3ab7d2c7baf2..0f6af5c61a7c 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2159,6 +2159,7 @@ qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ pbuf->skb = NULL;
}
static inline void
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index ef1ce2ebeb4a..05d81780d1fd 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1621,7 +1621,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
*
* (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
*/
- static const struct {
+ static const struct rtl_mac_info {
u32 mask;
u32 val;
int mac_version;
@@ -1689,7 +1689,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
/* Catch-all */
{ 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
- }, *p = mac_info;
+ };
+ const struct rtl_mac_info *p = mac_info;
u32 reg;
reg = RTL_R32(TxConfig);
@@ -3681,7 +3682,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{
- static const struct {
+ static const struct rtl_cfg2_info {
u32 mac_version;
u32 clk;
u32 val;
@@ -3690,7 +3691,8 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
- }, *p = cfg2_info;
+ };
+ const struct rtl_cfg2_info *p = cfg2_info;
unsigned int i;
u32 clk;
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index e646bfce2d84..b6304486f244 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -216,7 +216,7 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
int rc;
for (;;) {
- rc = del_mtd_device(&part->mtd);
+ rc = mtd_device_unregister(&part->mtd);
if (rc != -EBUSY)
break;
ssleep(1);
@@ -268,7 +268,7 @@ static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
part->mtd.write = efx_mtd->ops->write;
part->mtd.sync = efx_mtd_sync;
- if (add_mtd_device(&part->mtd))
+ if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
}
@@ -280,7 +280,7 @@ fail:
--part;
efx_mtd_remove_partition(part);
}
- /* add_mtd_device() returns 1 if the MTD table is full */
+ /* mtd_device_register() returns 1 if the MTD table is full */
return -ENOMEM;
}
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 584809c656d5..8ec1a9a0bb9a 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -670,17 +670,16 @@ static void sl_setup(struct net_device *dev)
* in parallel
*/
-static unsigned int slip_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct slip *sl = tty->disc_data;
- int bytes = count;
if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
- return -ENODEV;
+ return;
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -694,8 +693,6 @@ static unsigned int slip_receive_buf(struct tty_struct *tty,
#endif
slip_unesc(sl, *cp++);
}
-
- return count;
}
/************************************
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index 0f29f261fcfe..d07c39cb4daf 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -156,7 +156,7 @@ static const struct {
{ 14, 15 }
};
-static const short smc_mca_adapter_ids[] __devinitconst = {
+static short smc_mca_adapter_ids[] __initdata = {
0x61c8,
0x61c9,
0x6fc0,
@@ -168,7 +168,7 @@ static const short smc_mca_adapter_ids[] __devinitconst = {
0x0000
};
-static const char *const smc_mca_adapter_names[] __devinitconst = {
+static char *smc_mca_adapter_names[] __initdata = {
"SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
"SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
"WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
@@ -199,7 +199,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
#endif
};
-static int __devinit ultramca_probe(struct device *gen_dev)
+static int __init ultramca_probe(struct device *gen_dev)
{
unsigned short ioaddr;
struct net_device *dev;
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index dc4805f473e3..f6285748bd3c 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2400,8 +2400,10 @@ static const struct of_device_id smc91x_match[] = {
{ .compatible = "smsc,lan91c94", },
{ .compatible = "smsc,lan91c111", },
{},
-}
+};
MODULE_DEVICE_TABLE(of, smc91x_match);
+#else
+#define smc91x_match NULL
#endif
static struct dev_pm_ops smc_drv_pm_ops = {
@@ -2416,9 +2418,7 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
-#ifdef CONFIG_OF
.of_match_table = smc91x_match,
-#endif
},
};
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f4b01c638a33..a1f9f9eef37d 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5774,7 +5774,7 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
dma_unmap_addr(txb, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
- for (i = 0; i <= last; i++) {
+ for (i = 0; i < last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
entry = NEXT_TX(entry);
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 1313aa1315f0..2bedc0ace812 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -727,7 +727,7 @@ static int __devexit madgemc_remove(struct device *device)
return 0;
}
-static const short madgemc_adapter_ids[] __devinitconst = {
+static short madgemc_adapter_ids[] __initdata = {
0x002d,
0x0000
};
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 45144d5bd11b..efaa1d69b720 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1995,7 +1995,7 @@ SetMulticastFilter(struct net_device *dev)
static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
-static int __devinit de4x5_eisa_probe (struct device *gendev)
+static int __init de4x5_eisa_probe (struct device *gendev)
{
struct eisa_device *edev;
u_long iobase;
@@ -2097,7 +2097,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
return 0;
}
-static const struct eisa_device_id de4x5_eisa_ids[] __devinitconst = {
+static struct eisa_device_id de4x5_eisa_ids[] = {
{ "DEC4250", 0 }, /* 0 is the board name index... */
{ "" }
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 74e94054ab1a..5235f48be1be 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -460,7 +460,23 @@ static u32 tun_net_fix_features(struct net_device *dev, u32 features)
return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
-
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tun_poll_controller(struct net_device *dev)
+{
+ /*
+ * Tun only receives frames when:
+ * 1) the char device endpoint gets data from user space
+ * 2) the tun socket gets a sendmsg call from user space
+ * Since both of those are synchronous operations, we are guaranteed
+ * never to have pending data when we poll for it, so there is
+ * nothing to do here but return.
+ * We need this though so netpoll recognizes us as an interface that
+ * supports polling, which enables bridge devices in virt setups to
+ * still use netconsole.
+ */
+ return;
+}
+#endif
static const struct net_device_ops tun_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
@@ -468,6 +484,9 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_start_xmit = tun_net_xmit,
.ndo_change_mtu = tun_net_change_mtu,
.ndo_fix_features = tun_net_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tun_poll_controller,
+#endif
};
static const struct net_device_ops tap_netdev_ops = {
@@ -480,6 +499,9 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_multicast_list = tun_net_mclist,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tun_poll_controller,
+#endif
};
/* Initialize net device. */
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 9d4f9117260f..84d4608153c9 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,6 +385,16 @@ config USB_NET_CX82310_ETH
router with USB ethernet port. This driver is for routers only,
it will not work with ADSL modems (use cxacru driver instead).
+config USB_NET_KALMIA
+ tristate "Samsung Kalmia based LTE USB modem"
+ depends on USB_USBNET
+ help
+ Choose this option if you have a Samsung Kalmia based USB modem
+ such as the Samsung GT-B3730.
+
+ To compile this driver as a module, choose M here: the
+ module will be called kalmia.
+
config USB_HSO
tristate "Option USB High Speed Mobile Devices"
depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index c7ec8a5f0a90..c203fa21f6b1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
+obj-$(CONFIG_USB_NET_KALMIA) += kalmia.o
obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index d7221c4a5dcf..8056f8a27c6a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -495,7 +495,7 @@ static void catc_ctrl_run(struct catc *catc)
if (!q->dir && q->buf && q->len)
memcpy(catc->ctrl_buf, q->buf, q->len);
- if ((status = usb_submit_urb(catc->ctrl_urb, GFP_KERNEL)))
+ if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
err("submit(ctrl_urb) status %d", status);
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index cdd3ae486109..f33ca6aa29e9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,7 +54,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "24-May-2011"
+#define DRIVER_VERSION "01-June-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -1234,6 +1234,7 @@ static struct usb_driver cdc_ncm_driver = {
.disconnect = cdc_ncm_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
+ .reset_resume = usbnet_resume,
.supports_autosuspend = 1,
};
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
new file mode 100644
index 000000000000..d965fb1e013e
--- /dev/null
+++ b/drivers/net/usb/kalmia.c
@@ -0,0 +1,384 @@
+/*
+ * USB network interface driver for Samsung Kalmia based LTE USB modems
+ * such as the Samsung GT-B3730 and GT-B3710.
+ *
+ * Copyright (C) 2011 Marius Bjoernstad Kotsbak <marius@kotsbak.com>
+ *
+ * Sponsored by Quicklink Video Distribution Services Ltd.
+ *
+ * Based on the cdc_eem module.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ctype.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/gfp.h>
+
+/*
+ * The Samsung Kalmia based LTE USB modems have a CDC ACM port for modem control
+ * handled by the "option" module and an ethernet data port handled by this
+ * module.
+ *
+ * The stick must first be switched into modem mode by usb_modeswitch
+ * or a similar tool. This module then sends the modem two initialization
+ * packets, the response to which contains the device's MAC address. User
+ * space can then connect the modem using AT commands through the ACM port
+ * and use DHCP on the network interface exposed by this module. Network
+ * packets are sent to and from the modem in a proprietary framing format
+ * discovered by watching the behavior of the Windows driver for the modem.
+ *
+ * More information about the use of the modem is available in the
+ * usb_modeswitch forum and on the project page:
+ *
+ * http://www.draisberghof.de/usb_modeswitch/bb/viewtopic.php?t=465
+ * https://github.com/mkotsbak/Samsung-GT-B3730-linux-driver
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE */
+
+#define KALMIA_HEADER_LENGTH 6
+#define KALMIA_ALIGN_SIZE 4
+#define KALMIA_USB_TIMEOUT 10000
+
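+/*
+ * Each data frame is wrapped in a KALMIA_HEADER_LENGTH byte header:
+ * bytes 0-1 hold the magic 0x57 0x44, bytes 2-3 carry the payload
+ * length (little endian) and bytes 4-5 repeat the EtherType of the
+ * encapsulated frame. Frames are zero padded up to a multiple of
+ * KALMIA_ALIGN_SIZE bytes; see kalmia_tx_fixup() below.
+ */
+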
+/*-------------------------------------------------------------------------*/
+
+static int
+kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
+ u8 *buffer, u8 expected_len)
+{
+ int act_len;
+ int status;
+
+ netdev_dbg(dev->net, "Sending init packet");
+
+ status = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 0x02),
+ init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
+ if (status != 0) {
+ netdev_err(dev->net,
+ "Error sending init packet. Status %i, length %i\n",
+ status, act_len);
+ return status;
+ }
+ else if (act_len != init_msg_len) {
+ netdev_err(dev->net,
+ "Did not send all of init packet. Bytes sent: %i",
+ act_len);
+ }
+ else {
+ netdev_dbg(dev->net, "Successfully sent init packet.");
+ }
+
+ status = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 0x81),
+ buffer, expected_len, &act_len, KALMIA_USB_TIMEOUT);
+
+ if (status != 0)
+ netdev_err(dev->net,
+ "Error receiving init result. Status %i, length %i\n",
+ status, act_len);
+ else if (act_len != expected_len)
+ netdev_err(dev->net, "Unexpected init result length: %i\n",
+ act_len);
+
+ return status;
+}
+
+static int
+kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
+{
+ char init_msg_1[] =
+ { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+ 0x00, 0x00 };
+ char init_msg_2[] =
+ { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
+ 0x00, 0x00 };
+ char receive_buf[28];
+ int status;
+
+ status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1)
+ / sizeof(init_msg_1[0]), receive_buf, 24);
+ if (status != 0)
+ return status;
+
+ status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2)
+ / sizeof(init_msg_2[0]), receive_buf, 28);
+ if (status != 0)
+ return status;
+
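+ /* The device's MAC address is carried at offset 10 of the second reply. */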
+ memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN);
+
+ return status;
+}
+
+static int
+kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status;
+ u8 ethernet_addr[ETH_ALEN];
+
+ /* Don't bind to AT command interface */
+ if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+ return -EINVAL;
+
+ dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK);
+ dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
+ dev->status = NULL;
+
+ dev->net->hard_header_len += KALMIA_HEADER_LENGTH;
+ dev->hard_mtu = 1400;
+ dev->rx_urb_size = dev->hard_mtu * 10; /* found to be optimal after testing */
+
+ status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
+
+ if (status < 0) {
+ usb_set_intfdata(intf, NULL);
+ usb_driver_release_interface(driver_of(intf), intf);
+ return status;
+ }
+
+ memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
+ memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
+
+ return status;
+}
+
+static struct sk_buff *
+kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+ struct sk_buff *skb2 = NULL;
+ u16 content_len;
+ unsigned char *header_start;
+ unsigned char ether_type_1, ether_type_2;
+ u8 remainder, padlen = 0;
+
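+ /*
+ * Reuse the skb when it already has enough head and tail room for
+ * the header and padding; otherwise shift the data or allocate a
+ * bigger copy via skb_copy_expand().
+ */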
+ if (!skb_cloned(skb)) {
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+
+ if ((tailroom >= KALMIA_ALIGN_SIZE) && (headroom
+ >= KALMIA_HEADER_LENGTH))
+ goto done;
+
+ if ((headroom + tailroom) > (KALMIA_HEADER_LENGTH
+ + KALMIA_ALIGN_SIZE)) {
+ skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
+ skb->data, skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ goto done;
+ }
+ }
+
+ skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
+ KALMIA_ALIGN_SIZE, flags);
+ if (!skb2)
+ return NULL;
+
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+
+ done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
+ ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
+ ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
+
+ netdev_dbg(dev->net, "Sending etherType: %02x%02x", ether_type_1,
+ ether_type_2);
+
+ /* According to empirical data for data packets */
+ header_start[0] = 0x57;
+ header_start[1] = 0x44;
+ content_len = skb->len - KALMIA_HEADER_LENGTH;
+ header_start[2] = (content_len & 0xff); /* low byte */
+ header_start[3] = (content_len >> 8); /* high byte */
+
+ header_start[4] = ether_type_1;
+ header_start[5] = ether_type_2;
+
+ /* Align to 4 bytes by padding with zeros */
+ remainder = skb->len % KALMIA_ALIGN_SIZE;
+ if (remainder > 0) {
+ padlen = KALMIA_ALIGN_SIZE - remainder;
+ memset(skb_put(skb, padlen), 0, padlen);
+ }
+
+ netdev_dbg(
+ dev->net,
+ "Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
+ content_len, padlen, header_start[0], header_start[1],
+ header_start[2], header_start[3], header_start[4],
+ header_start[5]);
+
+ return skb;
+}
+
+static int
+kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ /*
+ * Our task here is to strip off framing, leaving skb with one
+ * data frame for the usbnet framework code to process.
+ */
+ const u8 HEADER_END_OF_USB_PACKET[] =
+ { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
+ const u8 EXPECTED_UNKNOWN_HEADER_1[] =
+ { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
+ const u8 EXPECTED_UNKNOWN_HEADER_2[] =
+ { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
+ u8 i = 0;
+
+ /* incomplete header? */
+ if (skb->len < KALMIA_HEADER_LENGTH)
+ return 0;
+
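+ /*
+ * A single USB packet may carry several framed ethernet packets.
+ * Hand every frame but the last to usbnet via usbnet_skb_return()
+ * and leave the last one in skb for the framework to process.
+ */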
+ do {
+ struct sk_buff *skb2 = NULL;
+ u8 *header_start;
+ u16 usb_packet_length, ether_packet_length;
+ int is_last;
+
+ header_start = skb->data;
+
+ if (unlikely(header_start[0] != 0x57 || header_start[1] != 0x44)) {
+ if (!memcmp(header_start, EXPECTED_UNKNOWN_HEADER_1,
+ sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
+ header_start, EXPECTED_UNKNOWN_HEADER_2,
+ sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
+ netdev_dbg(
+ dev->net,
+ "Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
+ header_start[0], header_start[1],
+ header_start[2], header_start[3],
+ header_start[4], header_start[5],
+ skb->len - KALMIA_HEADER_LENGTH);
+ }
+ else {
+ netdev_err(
+ dev->net,
+ "Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
+ header_start[0], header_start[1],
+ header_start[2], header_start[3],
+ header_start[4], header_start[5],
+ skb->len - KALMIA_HEADER_LENGTH);
+ return 0;
+ }
+ }
+ else
+ netdev_dbg(
+ dev->net,
+ "Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
+ header_start[0], header_start[1], header_start[2],
+ header_start[3], header_start[4], header_start[5],
+ skb->len - KALMIA_HEADER_LENGTH);
+
+ /* subtract start header and end header */
+ usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
+ ether_packet_length = header_start[2] + (header_start[3] << 8);
+ skb_pull(skb, KALMIA_HEADER_LENGTH);
+
+ /* Some small packets miss the end marker */
+ if (usb_packet_length < ether_packet_length) {
+ ether_packet_length = usb_packet_length
+ + KALMIA_HEADER_LENGTH;
+ is_last = true;
+ }
+ else {
+ netdev_dbg(dev->net, "Correct package length #%i", i
+ + 1);
+
+ is_last = (memcmp(skb->data + ether_packet_length,
+ HEADER_END_OF_USB_PACKET,
+ sizeof(HEADER_END_OF_USB_PACKET)) == 0);
+ if (!is_last) {
+ header_start = skb->data + ether_packet_length;
+ netdev_dbg(
+ dev->net,
+ "End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
+ header_start[0], header_start[1],
+ header_start[2], header_start[3],
+ header_start[4], header_start[5],
+ skb->len - KALMIA_HEADER_LENGTH);
+ }
+ }
+
+ if (is_last) {
+ skb2 = skb;
+ }
+ else {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb2))
+ return 0;
+ }
+
+ skb_trim(skb2, ether_packet_length);
+
+ if (is_last) {
+ return 1;
+ }
+ else {
+ usbnet_skb_return(dev, skb2);
+ skb_pull(skb, ether_packet_length);
+ }
+
+ i++;
+ }
+ while (skb->len);
+
+ return 1;
+}
+
+static const struct driver_info kalmia_info = {
+ .description = "Samsung Kalmia LTE USB dongle",
+ .flags = FLAG_WWAN,
+ .bind = kalmia_bind,
+ .rx_fixup = kalmia_rx_fixup,
+ .tx_fixup = kalmia_tx_fixup
+};
+
+/*-------------------------------------------------------------------------*/
+
+static const struct usb_device_id products[] = {
+ /* The unswitched USB ID, to get the module auto loaded: */
+ { USB_DEVICE(0x04e8, 0x689a) },
+ /* The stick switched into modem mode (by e.g. usb_modeswitch): */
+ { USB_DEVICE(0x04e8, 0x6889),
+ .driver_info = (unsigned long) &kalmia_info, },
+ { /* EMPTY == end of list */} };
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver kalmia_driver = {
+ .name = "kalmia",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume
+};
+
+static int __init kalmia_init(void)
+{
+ return usb_register(&kalmia_driver);
+}
+module_init(kalmia_init);
+
+static void __exit kalmia_exit(void)
+{
+ usb_deregister(&kalmia_driver);
+}
+module_exit(kalmia_exit);
+
+MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
+MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0cb0b0632672..f6853247a620 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -609,7 +609,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
* before it gets out of hand. Naturally, this wastes entries. */
if (capacity < 2+MAX_SKB_FRAGS) {
netif_stop_queue(dev);
- if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+ if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
/* More just got used, free them then recheck. */
capacity += free_old_xmit_skbs(vi);
if (capacity >= 2+MAX_SKB_FRAGS) {
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index e050bd65e037..777d1a4e81b2 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2203,8 +2203,10 @@ fst_open(struct net_device *dev)
if (port->mode != FST_RAW) {
err = hdlc_open(dev);
- if (err)
+ if (err) {
+ module_put(THIS_MODULE);
return err;
+ }
}
fst_openport(port);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 40398bf7d036..24297b274cd4 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -517,18 +517,17 @@ static int x25_asy_close(struct net_device *dev)
* and sent on to some IP layer for further processing.
*/
-static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
+static void x25_asy_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct x25_asy *sl = tty->disc_data;
- int bytes = count;
if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
return;
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -537,8 +536,6 @@ static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
}
x25_asy_unesc(sl, *cp++);
}
-
- return count;
}
/*
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 22047628ccfa..b6c5d3715b96 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -72,6 +72,11 @@ static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
+static int modparam_fastchanswitch;
+module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
+MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
+
+
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
@@ -2686,6 +2691,7 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
int ret, ani_mode;
+ bool fast;
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
@@ -2705,7 +2711,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
ath5k_drain_tx_buffs(sc);
if (chan)
sc->curchan = chan;
- ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
+
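+ /* Only attempt a fast channel change when a new channel is given
+ * and the user enabled it via the fastchanswitch module parameter. */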
+ fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
+
+ ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast,
skip_pcu);
if (ret) {
ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 3510de2cf622..126a4eab35f3 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1124,8 +1124,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Non fatal, can happen eg.
* on mode change */
ret = 0;
- } else
+ } else {
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
+ "fast chan change successful\n");
return 0;
+ }
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d9ff8413ab9a..d9c08c619a3a 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -26,7 +26,6 @@ config ATH9K
config ATH9K_PCI
bool "Atheros ath9k PCI/PCIe bus support"
depends on ATH9K && PCI
- default PCI
---help---
This option enables the PCI bus support in ath9k.
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 015d97439935..2d4c0910295b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -829,7 +829,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (AR_SREV_9271(ah)) {
if (!ar9285_hw_cl_cal(ah, chan))
return false;
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ } else if (AR_SREV_9285(ah) && AR_SREV_9285_12_OR_LATER(ah)) {
if (!ar9285_hw_clc(ah, chan))
return false;
} else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 0ca7635d0669..ff8150e46f0e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4645,10 +4645,16 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
case 1:
break;
case 2:
- scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ else
+ scaledPower = 0;
break;
case 3:
- scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ else
+ scaledPower = 0;
break;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index eee23ecd118a..892c48b15434 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1381,3 +1381,25 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
"==== BB update: done ====\n\n");
}
EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
+
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
+{
+ u32 val;
+
+ /* While receiving an unsupported rate frame, the rx state machine
+ * gets into state 0xb and, if a phy_restart happens in that
+ * state, the BB would hang. If the RXSM is in state 0xb after the
+ * first bb panic, make sure the phy_restart is disabled.
+ */
+ if (!((MS(ah->bb_watchdog_last_status,
+ AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
+ ah->bb_hang_rx_ofdm))
+ return;
+
+ ah->bb_hang_rx_ofdm = true;
+ val = REG_READ(ah, AR_PHY_RESTART);
+ val &= ~AR_PHY_RESTART_ENA;
+
+ REG_WRITE(ah, AR_PHY_RESTART, val);
+}
+EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 7856f0d4512d..343fc9f946db 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -524,10 +524,16 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
case 1:
break;
case 2:
- scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ else
+ scaledPower = 0;
break;
case 3:
- scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ else
+ scaledPower = 0;
break;
}
scaledPower = max((u16)0, scaledPower);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 72543ce8f616..1be7c8bbef84 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1555,9 +1555,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->btcoex_hw.enabled)
ath9k_hw_btcoex_enable(ah);
- if (AR_SREV_9300_20_OR_LATER(ah))
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
+ ar9003_hw_disable_phy_restart(ah);
+ }
+
ath9k_hw_apply_gpio_override(ah);
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 57435ce62792..4b157c53d1a8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -842,6 +842,7 @@ struct ath_hw {
u32 bb_watchdog_last_status;
u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
+ u8 bb_hang_rx_ofdm; /* true if bb hang due to rx_ofdm */
unsigned int paprd_target_power;
unsigned int paprd_training_power;
@@ -990,6 +991,7 @@ void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah);
void ar9003_paprd_enable(struct ath_hw *ah, bool val);
void ar9003_paprd_populate_single_table(struct ath_hw *ah,
struct ath9k_hw_cal_data *caldata,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a198ee374b05..2ca351fe6d3c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -670,7 +670,8 @@ void ath9k_tasklet(unsigned long data)
u32 status = sc->intrstatus;
u32 rxmask;
- if (status & ATH9K_INT_FATAL) {
+ if ((status & ATH9K_INT_FATAL) ||
+ (status & ATH9K_INT_BB_WATCHDOG)) {
ath_reset(sc, true);
return;
}
@@ -737,6 +738,7 @@ irqreturn_t ath_isr(int irq, void *dev)
{
#define SCHED_INTR ( \
ATH9K_INT_FATAL | \
+ ATH9K_INT_BB_WATCHDOG | \
ATH9K_INT_RXORN | \
ATH9K_INT_RXEOL | \
ATH9K_INT_RX | \
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 17542214c93f..ba7f36ab0a74 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -689,7 +689,8 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
rate->flags |= IEEE80211_TX_RC_MCS;
- if (WLAN_RC_PHY_40(rate_table->info[rix].phy))
+ if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
+ conf_is_ht40(&txrc->hw->conf))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 9ed65157bef5..05960ddde24e 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -3093,7 +3093,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
int freq;
bool avoid = false;
u8 length;
- u16 tmp, core, type, count, max, numb, last, cmd;
+ u16 tmp, core, type, count, max, numb, last = 0, cmd;
const u16 *table;
bool phy6or5x;
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
index 7e5e85a017b5..a7a4739880dc 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -628,11 +628,11 @@ void iwl4965_rx_reply_rx(struct iwl_priv *priv,
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
rx_status.band);
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.rate_idx =
iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
rx_status.flag = 0;
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index f5433c74b845..facc94e74b07 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1218,10 +1218,10 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
- if (priv->switch_rxon.switch_in_progress &&
- (priv->switch_rxon.channel != ctx->staging.channel)) {
+ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+ (priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
- le16_to_cpu(priv->switch_rxon.channel));
+ le16_to_cpu(priv->switch_channel));
iwl_legacy_chswitch_done(priv, false);
}
@@ -1237,7 +1237,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
iwl_legacy_print_rx_config_cmd(priv, ctx);
- return 0;
+ goto set_tx_power;
}
/* If we are currently associated and the new config requires
@@ -1317,6 +1317,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
iwl4965_init_sensitivity(priv);
+set_tx_power:
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
@@ -1403,9 +1404,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
return rc;
}
- priv->switch_rxon.channel = cmd.channel;
- priv->switch_rxon.switch_in_progress = true;
-
return iwl_legacy_send_cmd_pdu(priv,
REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
@@ -1543,7 +1541,7 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
s32 temp;
temp = iwl4965_hw_get_temperature(priv);
- if (temp < 0)
+ if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
return;
if (priv->temperature != temp) {
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 42df8321dae8..3be76bd5499a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -859,12 +859,8 @@ void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (priv->switch_rxon.switch_in_progress) {
+ if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
ieee80211_chswitch_done(ctx->vif, is_success);
- mutex_lock(&priv->mutex);
- priv->switch_rxon.switch_in_progress = false;
- mutex_unlock(&priv->mutex);
- }
}
EXPORT_SYMBOL(iwl_legacy_chswitch_done);
@@ -876,19 +872,19 @@ void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
- if (priv->switch_rxon.switch_in_progress) {
- if (!le32_to_cpu(csa->status) &&
- (csa->channel == priv->switch_rxon.channel)) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ return;
+
+ if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, false);
- }
+ iwl_legacy_chswitch_done(priv, true);
+ } else {
+ IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_legacy_chswitch_done(priv, false);
}
}
EXPORT_SYMBOL(iwl_legacy_rx_csa);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
index bc66c604106c..c5fbda0760de 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -560,7 +560,7 @@ void iwl_legacy_free_geos(struct iwl_priv *priv);
#define STATUS_SCAN_HW 15
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
-
+#define STATUS_CHANNEL_SWITCH_PENDING 18
static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
{
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
index be0106c6a2da..ea30122669ee 100644
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -855,17 +855,6 @@ struct traffic_stats {
};
/*
- * iwl_switch_rxon: "channel switch" structure
- *
- * @ switch_in_progress: channel switch in progress
- * @ channel: new channel
- */
-struct iwl_switch_rxon {
- bool switch_in_progress;
- __le16 channel;
-};
-
-/*
* schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
* to perform continuous uCode event logging operation if enabled
*/
@@ -1115,7 +1104,7 @@ struct iwl_priv {
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
- struct iwl_switch_rxon switch_rxon;
+ __le16 switch_channel;
/* 1st responses from initialize and runtime uCode images.
* _4965's initialize alive response contains some calibration data. */
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index af2ae22fcfd3..7157ba529680 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -2861,16 +2861,13 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
goto out;
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
+ test_bit(STATUS_SCANNING, &priv->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
goto out;
if (!iwl_legacy_is_associated_ctx(ctx))
goto out;
- /* channel switch in progress */
- if (priv->switch_rxon.switch_in_progress == true)
- goto out;
-
if (priv->cfg->ops->lib->set_channel_switch) {
ch = channel->hw_value;
@@ -2919,15 +2916,18 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
* at this point, staging_rxon has the
* configuration for channel switch
*/
- if (priv->cfg->ops->lib->set_channel_switch(priv,
- ch_switch))
- priv->switch_rxon.switch_in_progress = false;
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ priv->switch_channel = cpu_to_le16(ch);
+ if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+ &priv->status);
+ priv->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
}
}
out:
mutex_unlock(&priv->mutex);
- if (!priv->switch_rxon.switch_in_progress)
- ieee80211_chswitch_done(ctx->vif, false);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 86feec86d130..2282279cffc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -177,79 +177,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
return 0;
}
-static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch)
-{
- /*
- * MULTI-FIXME
- * See iwl_mac_channel_switch.
- */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl6000_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
- u32 switch_time_in_usec, ucode_switch_time;
- u16 ch;
- u32 tsf_low;
- u8 switch_count;
- u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
- struct ieee80211_vif *vif = ctx->vif;
- struct iwl_host_cmd hcmd = {
- .id = REPLY_CHANNEL_SWITCH,
- .len = { sizeof(cmd), },
- .flags = CMD_SYNC,
- .data = { &cmd, },
- };
-
- cmd.band = priv->band == IEEE80211_BAND_2GHZ;
- ch = ch_switch->channel->hw_value;
- IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
- ctx->active.channel, ch);
- cmd.channel = cpu_to_le16(ch);
- cmd.rxon_flags = ctx->staging.flags;
- cmd.rxon_filter_flags = ctx->staging.filter_flags;
- switch_count = ch_switch->count;
- tsf_low = ch_switch->timestamp & 0x0ffffffff;
- /*
- * calculate the ucode channel switch time
- * adding TSF as one of the factor for when to switch
- */
- if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
- if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
- beacon_interval)) {
- switch_count -= (priv->ucode_beacon_time -
- tsf_low) / beacon_interval;
- } else
- switch_count = 0;
- }
- if (switch_count <= 1)
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
- else {
- switch_time_in_usec =
- vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
- ucode_switch_time = iwl_usecs_to_beacons(priv,
- switch_time_in_usec,
- beacon_interval);
- cmd.switch_time = iwl_add_beacon_time(priv,
- priv->ucode_beacon_time,
- ucode_switch_time,
- beacon_interval);
- }
- IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
- cmd.switch_time);
- ch_info = iwl_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
- priv->switch_rxon.channel = cmd.channel;
- priv->switch_rxon.switch_in_progress = true;
-
- return iwl_send_cmd_sync(priv, &hcmd);
-}
-
static struct iwl_lib_ops iwl2000_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
.rx_handler_setup = iwlagn_rx_handler_setup,
@@ -258,7 +185,6 @@ static struct iwl_lib_ops iwl2000_lib = {
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
- .set_channel_switch = iwl2030_hw_channel_switch,
.apm_ops = {
.init = iwl_apm_init,
.config = iwl2000_nic_config,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a70b8cfafda1..f99f9c193352 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -331,8 +331,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
ctx->active.channel, ch);
return -EFAULT;
}
- priv->switch_rxon.channel = cmd.channel;
- priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_sync(priv, &hcmd);
}
@@ -425,7 +423,6 @@ static struct iwl_base_params iwl5000_base_params = {
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
};
#define IWL_DEVICE_5000 \
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index f8c710db6e6f..fbe565c816e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -270,8 +270,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
ctx->active.channel, ch);
return -EFAULT;
}
- priv->switch_rxon.channel = cmd.channel;
- priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_sync(priv, &hcmd);
}
@@ -603,19 +601,27 @@ struct iwl_cfg iwl6050_2abg_cfg = {
IWL_DEVICE_6050,
};
+#define IWL_DEVICE_6150 \
+ .fw_name_pre = IWL6050_FW_PRE, \
+ .ucode_api_max = IWL6050_UCODE_API_MAX, \
+ .ucode_api_min = IWL6050_UCODE_API_MIN, \
+ .ops = &iwl6150_ops, \
+ .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
+ .base_params = &iwl6050_base_params, \
+ .need_dc_calib = true, \
+ .led_mode = IWL_LED_BLINK, \
+ .internal_wimax_coex = true
+
struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
- .fw_name_pre = IWL6050_FW_PRE,
- .ucode_api_max = IWL6050_UCODE_API_MAX,
- .ucode_api_min = IWL6050_UCODE_API_MIN,
- .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
- .ops = &iwl6150_ops,
- .base_params = &iwl6050_base_params,
+ IWL_DEVICE_6150,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
- .led_mode = IWL_LED_RF_STATE,
- .internal_wimax_coex = true,
+};
+
+struct iwl_cfg iwl6150_bg_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
+ IWL_DEVICE_6150,
};
struct iwl_cfg iwl6000_3agn_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index b12c72d63ccb..23fa93deae96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -163,17 +163,9 @@ static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
__le16 fc, __le32 *tx_flags)
{
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
- info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
+ info->flags & IEEE80211_TX_CTL_AMPDU)
*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
- return;
- }
-
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation &&
- info->flags & IEEE80211_TX_CTL_AMPDU) {
- *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
- return;
- }
}
/* Calc max signal level (dBm) among 3 possible receivers */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index a95ad84c5377..09f679d6046f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -325,6 +325,14 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return 0;
}
+ /*
+ * force CTS-to-self frame protection if RTS-CTS is not the
+ * preferred aggregation protection method
+ */
+ if (!(priv->cfg->ht_params &&
+ priv->cfg->ht_params->use_rts_for_aggregation))
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -342,10 +350,10 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* receive commit_rxon request
* abort any previous channel switch if still in process
*/
- if (priv->switch_rxon.switch_in_progress &&
- (priv->switch_rxon.channel != ctx->staging.channel)) {
+ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+ (priv->switch_channel != ctx->staging.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
- le16_to_cpu(priv->switch_rxon.channel));
+ le16_to_cpu(priv->switch_channel));
iwl_chswitch_done(priv, false);
}
@@ -362,6 +370,11 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
}
memcpy(active, &ctx->staging, sizeof(*active));
+ /*
+ * We do not commit tx power settings while the channel is changing;
+ * do it now if the tx power settings have changed.
+ */
+ iwl_set_tx_power(priv, priv->tx_power_next, false);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 11c6c1169e78..8e1942ebd9a0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2843,16 +2843,13 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
goto out;
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
+ test_bit(STATUS_SCANNING, &priv->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
goto out;
if (!iwl_is_associated_ctx(ctx))
goto out;
- /* channel switch in progress */
- if (priv->switch_rxon.switch_in_progress == true)
- goto out;
-
if (priv->cfg->ops->lib->set_channel_switch) {
ch = channel->hw_value;
@@ -2901,15 +2898,19 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
* at this point, staging_rxon has the
* configuration for channel switch
*/
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ priv->switch_channel = cpu_to_le16(ch);
if (priv->cfg->ops->lib->set_channel_switch(priv,
- ch_switch))
- priv->switch_rxon.switch_in_progress = false;
+ ch_switch)) {
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+ &priv->status);
+ priv->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
}
}
out:
mutex_unlock(&priv->mutex);
- if (!priv->switch_rxon.switch_in_progress)
- ieee80211_chswitch_done(ctx->vif, false);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -3831,11 +3832,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 6150 WiFi/WiMax Series */
{IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
{IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
{IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
/* 1000 Series WiFi */
{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 2495fe7a58cb..d1716844002e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -89,6 +89,7 @@ extern struct iwl_cfg iwl6000_3agn_cfg;
extern struct iwl_cfg iwl6050_2agn_cfg;
extern struct iwl_cfg iwl6050_2abg_cfg;
extern struct iwl_cfg iwl6150_bgn_cfg;
+extern struct iwl_cfg iwl6150_bg_cfg;
extern struct iwl_cfg iwl1000_bgn_cfg;
extern struct iwl_cfg iwl1000_bg_cfg;
extern struct iwl_cfg iwl100_bgn_cfg;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4653deada05b..213c80c6a668 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -843,12 +843,8 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (priv->switch_rxon.switch_in_progress) {
+ if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
ieee80211_chswitch_done(ctx->vif, is_success);
- mutex_lock(&priv->mutex);
- priv->switch_rxon.switch_in_progress = false;
- mutex_unlock(&priv->mutex);
- }
}
#ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 3bb76f6ea410..a54d416ec345 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -560,6 +560,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
#define STATUS_DEVICE_ENABLED 18
+#define STATUS_CHANNEL_SWITCH_PENDING 19
static inline int iwl_is_ready(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 22a6e3ec7094..c8de236c141b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -982,17 +982,6 @@ struct traffic_stats {
};
/*
- * iwl_switch_rxon: "channel switch" structure
- *
- * @ switch_in_progress: channel switch in progress
- * @ channel: new channel
- */
-struct iwl_switch_rxon {
- bool switch_in_progress;
- __le16 channel;
-};
-
-/*
* schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
* to perform continuous uCode event logging operation if enabled
*/
@@ -1287,7 +1276,7 @@ struct iwl_priv {
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
- struct iwl_switch_rxon switch_rxon;
+ __le16 switch_channel;
struct {
u32 error_event_table;
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 0053e9ea9021..b774517aa9fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -250,19 +250,19 @@ static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
- if (priv->switch_rxon.switch_in_progress) {
- if (!le32_to_cpu(csa->status) &&
- (csa->channel == priv->switch_rxon.channel)) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ return;
+
+ if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
- iwl_chswitch_done(priv, false);
- }
+ iwl_chswitch_done(priv, true);
+ } else {
+ IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_chswitch_done(priv, false);
}
}
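/*
 * Illustrative stand-alone sketch of the pattern used above: the old
 * switch_in_progress bool guarded by priv->mutex is replaced by the
 * STATUS_CHANNEL_SWITCH_PENDING bit in priv->status, so completion can
 * be reported with a single atomic test-and-clear instead of taking the
 * mutex.  Plain C stand-ins for the kernel bitops; names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define CHANNEL_SWITCH_PENDING	19	/* mirrors STATUS_CHANNEL_SWITCH_PENDING */

static unsigned long status;		/* stand-in for priv->status */

static void set_pending(unsigned long *word, int bit)
{
	__atomic_fetch_or(word, 1UL << bit, __ATOMIC_SEQ_CST);
}

static bool test_and_clear_pending(unsigned long *word, int bit)
{
	/* atomic read-modify-write, like the kernel's test_and_clear_bit() */
	unsigned long old = __atomic_fetch_and(word, ~(1UL << bit), __ATOMIC_SEQ_CST);
	return old & (1UL << bit);
}

int main(void)
{
	set_pending(&status, CHANNEL_SWITCH_PENDING);	/* switch requested */

	/* chswitch_done(): only report completion if a switch was pending */
	if (test_and_clear_pending(&status, CHANNEL_SWITCH_PENDING))
		printf("channel switch completed\n");

	/* a second notification finds the bit already clear and is ignored */
	if (!test_and_clear_pending(&status, CHANNEL_SWITCH_PENDING))
		printf("no switch pending, nothing to do\n");
	return 0;
}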
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 84566db486d2..71c8f3fccfa1 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -994,6 +994,8 @@ static void lbs_submit_command(struct lbs_private *priv,
cmd = cmdnode->cmdbuf;
spin_lock_irqsave(&priv->driver_lock, flags);
+ priv->seqnum++;
+ cmd->seqnum = cpu_to_le16(priv->seqnum);
priv->cur_cmd = cmdnode;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -1621,11 +1623,9 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
/* Copy the incoming command to the buffer */
memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);
- /* Set sequence number, clean result, move to buffer */
- priv->seqnum++;
+ /* Set command, clean result, move to buffer */
cmdnode->cmdbuf->command = cpu_to_le16(command);
cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
- cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
cmdnode->cmdbuf->result = 0;
lbs_deb_host("PREP_CMD: command 0x%04x\n", command);
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index a7b5cb0c2753..224e9853c480 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -907,7 +907,7 @@ static void if_sdio_interrupt(struct sdio_func *func)
card = sdio_get_drvdata(func);
cause = sdio_readb(card->func, IF_SDIO_H_INT_STATUS, &ret);
- if (ret)
+ if (ret || !cause)
goto out;
lbs_deb_sdio("interrupt: 0x%X\n", (unsigned)cause);
@@ -1008,10 +1008,6 @@ static int if_sdio_probe(struct sdio_func *func,
if (ret)
goto release;
- ret = sdio_claim_irq(func, if_sdio_interrupt);
- if (ret)
- goto disable;
-
/* For 1-bit transfers to the 8686 model, we need to enable the
* interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
* bit to allow access to non-vendor registers. */
@@ -1083,6 +1079,21 @@ static int if_sdio_probe(struct sdio_func *func,
card->rx_unit = 0;
/*
+ * Set up the interrupt handler late.
+ *
+ * If we set it up earlier, the (buggy) hardware generates a spurious
+ * interrupt, even before the interrupt has been enabled, with
+ * CCCR_INTx = 0.
+ *
+ * We register the interrupt handler late so that we can handle any
+ * spurious interrupts, and also to avoid generation of that known
+ * spurious interrupt in the first place.
+ */
+ ret = sdio_claim_irq(func, if_sdio_interrupt);
+ if (ret)
+ goto disable;
+
+ /*
* Enable interrupts now that everything is set up
*/
sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 660831ce293c..687c1f223497 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1288,6 +1288,8 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
*(unsigned long *) wdev_priv = (unsigned long) priv;
+ set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
+
ret = wiphy_register(wdev->wiphy);
if (ret < 0) {
dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index a0e9bc5253e0..4e97e90aa399 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -167,8 +167,8 @@
/* Rx unit register */
#define CARD_RX_UNIT_REG 0x63
-/* Event header Len*/
-#define MWIFIEX_EVENT_HEADER_LEN 8
+/* Event header len w/o 4 bytes of interface header */
+#define MWIFIEX_EVENT_HEADER_LEN 4
/* Max retry number of CMD53 write */
#define MAX_WRITE_IOMEM_RETRY 2
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 32261189bcef..aeac3cc4dbe4 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2474,6 +2474,7 @@ struct mwl8k_cmd_set_hw_spec {
* faster client.
*/
#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400
+#define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR 0x00000200
#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
@@ -2510,7 +2511,8 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
- MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY);
+ MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY |
+ MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR);
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 9def1e5369a1..b2f8b8fd4d2d 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -166,7 +166,6 @@ config RT2800USB_RT35XX
config RT2800USB_RT53XX
bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
depends on EXPERIMENTAL
- default y
---help---
This adds support for rt53xx wireless chipset family to the
rt2800pci driver.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 555180d8f4aa..b704e5b183d0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -250,7 +250,8 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL)
rt2x00link_reset_tuner(rt2x00dev, false);
- if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
+ if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
+ test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
(ieee80211_flags & IEEE80211_CONF_CHANGE_PS) &&
(conf->flags & IEEE80211_CONF_PS)) {
beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index c018d67aab8e..939821b4af2f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -146,6 +146,9 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
struct rt2x00_dev *rt2x00dev =
container_of(work, struct rt2x00_dev, autowakeup_work.work);
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return;
+
if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
ERROR(rt2x00dev, "Device failed to wakeup.\n");
clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
@@ -1160,6 +1163,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Stop all work.
*/
cancel_work_sync(&rt2x00dev->intf_work);
+ cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
if (rt2x00_is_usb(rt2x00dev)) {
del_timer_sync(&rt2x00dev->txstatus_timer);
cancel_work_sync(&rt2x00dev->rxdone_work);
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index a40952845436..9f8ccae93317 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -669,6 +669,14 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
&rx_status,
(u8 *) pdesc, skb);
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb)) {
+ RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
+ DBG_DMESG,
+ ("can't alloc skb for rx\n"));
+ goto done;
+ }
+
pci_unmap_single(rtlpci->pdev,
*((dma_addr_t *) skb->cb),
rtlpci->rxbuffersize,
@@ -690,7 +698,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
hdr = rtl_get_hdr(skb);
fc = rtl_get_fc(skb);
- if (!stats.crc || !stats.hwerror) {
+ if (!stats.crc && !stats.hwerror) {
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
sizeof(rx_status));
@@ -758,15 +766,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
rtl_lps_leave(hw);
}
- new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
- if (unlikely(!new_skb)) {
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
- DBG_DMESG,
- ("can't alloc skb for rx\n"));
- goto done;
- }
skb = new_skb;
- /*skb->dev = dev; */
rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
rx_ring
@@ -1113,6 +1113,13 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
rtlpci->rx_ring[rx_queue_idx].idx = 0;
+ /* If amsdu_8k is disabled, set buffersize to 4096. This
+ * change will reduce memory fragmentation.
+ */
+ if (rtlpci->rxbuffersize > 4096 &&
+ rtlpriv->rtlhal.disable_amsdu_8k)
+ rtlpci->rxbuffersize = 4096;
+
for (i = 0; i < rtlpci->rxringcount; i++) {
struct sk_buff *skb =
dev_alloc_skb(rtlpci->rxbuffersize);
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 1ab6c86aac40..c83fefb6662f 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -1157,6 +1157,9 @@ struct conf_sched_scan_settings {
/* time to wait on the channel for passive scans (in TUs) */
u32 dwell_time_passive;
+ /* time to wait on the channel for DFS scans (in TUs) */
+ u32 dwell_time_dfs;
+
/* number of probe requests to send on each channel in active scans */
u8 num_probe_reqs;
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index bc00e52f6445..e6497dc669df 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -311,6 +311,7 @@ static struct conf_drv_settings default_conf = {
.min_dwell_time_active = 8,
.max_dwell_time_active = 30,
.dwell_time_passive = 100,
+ .dwell_time_dfs = 150,
.num_probe_reqs = 2,
.rssi_threshold = -90,
.snr_threshold = 0,
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index f37e5a391976..56f76abc754d 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -331,16 +331,22 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, j;
u32 flags;
+ bool force_passive = !req->n_ssids;
for (i = 0, j = start;
i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS;
i++) {
flags = req->channels[i]->flags;
- if (!(flags & IEEE80211_CHAN_DISABLED) &&
- ((flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive) &&
- ((flags & IEEE80211_CHAN_RADAR) == radar) &&
- (req->channels[i]->band == band)) {
+ if (force_passive)
+ flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if ((req->channels[i]->band == band) &&
+ !(flags & IEEE80211_CHAN_DISABLED) &&
+ (!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
+ /* if radar is set, we ignore the passive flag */
+ (radar ||
+ !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
req->channels[i]->band,
req->channels[i]->center_freq);
@@ -350,7 +356,12 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
wl1271_debug(DEBUG_SCAN, "max_power %d",
req->channels[i]->max_power);
- if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+ if (flags & IEEE80211_CHAN_RADAR) {
+ channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
+ channels[j].passive_duration =
+ cpu_to_le16(c->dwell_time_dfs);
+ } else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_passive);
} else {
@@ -359,7 +370,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
channels[j].max_duration =
cpu_to_le16(c->max_dwell_time_active);
}
- channels[j].tx_power_att = req->channels[j]->max_power;
+ channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
j++;
@@ -386,7 +397,11 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_2GHZ,
false, false, idx);
- idx += cfg->active[0];
+ /*
+ * 5GHz channels always start at position 14, not immediately
+ * after the last 2.4GHz channel
+ */
+ idx = 14;
cfg->passive[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
@@ -394,22 +409,23 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
false, true, idx);
idx += cfg->passive[1];
- cfg->active[1] =
+ cfg->dfs =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_5GHZ,
- false, false, 14);
- idx += cfg->active[1];
+ true, true, idx);
+ idx += cfg->dfs;
- cfg->dfs =
+ cfg->active[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_5GHZ,
- true, false, idx);
- idx += cfg->dfs;
+ false, false, idx);
+ idx += cfg->active[1];
wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
cfg->active[0], cfg->passive[0]);
wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
cfg->active[1], cfg->passive[1]);
+ wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs);
return idx;
}
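/*
 * Illustrative stand-alone sketch of the channel-table layout produced
 * by the reordered calls above: 2.4GHz entries fill slots 0..13, the
 * 5GHz block always starts at slot 14 and is written passive, then DFS,
 * then active, each count advancing the running index.  The counts used
 * here are made-up example values.
 */
#include <stdio.h>

int main(void)
{
	int passive_24 = 3, active_24 = 8;		/* hypothetical 2.4GHz counts */
	int passive_5 = 4, dfs_5 = 6, active_5 = 5;	/* hypothetical 5GHz counts */
	int idx = 0;

	idx += passive_24;
	idx += active_24;

	idx = 14;			/* 5GHz block is pinned to slot 14 */
	printf("5GHz passive starts at slot %d\n", idx);
	idx += passive_5;
	printf("5GHz DFS starts at slot %d\n", idx);
	idx += dfs_5;
	printf("5GHz active starts at slot %d\n", idx);
	idx += active_5;
	printf("slots used in total: %d\n", idx);
	return 0;
}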
@@ -421,6 +437,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct wl1271_cmd_sched_scan_config *cfg = NULL;
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, total_channels, ret;
+ bool force_passive = !req->n_ssids;
wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
@@ -444,7 +461,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
cfg->intervals[i] = cpu_to_le32(req->interval);
- if (req->ssids[0].ssid_len && req->ssids[0].ssid) {
+ if (!force_passive && req->ssids[0].ssid_len && req->ssids[0].ssid) {
cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC;
cfg->ssid_len = req->ssids[0].ssid_len;
memcpy(cfg->ssid, req->ssids[0].ssid,
@@ -461,7 +478,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
goto out;
}
- if (cfg->active[0]) {
+ if (!force_passive && cfg->active[0]) {
ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_2GHZ],
@@ -473,7 +490,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
}
- if (cfg->active[1]) {
+ if (!force_passive && cfg->active[1]) {
ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_5GHZ],
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index c83319579ca3..a0b6c5d67b07 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -137,6 +137,9 @@ enum {
SCAN_BSS_TYPE_ANY,
};
+#define SCAN_CHANNEL_FLAGS_DFS BIT(0)
+#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
+
struct conn_scan_ch_params {
__le16 min_duration;
__le16 max_duration;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 0e819943b9e4..631194d49828 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1533,6 +1533,31 @@ static void __exit usb_exit(void)
module_init(usb_init);
module_exit(usb_exit);
+static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len,
+ int *actual_length, int timeout)
+{
+ /* In USB 2.0 mode the EP_REGS_OUT endpoint is an interrupt endpoint,
+ * whereas in USB 1.1 mode it is bulk. Select the correct URB type
+ * based on the endpoint descriptor.
+ */
+ struct usb_host_endpoint *ep;
+ unsigned int pipe;
+
+ pipe = usb_sndintpipe(udev, EP_REGS_OUT);
+ ep = usb_pipe_endpoint(udev, pipe);
+ if (!ep)
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_int(&ep->desc)) {
+ return usb_interrupt_msg(udev, pipe, data, len,
+ actual_length, timeout);
+ } else {
+ pipe = usb_sndbulkpipe(udev, EP_REGS_OUT);
+ return usb_bulk_msg(udev, pipe, data, len, actual_length,
+ timeout);
+ }
+}
+
static int usb_int_regs_length(unsigned int count)
{
return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
@@ -1648,15 +1673,14 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
udev = zd_usb_to_usbdev(usb);
prepare_read_regs_int(usb);
- r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 50 /* ms */);
+ r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_interrupt_msg(). Error number %d\n", r);
+ "error in zd_ep_regs_out_msg(). Error number %d\n", r);
goto error;
}
if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
+ dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
@@ -1818,9 +1842,17 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
rw->value = cpu_to_le16(ioreqs[i].value);
}
- usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
- req, req_len, iowrite16v_urb_complete, usb,
- ep->desc.bInterval);
+ /* In USB 2.0 mode the endpoint is an interrupt endpoint; in USB 1.1
+ * mode it is bulk. Select the correct URB type from the descriptor.
+ */
+ if (usb_endpoint_xfer_int(&ep->desc))
+ usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
+ req, req_len, iowrite16v_urb_complete, usb,
+ ep->desc.bInterval);
+ else
+ usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
+ req, req_len, iowrite16v_urb_complete, usb);
+
urb->transfer_flags |= URB_FREE_BUFFER;
/* Submit previous URB */
@@ -1924,15 +1956,14 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
}
udev = zd_usb_to_usbdev(usb);
- r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 50 /* ms */);
+ r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_interrupt_msg(). Error number %d\n", r);
+ "error in zd_ep_regs_out_msg(). Error number %d\n", r);
goto out;
}
if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
+ dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 8b63a691a9ed..65200af29c52 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -670,7 +670,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
- if (depth != 1 ||
+ if (depth != 1 || !data ||
(strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
return 0;
@@ -679,16 +679,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
- strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
+ strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
#ifdef CONFIG_CMDLINE
#ifndef CONFIG_CMDLINE_FORCE
if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
#endif
- strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */
- pr_debug("Command line is: %s\n", cmd_line);
+ pr_debug("Command line is: %s\n", (char*)data);
/* break now */
return 1;
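/*
 * Illustrative stand-alone sketch of the callback-with-context pattern
 * the hunk above switches to: early_init_dt_scan_chosen() now writes
 * into the buffer passed via 'data' (e.g. of_scan_flat_dt(
 * early_init_dt_scan_chosen, cmd_line)) instead of the global cmd_line.
 * Plain C stand-ins below; all names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 128

/* simplified scanner: call the iterator once for a single node */
static int scan_flat_dt(int (*it)(const char *uname, void *data), void *data)
{
	return it("chosen", data);
}

static int scan_chosen(const char *uname, void *data)
{
	if (!data || strcmp(uname, "chosen") != 0)
		return 0;
	/* copy the boot arguments into the caller-supplied buffer */
	strncpy(data, "console=ttyS0 root=/dev/sda1", COMMAND_LINE_SIZE - 1);
	return 1;
}

int main(void)
{
	char cmd_line[COMMAND_LINE_SIZE] = "";

	scan_flat_dt(scan_chosen, cmd_line);
	printf("Command line is: %s\n", cmd_line);
	return 0;
}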
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a3984f4ef192..f34b5b29fb95 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,6 +141,13 @@ static struct notifier_block module_load_nb = {
.notifier_call = module_load_notify,
};
+static void free_all_tasks(void)
+{
+ /* make sure we don't leak task structs */
+ process_task_mortuary();
+ process_task_mortuary();
+}
+
int sync_start(void)
{
int err;
@@ -148,8 +155,6 @@ int sync_start(void)
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- mutex_lock(&buffer_mutex);
-
err = task_handoff_register(&task_free_nb);
if (err)
goto out1;
@@ -166,7 +171,6 @@ int sync_start(void)
start_cpu_work();
out:
- mutex_unlock(&buffer_mutex);
return err;
out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -174,6 +178,7 @@ out3:
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
task_handoff_unregister(&task_free_nb);
+ free_all_tasks();
out1:
free_cpumask_var(marked_cpus);
goto out;
@@ -182,20 +187,16 @@ out1:
void sync_stop(void)
{
- /* flush buffers */
- mutex_lock(&buffer_mutex);
end_cpu_work();
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
- mutex_unlock(&buffer_mutex);
- flush_cpu_work();
+ barrier(); /* do all of the above first */
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
+ flush_cpu_work();
+ free_all_tasks();
free_cpumask_var(marked_cpus);
}
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 4e70749f8d16..a8d5bb3cba89 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -11,7 +11,7 @@
#define EVENT_BUFFER_H
#include <linux/types.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
int alloc_event_buffer(void);
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index f9bda64fcd1b..dccd8636095c 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -14,7 +14,7 @@
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/time.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
#include "oprof.h"
#include "event_buffer.h"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index c85f744270a5..094308e41be5 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_X86_VISWS) += setup-irq.o
obj-$(CONFIG_MN10300) += setup-bus.o
obj-$(CONFIG_MICROBLAZE) += setup-bus.o
obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
+obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o
#
# ACPI Related PCI FW Functions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 12e02bf92c4a..3dc9befa5aec 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -698,12 +698,7 @@ int __init detect_intel_iommu(void)
{
#ifdef CONFIG_INTR_REMAP
struct acpi_table_dmar *dmar;
- /*
- * for now we will disable dma-remapping when interrupt
- * remapping is enabled.
- * When support for queued invalidation for IOTLB invalidation
- * is added, we will not need this any more.
- */
+
dmar = (struct acpi_table_dmar *) dmar_tbl;
if (ret && cpu_has_x2apic && dmar->flags & 0x1)
printk(KERN_INFO
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 6af6b628175b..f02c34d26d1b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -47,6 +47,8 @@
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
+#define IS_BRIDGE_HOST_DEVICE(pdev) \
+ ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
@@ -116,6 +118,11 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
return (pfn + level_size(level) - 1) & level_mask(level);
}
+static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+{
+ return 1 << ((lvl - 1) * LEVEL_STRIDE);
+}
+
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
@@ -143,6 +150,12 @@ static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
/*
+ * set to 1 to panic the kernel if VT-d cannot be enabled successfully
+ * (used when the kernel is launched with TXT)
+ */
+static int force_on = 0;
+
+/*
* 0: Present
* 1-11: Reserved
* 12-63: Context Ptr (12 - (haw-1))
@@ -338,6 +351,9 @@ struct dmar_domain {
int iommu_coherency;/* indicate coherency of iommu access */
int iommu_snooping; /* indicate snooping control feature*/
int iommu_count; /* reference count of iommu */
+ int iommu_superpage;/* Level of superpages supported:
+ 0 == 4KiB (no superpages), 1 == 2MiB,
+ 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
};
@@ -387,6 +403,7 @@ int dmar_disabled = 1;
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
+static int intel_iommu_superpage = 1;
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
@@ -417,6 +434,10 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable batched IOTLB flush\n");
intel_iommu_strict = 1;
+ } else if (!strncmp(str, "sp_off", 6)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: disable supported super page\n");
+ intel_iommu_superpage = 0;
}
str += strcspn(str, ",");
@@ -555,11 +576,32 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
}
}
+static void domain_update_iommu_superpage(struct dmar_domain *domain)
+{
+ int i, mask = 0xf;
+
+ if (!intel_iommu_superpage) {
+ domain->iommu_superpage = 0;
+ return;
+ }
+
+ domain->iommu_superpage = 4; /* 1TiB */
+
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+ mask |= cap_super_page_val(g_iommus[i]->cap);
+ if (!mask) {
+ break;
+ }
+ }
+ domain->iommu_superpage = fls(mask);
+}
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
domain_update_iommu_snooping(domain);
+ domain_update_iommu_superpage(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
@@ -689,23 +731,31 @@ out:
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn)
+ unsigned long pfn, int large_level)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
- int offset;
+ int offset, target_level;
BUG_ON(!domain->pgd);
BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
parent = domain->pgd;
+ /* Search pte */
+ if (!large_level)
+ target_level = 1;
+ else
+ target_level = large_level;
+
while (level > 0) {
void *tmp_page;
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
- if (level == 1)
+ if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+ break;
+ if (level == target_level)
break;
if (!dma_pte_present(pte)) {
@@ -733,10 +783,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return pte;
}
+
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
- int level)
+ int level, int *large_page)
{
struct dma_pte *parent, *pte = NULL;
int total = agaw_to_level(domain->agaw);
@@ -749,8 +800,16 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
if (level == total)
return pte;
- if (!dma_pte_present(pte))
+ if (!dma_pte_present(pte)) {
+ *large_page = total;
break;
+ }
+
+ if (pte->val & DMA_PTE_LARGE_PAGE) {
+ *large_page = total;
+ return pte;
+ }
+
parent = phys_to_virt(dma_pte_addr(pte));
total--;
}
@@ -763,6 +822,7 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long last_pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
@@ -771,14 +831,15 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
/* we don't need lock here; nobody else touches the iova range */
do {
- first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
+ large_page = 1;
+ first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
if (!pte) {
- start_pfn = align_to_level(start_pfn + 1, 2);
+ start_pfn = align_to_level(start_pfn + 1, large_page + 1);
continue;
}
- do {
+ do {
dma_clear_pte(pte);
- start_pfn++;
+ start_pfn += lvl_to_nr_pages(large_page);
pte++;
} while (start_pfn <= last_pfn && !first_pte_in_page(pte));
@@ -798,6 +859,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
int total = agaw_to_level(domain->agaw);
int level;
unsigned long tmp;
+ int large_page = 2;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -813,7 +875,10 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
return;
do {
- first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
+ large_page = level;
+ first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
+ if (large_page > level)
+ level = large_page + 1;
if (!pte) {
tmp = align_to_level(tmp + 1, level + 1);
continue;
@@ -1397,6 +1462,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
else
domain->iommu_snooping = 0;
+ domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
domain->iommu_count = 1;
domain->nid = iommu->node;
@@ -1417,6 +1483,10 @@ static void domain_exit(struct dmar_domain *domain)
if (!domain)
return;
+ /* Flush any lazy unmaps that may reference this domain */
+ if (!intel_iommu_strict)
+ flush_unmaps_timeout(0);
+
domain_remove_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
@@ -1648,6 +1718,34 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr,
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
+/* Return largest possible superpage level for a given mapping */
+static inline int hardware_largepage_caps(struct dmar_domain *domain,
+ unsigned long iov_pfn,
+ unsigned long phy_pfn,
+ unsigned long pages)
+{
+ int support, level = 1;
+ unsigned long pfnmerge;
+
+ support = domain->iommu_superpage;
+
+ /* To use a large page, the virtual *and* physical addresses
+ must be aligned to 2MiB/1GiB/etc. Lower bits set in either
+ of them will mean we have to use smaller pages. So just
+ merge them and check both at once. */
+ pfnmerge = iov_pfn | phy_pfn;
+
+ while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
+ pages >>= VTD_STRIDE_SHIFT;
+ if (!pages)
+ break;
+ pfnmerge >>= VTD_STRIDE_SHIFT;
+ level++;
+ support--;
+ }
+ return level;
+}
+
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
@@ -1656,6 +1754,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
phys_addr_t uninitialized_var(pteval);
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned long sg_res;
+ unsigned int largepage_lvl = 0;
+ unsigned long lvl_pages = 0;
BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
@@ -1671,7 +1771,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
- while (nr_pages--) {
+ while (nr_pages > 0) {
uint64_t tmp;
if (!sg_res) {
@@ -1679,11 +1779,21 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length;
pteval = page_to_phys(sg_page(sg)) | prot;
+ phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
+
if (!pte) {
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
+ largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
+
+ first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
if (!pte)
return -ENOMEM;
+ /* It is a large page */
+ if (largepage_lvl > 1)
+ pteval |= DMA_PTE_LARGE_PAGE;
+ else
+ pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+
}
/* We don't need lock here, nobody else
* touches the iova range
@@ -1699,16 +1809,38 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
WARN_ON(1);
}
+
+ lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
+ BUG_ON(nr_pages < lvl_pages);
+ BUG_ON(sg_res < lvl_pages);
+
+ nr_pages -= lvl_pages;
+ iov_pfn += lvl_pages;
+ phys_pfn += lvl_pages;
+ pteval += lvl_pages * VTD_PAGE_SIZE;
+ sg_res -= lvl_pages;
+
+ /* If the next PTE would be the first in a new page, then we
+ need to flush the cache on the entries we've just written.
+ And then we'll need to recalculate 'pte', so clear it and
+ let it get set again in the if (!pte) block above.
+
+ If we're done (!nr_pages) we need to flush the cache too.
+
+ Also if we've been setting superpages, we may need to
+ recalculate 'pte' and switch back to smaller pages for the
+ end of the mapping, if the trailing size is not enough to
+ use another superpage (i.e. sg_res < lvl_pages). */
pte++;
- if (!nr_pages || first_pte_in_page(pte)) {
+ if (!nr_pages || first_pte_in_page(pte) ||
+ (largepage_lvl > 1 && sg_res < lvl_pages)) {
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
pte = NULL;
}
- iov_pfn++;
- pteval += VTD_PAGE_SIZE;
- sg_res--;
- if (!sg_res)
+
+ if (!sg_res && nr_pages)
sg = sg_next(sg);
}
return 0;
@@ -2016,7 +2148,7 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
return 0;
return iommu_prepare_identity_map(pdev, rmrr->base_address,
- rmrr->end_address + 1);
+ rmrr->end_address);
}
#ifdef CONFIG_DMAR_FLOPPY_WA
@@ -2030,7 +2162,7 @@ static inline void iommu_prepare_isa(void)
return;
printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
+ ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
if (ret)
printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
@@ -2106,10 +2238,10 @@ static int identity_mapping(struct pci_dev *pdev)
if (likely(!iommu_identity_mapping))
return 0;
+ info = pdev->dev.archdata.iommu;
+ if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
+ return (info->domain == si_domain);
- list_for_each_entry(info, &si_domain->devices, link)
- if (info->dev == pdev)
- return 1;
return 0;
}
@@ -2187,8 +2319,19 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
* Assume that they will -- if they turn out not to be, then we can
* take them out of the 1:1 domain later.
*/
- if (!startup)
- return pdev->dma_mask > DMA_BIT_MASK(32);
+ if (!startup) {
+ /*
+ * If the device's dma_mask is less than the system's memory
+ * size then this is not a candidate for identity mapping.
+ */
+ u64 dma_mask = pdev->dma_mask;
+
+ if (pdev->dev.coherent_dma_mask &&
+ pdev->dev.coherent_dma_mask < dma_mask)
+ dma_mask = pdev->dev.coherent_dma_mask;
+
+ return dma_mask >= dma_get_required_mask(&pdev->dev);
+ }
return 1;
}
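/*
 * Illustrative stand-alone sketch of the check added above: a device
 * stays an identity-map candidate only if the tighter of its streaming
 * and coherent DMA masks still covers everything the system may hand it
 * (dma_get_required_mask()).  The mask values below are made up.
 */
#include <stdint.h>
#include <stdio.h>

static int identity_map_candidate(uint64_t dma_mask, uint64_t coherent_mask,
				  uint64_t required_mask)
{
	/* use the more restrictive of the two masks, as the patch does */
	if (coherent_mask && coherent_mask < dma_mask)
		dma_mask = coherent_mask;

	return dma_mask >= required_mask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* 32-bit-only device */
	uint64_t mask64 = ~0ULL;		/* 64-bit capable device */
	uint64_t required = 0xfffffffffULL;	/* e.g. more than 4GiB of RAM */

	printf("32-bit device kept in identity map: %d\n",
	       identity_map_candidate(mask32, mask32, required));
	printf("64-bit device kept in identity map: %d\n",
	       identity_map_candidate(mask64, 0, required));
	return 0;
}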
@@ -2203,6 +2346,9 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return -EFAULT;
for_each_pci_dev(pdev) {
+ /* Skip Host/PCI Bridge devices */
+ if (IS_BRIDGE_HOST_DEVICE(pdev))
+ continue;
if (iommu_should_identity_map(pdev, 1)) {
printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
hw ? "hardware" : "software", pci_name(pdev));
@@ -2218,7 +2364,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
-static int __init init_dmars(int force_on)
+static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
@@ -2592,8 +2738,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
- iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
- pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
if (!iova)
goto error;
@@ -3118,7 +3263,17 @@ static int init_iommu_hw(void)
if (iommu->qi)
dmar_reenable_qi(iommu);
- for_each_active_iommu(iommu, drhd) {
+ for_each_iommu(iommu, drhd) {
+ if (drhd->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on
+ * this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(iommu);
+ continue;
+ }
+
iommu_flush_write_buffer(iommu);
iommu_set_root_entry(iommu);
@@ -3127,7 +3282,8 @@ static int init_iommu_hw(void)
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
- iommu_enable_translation(iommu);
+ if (iommu_enable_translation(iommu))
+ return 1;
iommu_disable_protect_mem_regions(iommu);
}
@@ -3194,7 +3350,10 @@ static void iommu_resume(void)
unsigned long flag;
if (init_iommu_hw()) {
- WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
+ if (force_on)
+ panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
+ else
+ WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
return;
}
@@ -3229,7 +3388,7 @@ static void __init init_iommu_pm_ops(void)
}
#else
-static inline int init_iommu_pm_ops(void) { }
+static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
/*
@@ -3271,7 +3430,6 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
int ret = 0;
- int force_on = 0;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3309,7 +3467,7 @@ int __init intel_iommu_init(void)
init_no_remapping_devices();
- ret = init_dmars(force_on);
+ ret = init_dmars();
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
@@ -3380,8 +3538,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_safe(entry, tmp, &domain->devices) {
info = list_entry(entry, struct device_domain_info, link);
- /* No need to compare PCI domain; it has to be the same */
- if (info->bus == pdev->bus->number &&
+ if (info->segment == pci_domain_nr(pdev->bus) &&
+ info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
list_del(&info->link);
list_del(&info->global);
@@ -3419,10 +3577,13 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
- spin_lock_irqsave(&iommu->lock, tmp_flags);
- clear_bit(domain->id, iommu->domain_ids);
- iommu->domains[domain->id] = NULL;
- spin_unlock_irqrestore(&iommu->lock, tmp_flags);
+ if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
+ spin_lock_irqsave(&iommu->lock, tmp_flags);
+ clear_bit(domain->id, iommu->domain_ids);
+ iommu->domains[domain->id] = NULL;
+ spin_unlock_irqrestore(&iommu->lock, tmp_flags);
+ }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3505,6 +3666,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_count = 0;
domain->iommu_coherency = 0;
domain->iommu_snooping = 0;
+ domain->iommu_superpage = 0;
domain->max_addr = 0;
domain->nid = -1;
@@ -3720,7 +3882,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
struct dma_pte *pte;
u64 phys = 0;
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
if (pte)
phys = dma_pte_addr(pte);
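/*
 * Illustrative stand-alone sketch of the superpage-level selection done
 * by hardware_largepage_caps() above: the level depends only on the
 * alignment of (iov_pfn | phys_pfn) and on how many pages remain.  Each
 * extra level covers 9 more address bits (512 entries per VT-d
 * page-table level), so level 2 = 2MiB and level 3 = 1GiB.  The stride
 * constants below are assumptions mirroring that layout.
 */
#include <stdio.h>

#define STRIDE_SHIFT	9
#define STRIDE_MASK	((1UL << STRIDE_SHIFT) - 1)

static int largepage_level(unsigned long iov_pfn, unsigned long phys_pfn,
			   unsigned long pages, int supported)
{
	unsigned long merged = iov_pfn | phys_pfn;
	int level = 1;

	/* stop as soon as either the alignment or the length runs out */
	while (supported && !(merged & STRIDE_MASK)) {
		pages >>= STRIDE_SHIFT;
		if (!pages)
			break;
		merged >>= STRIDE_SHIFT;
		level++;
		supported--;
	}
	return level;
}

int main(void)
{
	/* 2MiB-aligned mapping of 4096 pages (16MiB): level 2 superpages fit */
	printf("level %d\n", largepage_level(0x200, 0x400, 4096, 2));
	/* unaligned physical address: falls back to level 1 (4KiB pages) */
	printf("level %d\n", largepage_level(0x200, 0x401, 4096, 2));
	return 0;
}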
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 9606e599a475..c5c274ab5c5a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -63,8 +63,16 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
curr = iovad->cached32_node;
cached_iova = container_of(curr, struct iova, node);
- if (free->pfn_lo >= cached_iova->pfn_lo)
- iovad->cached32_node = rb_next(&free->node);
+ if (free->pfn_lo >= cached_iova->pfn_lo) {
+ struct rb_node *node = rb_next(&free->node);
+ struct iova *iova = container_of(node, struct iova, node);
+
+ /* only cache if it's below 32bit pfn */
+ if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
+ iovad->cached32_node = node;
+ else
+ iovad->cached32_node = NULL;
+ }
}
/* Computes the padding size required, to make the
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7c3b18e78cee..d36f41ea8cbf 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -195,6 +195,8 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_D2;
case ACPI_STATE_D3:
return PCI_D3hot;
+ case ACPI_STATE_D3_COLD:
+ return PCI_D3cold;
}
return PCI_POWER_ERROR;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 135df164a4c1..46767c53917a 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -624,7 +624,7 @@ static int pci_pm_prepare(struct device *dev)
* system from the sleep state, we'll have to prevent it from signaling
* wake-up.
*/
- pm_runtime_resume(dev);
+ pm_runtime_get_sync(dev);
if (drv && drv->pm && drv->pm->prepare)
error = drv->pm->prepare(dev);
@@ -638,6 +638,8 @@ static void pci_pm_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
+
+ pm_runtime_put_sync(dev);
}
#else /* !CONFIG_PM_SLEEP */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 56098b3e17c0..2c5b9b991279 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3271,11 +3271,11 @@ void __init pci_register_set_vga_state(arch_set_vga_state_t func)
}
static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
- unsigned int command_bits, bool change_bridge)
+ unsigned int command_bits, u32 flags)
{
if (arch_set_vga_state)
return arch_set_vga_state(dev, decode, command_bits,
- change_bridge);
+ flags);
return 0;
}
@@ -3284,7 +3284,7 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
* @dev: the PCI device
* @decode: true = enable decoding, false = disable decoding
* @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
- * @change_bridge_flags: traverse ancestors and change bridges
+ * @flags: traverse ancestors and change bridges
* CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
*/
int pci_set_vga_state(struct pci_dev *dev, bool decode,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 48849ffdd672..bafb3c3d4a89 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -168,7 +168,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
if (type == pci_bar_io) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
+ mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e8a140669f90..02145e9697a9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2761,6 +2761,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 435002dfc3ca..712baab3c83d 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5cb999b50f95..45e0191c35dd 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -39,7 +39,7 @@ config ACER_WMI
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
- depends on THERMAL && THERMAL_HWMON && ACPI
+ depends on THERMAL && ACPI
---help---
This is a driver for Acer Aspire One netbooks. It allows to access
the temperature sensor and to control the fan.
@@ -760,4 +760,13 @@ config MXM_WMI
MXM is a standard for laptop graphics cards, the WMI interface
is required for switchable nvidia graphics machines
+config INTEL_OAKTRAIL
+ tristate "Intel Oaktrail Platform Extras"
+ depends on ACPI
+ depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
+ ---help---
+ The Intel Oaktrail platform needs this driver to provide interfaces to
+ enable/disable the Camera, WiFi, BT, etc. devices. If in doubt, say Y
+ here; it will only load on supported platforms.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index a7ab3bc7b3a1..afc1f832aa67 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -41,5 +41,6 @@ obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
-obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
+obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
+obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ac4e7f83ce6c..005417bd429e 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -98,13 +98,26 @@ enum acer_wmi_event_ids {
static const struct key_entry acer_wmi_keymap[] = {
{KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
{KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
{KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
{KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_IGNORE, 0x41, {KEY_MUTE} },
+ {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
+ {KE_IGNORE, 0x43, {KEY_NEXTSONG} },
+ {KE_IGNORE, 0x44, {KEY_PLAYPAUSE} },
+ {KE_IGNORE, 0x45, {KEY_STOP} },
+ {KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
+ {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
+ {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+ {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
+ {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
+ {KE_IGNORE, 0x81, {KEY_SLEEP} },
{KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */
+ {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
{KE_END, 0}
};
@@ -122,6 +135,7 @@ struct event_return_value {
*/
#define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */
#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */
+#define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */
#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */
struct lm_input_params {
@@ -737,8 +751,11 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
obj = (union acpi_object *) result.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER &&
- obj->buffer.length == sizeof(u32)) {
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
tmp = *((u32 *) obj->buffer.pointer);
+ } else if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ tmp = (u32) obj->integer.value;
} else {
tmp = 0;
}
@@ -866,8 +883,11 @@ static acpi_status WMID_set_capabilities(void)
obj = (union acpi_object *) out.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER &&
- obj->buffer.length == sizeof(u32)) {
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
} else {
kfree(out.pointer);
return AE_ERROR;
@@ -876,7 +896,8 @@ static acpi_status WMID_set_capabilities(void)
dmi_walk(type_aa_dmi_decode, NULL);
if (!has_type_aa) {
interface->capability |= ACER_CAP_WIRELESS;
- interface->capability |= ACER_CAP_THREEG;
+ if (devices & 0x40)
+ interface->capability |= ACER_CAP_THREEG;
if (devices & 0x10)
interface->capability |= ACER_CAP_BLUETOOTH;
}
@@ -961,10 +982,12 @@ static void __init acer_commandline_init(void)
* These will all fail silently if the value given is invalid, or the
* capability isn't available on the given interface
*/
- set_u32(mailled, ACER_CAP_MAILLED);
- if (!has_type_aa)
+ if (mailled >= 0)
+ set_u32(mailled, ACER_CAP_MAILLED);
+ if (!has_type_aa && threeg >= 0)
set_u32(threeg, ACER_CAP_THREEG);
- set_u32(brightness, ACER_CAP_BRIGHTNESS);
+ if (brightness >= 0)
+ set_u32(brightness, ACER_CAP_BRIGHTNESS);
}
/*
@@ -1081,7 +1104,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
return AE_ERROR;
}
if (obj->buffer.length != 8) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return AE_ERROR;
}
@@ -1090,8 +1113,8 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
kfree(obj);
if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Get Device Status failed: "
- "0x%x - 0x%x\n", return_value.error_code,
+ pr_warn("Get Device Status failed: 0x%x - 0x%x\n",
+ return_value.error_code,
return_value.ec_return_value);
else
*value = !!(return_value.devices & device);
@@ -1124,6 +1147,114 @@ static acpi_status get_device_status(u32 *value, u32 cap)
}
}
+static acpi_status wmid3_set_device_status(u32 value, u16 device)
+{
+ struct wmid3_gds_return_value return_value;
+ acpi_status status;
+ union acpi_object *obj;
+ u16 devices;
+ struct wmid3_gds_input_param params = {
+ .function_num = 0x1,
+ .hotkey_number = 0x01,
+ .devices = ACER_WMID3_GDS_WIRELESS &
+ ACER_WMID3_GDS_THREEG &
+ ACER_WMID3_GDS_WIMAX &
+ ACER_WMID3_GDS_BLUETOOTH,
+ };
+ struct acpi_buffer input = {
+ sizeof(struct wmid3_gds_input_param),
+ &params
+ };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 8) {
+ pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value) {
+ pr_warning("Get Current Device Status failed: "
+ "0x%x - 0x%x\n", return_value.error_code,
+ return_value.ec_return_value);
+ return status;
+ }
+
+ devices = return_value.devices;
+ params.function_num = 0x2;
+ params.hotkey_number = 0x01;
+ params.devices = (value) ? (devices | device) : (devices & ~device);
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output2);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output2.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 4) {
+ pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warning("Set Device Status failed: "
+ "0x%x - 0x%x\n", return_value.error_code,
+ return_value.ec_return_value);
+
+ return status;
+}
+
+static acpi_status set_device_status(u32 value, u32 cap)
+{
+ if (wmi_has_guid(WMID_GUID3)) {
+ u16 device;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ device = ACER_WMID3_GDS_WIRELESS;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ device = ACER_WMID3_GDS_BLUETOOTH;
+ break;
+ case ACER_CAP_THREEG:
+ device = ACER_WMID3_GDS_THREEG;
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return wmid3_set_device_status(value, device);
+
+ } else {
+ return set_u32(value, cap);
+ }
+}
+
/*
* Rfkill devices
*/
@@ -1160,7 +1291,7 @@ static int acer_rfkill_set(void *data, bool blocked)
u32 cap = (unsigned long)data;
if (rfkill_inited) {
- status = set_u32(!blocked, cap);
+ status = set_device_status(!blocked, cap);
if (ACPI_FAILURE(status))
return -ENODEV;
}
@@ -1317,7 +1448,7 @@ static void acer_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- pr_warning("bad event status 0x%x\n", status);
+ pr_warn("bad event status 0x%x\n", status);
return;
}
@@ -1326,12 +1457,12 @@ static void acer_wmi_notify(u32 value, void *context)
if (!obj)
return;
if (obj->type != ACPI_TYPE_BUFFER) {
- pr_warning("Unknown response received %d\n", obj->type);
+ pr_warn("Unknown response received %d\n", obj->type);
kfree(obj);
return;
}
if (obj->buffer.length != 8) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return;
}
@@ -1343,7 +1474,7 @@ static void acer_wmi_notify(u32 value, void *context)
case WMID_HOTKEY_EVENT:
if (return_value.device_state) {
u16 device_state = return_value.device_state;
- pr_debug("deivces states: 0x%x\n", device_state);
+ pr_debug("device state: 0x%x\n", device_state);
if (has_cap(ACER_CAP_WIRELESS))
rfkill_set_sw_state(wireless_rfkill,
!(device_state & ACER_WMID3_GDS_WIRELESS));
@@ -1356,11 +1487,11 @@ static void acer_wmi_notify(u32 value, void *context)
}
if (!sparse_keymap_report_event(acer_wmi_input_dev,
return_value.key_num, 1, true))
- pr_warning("Unknown key number - 0x%x\n",
+ pr_warn("Unknown key number - 0x%x\n",
return_value.key_num);
break;
default:
- pr_warning("Unknown function number - %d - %d\n",
+ pr_warn("Unknown function number - %d - %d\n",
return_value.function, return_value.key_num);
break;
}
@@ -1389,7 +1520,7 @@ wmid3_set_lm_mode(struct lm_input_params *params,
return AE_ERROR;
}
if (obj->buffer.length != 4) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return AE_ERROR;
}
@@ -1414,11 +1545,11 @@ static int acer_wmi_enable_ec_raw(void)
status = wmid3_set_lm_mode(&params, &return_value);
if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Enabling EC raw mode failed: "
- "0x%x - 0x%x\n", return_value.error_code,
- return_value.ec_return_value);
+ pr_warn("Enabling EC raw mode failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
else
- pr_info("Enabled EC raw mode");
+ pr_info("Enabled EC raw mode\n");
return status;
}
@@ -1437,9 +1568,9 @@ static int acer_wmi_enable_lm(void)
status = wmid3_set_lm_mode(&params, &return_value);
if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Enabling Launch Manager failed: "
- "0x%x - 0x%x\n", return_value.error_code,
- return_value.ec_return_value);
+ pr_warn("Enabling Launch Manager failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
return status;
}
@@ -1506,8 +1637,11 @@ static u32 get_wmid_devices(void)
obj = (union acpi_object *) out.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER &&
- obj->buffer.length == sizeof(u32)) {
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
devices = *((u32 *) obj->buffer.pointer);
+	} else if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
}
kfree(out.pointer);
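
The acer-wmi hunks above repeat one defensive step before trusting a WMI result: check that the returned object exists, that it is an ACPI buffer of the expected length, and free it on every path. A minimal sketch of that pattern as a standalone helper follows; the helper name and the 4-byte result layout are illustrative assumptions, not symbols from the driver, and it presumes the usual <linux/acpi.h> and <linux/slab.h> context.

/* Sketch only, not part of the patch: the buffer-validation pattern used
 * above. gds_result and parse_gds_buffer() are hypothetical names. */
struct gds_result {
	u8 error_code;
	u8 ec_return_value;
	u16 devices;
} __packed;

static acpi_status parse_gds_buffer(struct acpi_buffer *out,
				    struct gds_result *res)
{
	union acpi_object *obj = out->pointer;

	if (!obj)
		return AE_ERROR;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length != sizeof(*res)) {
		kfree(obj);
		return AE_ERROR;
	}
	/* Copy the packed result out of the ACPI buffer, then free it */
	*res = *(struct gds_result *)obj->buffer.pointer;
	kfree(obj);
	return AE_OK;
}
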
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 60f9cfcac93f..fca3489218b7 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -35,10 +35,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/fs.h>
#include <linux/dmi.h>
-#include <acpi/acpi_drivers.h>
-#include <linux/sched.h>
+#include <linux/acpi.h>
#include <linux/thermal.h>
#include <linux/platform_device.h>
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index c53b3ff7978a..d65df92e2acc 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -318,7 +318,7 @@ static int acpi_check_handle(acpi_handle handle, const char *method,
if (status != AE_OK) {
if (ret)
- pr_warning("Error finding %s\n", method);
+ pr_warn("Error finding %s\n", method);
return -ENODEV;
}
return 0;
@@ -383,7 +383,7 @@ static int asus_kled_lvl(struct asus_laptop *asus)
rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
&params, &kblv);
if (ACPI_FAILURE(rv)) {
- pr_warning("Error reading kled level\n");
+ pr_warn("Error reading kled level\n");
return -ENODEV;
}
return kblv;
@@ -397,7 +397,7 @@ static int asus_kled_set(struct asus_laptop *asus, int kblv)
kblv = 0;
if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
- pr_warning("Keyboard LED display write failed\n");
+ pr_warn("Keyboard LED display write failed\n");
return -EINVAL;
}
return 0;
@@ -531,7 +531,7 @@ static int asus_read_brightness(struct backlight_device *bd)
rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
NULL, &value);
if (ACPI_FAILURE(rv))
- pr_warning("Error reading brightness\n");
+ pr_warn("Error reading brightness\n");
return value;
}
@@ -541,7 +541,7 @@ static int asus_set_brightness(struct backlight_device *bd, int value)
struct asus_laptop *asus = bl_get_data(bd);
if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
- pr_warning("Error changing brightness\n");
+ pr_warn("Error changing brightness\n");
return -EIO;
}
return 0;
@@ -730,7 +730,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
rv = parse_arg(buf, count, &value);
if (rv > 0) {
if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
- pr_warning("LED display write failed\n");
+ pr_warn("LED display write failed\n");
return -ENODEV;
}
asus->ledd_status = (u32) value;
@@ -752,7 +752,7 @@ static int asus_wireless_status(struct asus_laptop *asus, int mask)
rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
NULL, &status);
if (ACPI_FAILURE(rv)) {
- pr_warning("Error reading Wireless status\n");
+ pr_warn("Error reading Wireless status\n");
return -EINVAL;
}
return !!(status & mask);
@@ -764,7 +764,7 @@ static int asus_wireless_status(struct asus_laptop *asus, int mask)
static int asus_wlan_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
- pr_warning("Error setting wlan status to %d", status);
+ pr_warn("Error setting wlan status to %d\n", status);
return -EIO;
}
return 0;
@@ -792,7 +792,7 @@ static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
static int asus_bluetooth_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
- pr_warning("Error setting bluetooth status to %d", status);
+ pr_warn("Error setting bluetooth status to %d\n", status);
return -EIO;
}
return 0;
@@ -821,7 +821,7 @@ static ssize_t store_bluetooth(struct device *dev,
static int asus_wimax_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) {
- pr_warning("Error setting wimax status to %d", status);
+ pr_warn("Error setting wimax status to %d\n", status);
return -EIO;
}
return 0;
@@ -850,7 +850,7 @@ static ssize_t store_wimax(struct device *dev,
static int asus_wwan_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) {
- pr_warning("Error setting wwan status to %d", status);
+ pr_warn("Error setting wwan status to %d\n", status);
return -EIO;
}
return 0;
@@ -880,7 +880,7 @@ static void asus_set_display(struct asus_laptop *asus, int value)
{
/* no sanity check needed for now */
if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
- pr_warning("Error setting display\n");
+ pr_warn("Error setting display\n");
return;
}
@@ -909,7 +909,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
static void asus_als_switch(struct asus_laptop *asus, int value)
{
if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
- pr_warning("Error setting light sensor switch\n");
+ pr_warn("Error setting light sensor switch\n");
asus->light_switch = value;
}
@@ -937,7 +937,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
static void asus_als_level(struct asus_laptop *asus, int value)
{
if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
- pr_warning("Error setting light sensor level\n");
+ pr_warn("Error setting light sensor level\n");
asus->light_level = value;
}
@@ -976,7 +976,7 @@ static int asus_gps_status(struct asus_laptop *asus)
rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
NULL, &status);
if (ACPI_FAILURE(rv)) {
- pr_warning("Error reading GPS status\n");
+ pr_warn("Error reading GPS status\n");
return -ENODEV;
}
return !!status;
@@ -1284,7 +1284,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
if (ACPI_FAILURE(status))
- pr_warning("Couldn't get the DSDT table header\n");
+ pr_warn("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
@@ -1296,7 +1296,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
status =
acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
- pr_warning("Error calling BSTS\n");
+ pr_warn("Error calling BSTS\n");
else if (bsts_result)
pr_notice("BSTS called, 0x%02x returned\n",
(uint) bsts_result);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 832a3fd7c1c8..00460cb9587b 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -425,7 +425,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
if (asus->hotplug_slot) {
bus = pci_find_bus(0, 1);
if (!bus) {
- pr_warning("Unable to find PCI bus 1?\n");
+ pr_warn("Unable to find PCI bus 1?\n");
goto out_unlock;
}
@@ -436,12 +436,12 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
absent = (l == 0xffffffff);
if (blocked != absent) {
- pr_warning("BIOS says wireless lan is %s, "
- "but the pci device is %s\n",
- blocked ? "blocked" : "unblocked",
- absent ? "absent" : "present");
- pr_warning("skipped wireless hotplug as probably "
- "inappropriate for this model\n");
+ pr_warn("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warn("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
goto out_unlock;
}
@@ -500,7 +500,7 @@ static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
ACPI_SYSTEM_NOTIFY,
asus_rfkill_notify, asus);
if (ACPI_FAILURE(status))
- pr_warning("Failed to register notify on %s\n", node);
+ pr_warn("Failed to register notify on %s\n", node);
} else
return -ENODEV;
@@ -1223,7 +1223,7 @@ static int asus_wmi_sysfs_init(struct platform_device *device)
/*
* Platform device
*/
-static int __init asus_wmi_platform_init(struct asus_wmi *asus)
+static int asus_wmi_platform_init(struct asus_wmi *asus)
{
int rv;
@@ -1583,12 +1583,12 @@ static int asus_wmi_probe(struct platform_device *pdev)
int ret;
if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
- pr_warning("Management GUID not found\n");
+ pr_warn("Management GUID not found\n");
return -ENODEV;
}
if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) {
- pr_warning("Event GUID not found\n");
+ pr_warn("Event GUID not found\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index f503607c0645..d9312b3073e5 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -30,6 +30,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -581,8 +583,7 @@ static int read_led(const char *ledname, int ledmask)
if (read_acpi_int(NULL, ledname, &led_status))
return led_status;
else
- printk(KERN_WARNING "Asus ACPI: Error reading LED "
- "status\n");
+ pr_warn("Error reading LED status\n");
}
return (hotk->status & ledmask) ? 1 : 0;
}
@@ -621,8 +622,7 @@ write_led(const char __user *buffer, unsigned long count,
led_out = !led_out;
if (!write_acpi_int(hotk->handle, ledname, led_out, NULL))
- printk(KERN_WARNING "Asus ACPI: LED (%s) write failed\n",
- ledname);
+ pr_warn("LED (%s) write failed\n", ledname);
return rv;
}
@@ -679,8 +679,7 @@ static ssize_t ledd_proc_write(struct file *file, const char __user *buffer,
if (rv > 0) {
if (!write_acpi_int
(hotk->handle, hotk->methods->mt_ledd, value, NULL))
- printk(KERN_WARNING
- "Asus ACPI: LED display write failed\n");
+ pr_warn("LED display write failed\n");
else
hotk->ledd_status = (u32) value;
}
@@ -838,8 +837,7 @@ static int get_lcd_state(void)
} else {
/* We don't have to check anything if we are here */
if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd))
- printk(KERN_WARNING
- "Asus ACPI: Error reading LCD status\n");
+ pr_warn("Error reading LCD status\n");
if (hotk->model == L2D)
lcd = ~lcd;
@@ -871,7 +869,7 @@ static int set_lcd_state(int value)
the exact behaviour is simulated here */
}
if (ACPI_FAILURE(status))
- printk(KERN_WARNING "Asus ACPI: Error switching LCD\n");
+ pr_warn("Error switching LCD\n");
}
return 0;
@@ -915,13 +913,11 @@ static int read_brightness(struct backlight_device *bd)
if (hotk->methods->brightness_get) { /* SPLV/GPLV laptop */
if (!read_acpi_int(hotk->handle, hotk->methods->brightness_get,
&value))
- printk(KERN_WARNING
- "Asus ACPI: Error reading brightness\n");
+ pr_warn("Error reading brightness\n");
} else if (hotk->methods->brightness_status) { /* For D1 for example */
if (!read_acpi_int(NULL, hotk->methods->brightness_status,
&value))
- printk(KERN_WARNING
- "Asus ACPI: Error reading brightness\n");
+ pr_warn("Error reading brightness\n");
} else /* No GPLV method */
value = hotk->brightness;
return value;
@@ -939,8 +935,7 @@ static int set_brightness(int value)
if (hotk->methods->brightness_set) {
if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set,
value, NULL)) {
- printk(KERN_WARNING
- "Asus ACPI: Error changing brightness\n");
+ pr_warn("Error changing brightness\n");
ret = -EIO;
}
goto out;
@@ -955,8 +950,7 @@ static int set_brightness(int value)
NULL, NULL);
(value > 0) ? value-- : value++;
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING
- "Asus ACPI: Error changing brightness\n");
+ pr_warn("Error changing brightness\n");
ret = -EIO;
}
}
@@ -1008,7 +1002,7 @@ static void set_display(int value)
/* no sanity check needed for now */
if (!write_acpi_int(hotk->handle, hotk->methods->display_set,
value, NULL))
- printk(KERN_WARNING "Asus ACPI: Error setting display\n");
+ pr_warn("Error setting display\n");
return;
}
@@ -1021,8 +1015,7 @@ static int disp_proc_show(struct seq_file *m, void *v)
int value = 0;
if (!read_acpi_int(hotk->handle, hotk->methods->display_get, &value))
- printk(KERN_WARNING
- "Asus ACPI: Error reading display status\n");
+ pr_warn("Error reading display status\n");
value &= 0x07; /* needed for some models, shouldn't hurt others */
seq_printf(m, "%d\n", value);
return 0;
@@ -1068,7 +1061,7 @@ asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
proc = proc_create_data(name, mode, acpi_device_dir(device),
proc_fops, acpi_driver_data(device));
if (!proc) {
- printk(KERN_WARNING " Unable to create %s fs entry\n", name);
+ pr_warn(" Unable to create %s fs entry\n", name);
return -1;
}
proc->uid = asus_uid;
@@ -1085,8 +1078,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
- printk(KERN_WARNING " asus_uid and asus_gid parameters are "
- "deprecated, use chown and chmod instead!\n");
+ pr_warn(" asus_uid and asus_gid parameters are "
+ "deprecated, use chown and chmod instead!\n");
}
acpi_device_dir(device) = asus_proc_dir;
@@ -1099,8 +1092,7 @@ static int asus_hotk_add_fs(struct acpi_device *device)
proc->uid = asus_uid;
proc->gid = asus_gid;
} else {
- printk(KERN_WARNING " Unable to create " PROC_INFO
- " fs entry\n");
+ pr_warn(" Unable to create " PROC_INFO " fs entry\n");
}
if (hotk->methods->mt_wled) {
@@ -1283,20 +1275,19 @@ static int asus_hotk_get_info(void)
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
if (ACPI_FAILURE(status))
- printk(KERN_WARNING " Couldn't get the DSDT table header\n");
+ pr_warn(" Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
- printk(KERN_ERR " Hotkey initialization failed\n");
+ pr_err(" Hotkey initialization failed\n");
return -ENODEV;
}
/* This needs to be called for some laptops to init properly */
if (!read_acpi_int(hotk->handle, "BSTS", &bsts_result))
- printk(KERN_WARNING " Error calling BSTS\n");
+ pr_warn(" Error calling BSTS\n");
else if (bsts_result)
- printk(KERN_NOTICE " BSTS called, 0x%02x returned\n",
- bsts_result);
+ pr_notice(" BSTS called, 0x%02x returned\n", bsts_result);
/*
* Try to match the object returned by INIT to the specific model.
@@ -1324,23 +1315,21 @@ static int asus_hotk_get_info(void)
if (asus_info &&
strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
hotk->model = P30;
- printk(KERN_NOTICE
- " Samsung P30 detected, supported\n");
+ pr_notice(" Samsung P30 detected, supported\n");
hotk->methods = &model_conf[hotk->model];
kfree(model);
return 0;
} else {
hotk->model = M2E;
- printk(KERN_NOTICE " unsupported model %s, trying "
- "default values\n", string);
- printk(KERN_NOTICE
- " send /proc/acpi/dsdt to the developers\n");
+ pr_notice(" unsupported model %s, trying default values\n",
+ string);
+ pr_notice(" send /proc/acpi/dsdt to the developers\n");
kfree(model);
return -ENODEV;
}
}
hotk->methods = &model_conf[hotk->model];
- printk(KERN_NOTICE " %s model detected, supported\n", string);
+ pr_notice(" %s model detected, supported\n", string);
/* Sort of per-model blacklist */
if (strncmp(string, "L2B", 3) == 0)
@@ -1385,7 +1374,7 @@ static int asus_hotk_check(void)
if (hotk->device->status.present) {
result = asus_hotk_get_info();
} else {
- printk(KERN_ERR " Hotkey device not present, aborting\n");
+ pr_err(" Hotkey device not present, aborting\n");
return -EINVAL;
}
@@ -1399,8 +1388,7 @@ static int asus_hotk_add(struct acpi_device *device)
acpi_status status = AE_OK;
int result;
- printk(KERN_NOTICE "Asus Laptop ACPI Extras version %s\n",
- ASUS_ACPI_VERSION);
+ pr_notice("Asus Laptop ACPI Extras version %s\n", ASUS_ACPI_VERSION);
hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
if (!hotk)
@@ -1428,15 +1416,14 @@ static int asus_hotk_add(struct acpi_device *device)
acpi_evaluate_object(NULL, hotk->methods->brightness_down,
NULL, NULL);
if (ACPI_FAILURE(status))
- printk(KERN_WARNING " Error changing brightness\n");
+ pr_warn(" Error changing brightness\n");
else {
status =
acpi_evaluate_object(NULL,
hotk->methods->brightness_up,
NULL, NULL);
if (ACPI_FAILURE(status))
- printk(KERN_WARNING " Strange, error changing"
- " brightness\n");
+ pr_warn(" Strange, error changing brightness\n");
}
}
@@ -1488,7 +1475,7 @@ static int __init asus_acpi_init(void)
asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir);
if (!asus_proc_dir) {
- printk(KERN_ERR "Asus ACPI: Unable to create /proc entry\n");
+ pr_err("Unable to create /proc entry\n");
acpi_bus_unregister_driver(&asus_hotk_driver);
return -ENODEV;
}
@@ -1513,7 +1500,7 @@ static int __init asus_acpi_init(void)
&asus_backlight_data,
&props);
if (IS_ERR(asus_backlight_device)) {
- printk(KERN_ERR "Could not register asus backlight device\n");
+ pr_err("Could not register asus backlight device\n");
asus_backlight_device = NULL;
asus_acpi_exit();
return -ENODEV;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index c16a27641ced..3f204fde1b02 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -68,6 +68,8 @@
* only enabled on a JHL90 board until it is verified that they work on the
* other boards too. See the extra_features variable. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -200,8 +202,8 @@ static bool extra_features;
* watching the output of address 0x4F (do an ec_transaction writing 0x33
* into 0x4F and read a few bytes from the output, like so:
* u8 writeData = 0x33;
- * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0);
- * That address is labelled "fan1 table information" in the service manual.
+ * ec_transaction(0x4F, &writeData, 1, buffer, 32);
+ * That address is labeled "fan1 table information" in the service manual.
* It should be clear which value in 'buffer' changes). This seems to be
* related to fan speed. It isn't a proper 'realtime' fan speed value
* though, because physically stopping or speeding up the fan doesn't
@@ -286,7 +288,7 @@ static int get_backlight_level(void)
static void set_backlight_state(bool on)
{
u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA;
- ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0, 0);
+ ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0);
}
@@ -294,24 +296,24 @@ static void set_backlight_state(bool on)
static void pwm_enable_control(void)
{
unsigned char writeData = PWM_ENABLE_DATA;
- ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0, 0);
+ ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0);
}
static void pwm_disable_control(void)
{
unsigned char writeData = PWM_DISABLE_DATA;
- ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0, 0);
+ ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0);
}
static void set_pwm(int pwm)
{
- ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0, 0);
+ ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0);
}
static int get_fan_rpm(void)
{
u8 value, data = FAN_DATA;
- ec_transaction(FAN_ADDRESS, &data, 1, &value, 1, 0);
+ ec_transaction(FAN_ADDRESS, &data, 1, &value, 1);
return 100 * (int)value;
}
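
For reference, the fan-table probe described in the comment earlier in this file would now be written against the five-argument ec_transaction() that these hunks switch to. A minimal sketch, assuming only the values given in that comment (command 0x33 written to address 0x4F, 32 bytes read back); the helper name is hypothetical and the interpretation of the returned bytes is board-specific.

/* Sketch only, not part of the patch: probing the "fan1 table information"
 * area described above with the updated ec_transaction() call form. */
static void probe_fan1_table(void)
{
	u8 cmd = 0x33;
	u8 buf[32];
	int i;

	if (ec_transaction(0x4F, &cmd, 1, buf, 32) < 0)
		return;
	for (i = 0; i < ARRAY_SIZE(buf); i++)
		pr_debug("fan1 table byte %2d: 0x%02x\n", i, buf[i]);
}
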
@@ -760,16 +762,14 @@ static struct rfkill *bt_rfkill;
static int dmi_check_cb(const struct dmi_system_id *id)
{
- printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n",
- id->ident);
+ pr_info("Identified laptop model '%s'\n", id->ident);
extra_features = false;
return 1;
}
static int dmi_check_cb_extra(const struct dmi_system_id *id)
{
- printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s', "
- "enabling extra features\n",
+ pr_info("Identified laptop model '%s', enabling extra features\n",
id->ident);
extra_features = true;
return 1;
@@ -956,14 +956,12 @@ static int __init compal_init(void)
int ret;
if (acpi_disabled) {
- printk(KERN_ERR DRIVER_NAME": ACPI needs to be enabled for "
- "this driver to work!\n");
+ pr_err("ACPI needs to be enabled for this driver to work!\n");
return -ENODEV;
}
if (!force && !dmi_check_system(compal_dmi_table)) {
- printk(KERN_ERR DRIVER_NAME": Motherboard not recognized (You "
- "could try the module's force-parameter)");
+ pr_err("Motherboard not recognized (You could try the module's force-parameter)\n");
return -ENODEV;
}
@@ -998,8 +996,7 @@ static int __init compal_init(void)
if (ret)
goto err_rfkill;
- printk(KERN_INFO DRIVER_NAME": Driver "DRIVER_VERSION
- " successfully loaded\n");
+ pr_info("Driver " DRIVER_VERSION " successfully loaded\n");
return 0;
err_rfkill:
@@ -1064,7 +1061,7 @@ static void __exit compal_cleanup(void)
rfkill_destroy(wifi_rfkill);
rfkill_destroy(bt_rfkill);
- printk(KERN_INFO DRIVER_NAME": Driver unloaded\n");
+ pr_info("Driver unloaded\n");
}
static int __devexit compal_remove(struct platform_device *pdev)
@@ -1074,8 +1071,7 @@ static int __devexit compal_remove(struct platform_device *pdev)
if (!extra_features)
return 0;
- printk(KERN_INFO DRIVER_NAME": Unloading: resetting fan control "
- "to motherboard\n");
+ pr_info("Unloading: resetting fan control to motherboard\n");
pwm_disable_control();
data = platform_get_drvdata(pdev);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index de301aa8e5c3..d3841de6a8cf 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -434,8 +436,7 @@ static int __init dell_setup_rfkill(void)
int ret;
if (dmi_check_system(dell_blacklist)) {
- printk(KERN_INFO "dell-laptop: Blacklisted hardware detected - "
- "not enabling rfkill\n");
+ pr_info("Blacklisted hardware detected - not enabling rfkill\n");
return 0;
}
@@ -606,7 +607,7 @@ static int __init dell_init(void)
dmi_walk(find_tokens, NULL);
if (!da_tokens) {
- printk(KERN_INFO "dell-laptop: Unable to find dmi tokens\n");
+ pr_info("Unable to find dmi tokens\n");
return -ENODEV;
}
@@ -636,14 +637,13 @@ static int __init dell_init(void)
ret = dell_setup_rfkill();
if (ret) {
- printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n");
+ pr_warn("Unable to setup rfkill\n");
goto fail_rfkill;
}
ret = i8042_install_filter(dell_laptop_i8042_filter);
if (ret) {
- printk(KERN_WARNING
- "dell-laptop: Unable to install key filter\n");
+ pr_warn("Unable to install key filter\n");
goto fail_filter;
}
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
index 0ed84573ae1f..3f945457f71c 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -15,6 +15,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
@@ -138,7 +139,7 @@ static int __init dell_wmi_aio_init(void)
guid = dell_wmi_aio_find();
if (!guid) {
- pr_warning("No known WMI GUID found\n");
+ pr_warn("No known WMI GUID found\n");
return -ENXIO;
}
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 77f1d55414c6..ce790827e199 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -23,6 +23,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -141,7 +143,7 @@ static void dell_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -153,8 +155,8 @@ static void dell_wmi_notify(u32 value, void *context)
u16 *buffer_entry = (u16 *)obj->buffer.pointer;
if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
- printk(KERN_INFO "dell-wmi: Received unknown WMI event"
- " (0x%x)\n", buffer_entry[1]);
+ pr_info("Received unknown WMI event (0x%x)\n",
+ buffer_entry[1]);
kfree(obj);
return;
}
@@ -167,8 +169,7 @@ static void dell_wmi_notify(u32 value, void *context)
key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
reported_key);
if (!key) {
- printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
- reported_key);
+ pr_info("Unknown key %x pressed\n", reported_key);
} else if ((key->keycode == KEY_BRIGHTNESSUP ||
key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
/* Don't report brightness notifications that will also
@@ -275,7 +276,7 @@ static int __init dell_wmi_init(void)
acpi_status status;
if (!wmi_has_guid(DELL_EVENT_GUID)) {
- printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
+ pr_warn("No known WMI GUID found\n");
return -ENODEV;
}
@@ -290,9 +291,7 @@ static int __init dell_wmi_init(void)
dell_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
dell_wmi_input_destroy();
- printk(KERN_ERR
- "dell-wmi: Unable to register notify handler - %d\n",
- status);
+ pr_err("Unable to register notify handler - %d\n", status);
return -ENODEV;
}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 2c1abf63957f..1c45d92e2163 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -228,7 +228,7 @@ static int set_acpi(struct eeepc_laptop *eeepc, int cm, int value)
return -ENODEV;
if (write_acpi_int(eeepc->handle, method, value))
- pr_warning("Error writing %s\n", method);
+ pr_warn("Error writing %s\n", method);
return 0;
}
@@ -243,7 +243,7 @@ static int get_acpi(struct eeepc_laptop *eeepc, int cm)
return -ENODEV;
if (read_acpi_int(eeepc->handle, method, &value))
- pr_warning("Error reading %s\n", method);
+ pr_warn("Error reading %s\n", method);
return value;
}
@@ -261,7 +261,7 @@ static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
status = acpi_get_handle(eeepc->handle, (char *)method,
handle);
if (status != AE_OK) {
- pr_warning("Error finding %s\n", method);
+ pr_warn("Error finding %s\n", method);
return -ENODEV;
}
return 0;
@@ -417,7 +417,7 @@ static ssize_t store_cpufv_disabled(struct device *dev,
switch (value) {
case 0:
if (eeepc->cpufv_disabled)
- pr_warning("cpufv enabled (not officially supported "
+ pr_warn("cpufv enabled (not officially supported "
"on this model)\n");
eeepc->cpufv_disabled = false;
return rv;
@@ -609,7 +609,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
bus = port->subordinate;
if (!bus) {
- pr_warning("Unable to find PCI bus?\n");
+ pr_warn("Unable to find PCI bus 1?\n");
goto out_unlock;
}
@@ -621,12 +621,12 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
absent = (l == 0xffffffff);
if (blocked != absent) {
- pr_warning("BIOS says wireless lan is %s, "
- "but the pci device is %s\n",
+ pr_warn("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
blocked ? "blocked" : "unblocked",
absent ? "absent" : "present");
- pr_warning("skipped wireless hotplug as probably "
- "inappropriate for this model\n");
+ pr_warn("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
goto out_unlock;
}
@@ -691,7 +691,8 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
eeepc_rfkill_notify,
eeepc);
if (ACPI_FAILURE(status))
- pr_warning("Failed to register notify on %s\n", node);
+ pr_warn("Failed to register notify on %s\n", node);
+
/*
* Refresh pci hotplug in case the rfkill state was
* changed during setup.
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 649dcadd8ea3..4aa867a9b88b 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -84,7 +84,7 @@ static const struct key_entry eeepc_wmi_keymap[] = {
static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
void *context, void **retval)
{
- pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID);
+ pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
*(bool *)context = true;
return AE_CTRL_TERMINATE;
}
@@ -105,12 +105,12 @@ static int eeepc_wmi_check_atkd(void)
static int eeepc_wmi_probe(struct platform_device *pdev)
{
if (eeepc_wmi_check_atkd()) {
- pr_warning("WMI device present, but legacy ATKD device is also "
- "present and enabled.");
- pr_warning("You probably booted with acpi_osi=\"Linux\" or "
- "acpi_osi=\"!Windows 2009\"");
- pr_warning("Can't load eeepc-wmi, use default acpi_osi "
- "(preferred) or eeepc-laptop");
+ pr_warn("WMI device present, but legacy ATKD device is also "
+ "present and enabled\n");
+ pr_warn("You probably booted with acpi_osi=\"Linux\" or "
+ "acpi_osi=\"!Windows 2009\"\n");
+ pr_warn("Can't load eeepc-wmi, use default acpi_osi "
+ "(preferred) or eeepc-laptop\n");
return -EBUSY;
}
return 0;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 493054c2dbe1..6b26666b37f2 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -56,6 +56,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -585,8 +587,7 @@ static struct platform_driver fujitsupf_driver = {
static void dmi_check_cb_common(const struct dmi_system_id *id)
{
acpi_handle handle;
- printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n",
- id->ident);
+ pr_info("Identified laptop model '%s'\n", id->ident);
if (use_alt_lcd_levels == -1) {
if (ACPI_SUCCESS(acpi_get_handle(NULL,
"\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
@@ -691,11 +692,11 @@ static int acpi_fujitsu_add(struct acpi_device *device)
result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
if (result) {
- printk(KERN_ERR "Error reading power state\n");
+ pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
- printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
+ pr_info("ACPI: %s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
@@ -707,7 +708,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (ACPI_FAILURE
(acpi_evaluate_object
(device->handle, METHOD_NAME__INI, NULL, NULL)))
- printk(KERN_ERR "_INI Method failed\n");
+ pr_err("_INI Method failed\n");
}
/* do config (detect defaults) */
@@ -827,7 +828,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
GFP_KERNEL);
if (error) {
- printk(KERN_ERR "kfifo_alloc failed\n");
+ pr_err("kfifo_alloc failed\n");
goto err_stop;
}
@@ -859,13 +860,13 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
if (result) {
- printk(KERN_ERR "Error reading power state\n");
+ pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
- printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
- !device->power.state ? "on" : "off");
+ pr_info("ACPI: %s [%s] (%s)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+ !device->power.state ? "on" : "off");
fujitsu_hotkey->dev = device;
@@ -875,7 +876,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (ACPI_FAILURE
(acpi_evaluate_object
(device->handle, METHOD_NAME__INI, NULL, NULL)))
- printk(KERN_ERR "_INI Method failed\n");
+ pr_err("_INI Method failed\n");
}
i = 0;
@@ -897,8 +898,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
/* Suspect this is a keymap of the application panel, print it */
- printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n",
- call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
+ pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
@@ -907,8 +907,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (result == 0) {
fujitsu_hotkey->logolamp_registered = 1;
} else {
- printk(KERN_ERR "fujitsu-laptop: Could not register "
- "LED handler for logo lamp, error %i\n", result);
+ pr_err("Could not register LED handler for logo lamp, error %i\n",
+ result);
}
}
@@ -919,8 +919,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (result == 0) {
fujitsu_hotkey->kblamps_registered = 1;
} else {
- printk(KERN_ERR "fujitsu-laptop: Could not register "
- "LED handler for keyboard lamps, error %i\n", result);
+ pr_err("Could not register LED handler for keyboard lamps, error %i\n",
+ result);
}
}
#endif
@@ -1169,8 +1169,7 @@ static int __init fujitsu_init(void)
fujitsu->bl_device->props.power = 0;
}
- printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION
- " successfully loaded.\n");
+ pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
return 0;
@@ -1216,7 +1215,7 @@ static void __exit fujitsu_cleanup(void)
kfree(fujitsu);
- printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n");
+ pr_info("driver unloaded\n");
}
module_init(fujitsu_init);
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 067bf36d32f3..5a34973dc164 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -26,6 +26,8 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
@@ -238,7 +240,7 @@ static int hdaps_device_init(void)
__check_latch(0x1611, 0x01))
goto out;
- printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x).\n",
+ printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x)\n",
__get_latch(0x1611));
outb(0x17, 0x1610);
@@ -299,7 +301,7 @@ static int hdaps_probe(struct platform_device *dev)
if (ret)
return ret;
- printk(KERN_INFO "hdaps: device successfully initialized.\n");
+ pr_info("device successfully initialized\n");
return 0;
}
@@ -480,7 +482,7 @@ static struct attribute_group hdaps_attribute_group = {
/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
static int __init hdaps_dmi_match(const struct dmi_system_id *id)
{
- printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
+ pr_info("%s detected\n", id->ident);
return 1;
}
@@ -488,8 +490,7 @@ static int __init hdaps_dmi_match(const struct dmi_system_id *id)
static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
{
hdaps_invert = (unsigned long)id->driver_data;
- printk(KERN_INFO "hdaps: inverting axis (%u) readings.\n",
- hdaps_invert);
+ pr_info("inverting axis (%u) readings\n", hdaps_invert);
return hdaps_dmi_match(id);
}
@@ -543,7 +544,7 @@ static int __init hdaps_init(void)
int ret;
if (!dmi_check_system(hdaps_whitelist)) {
- printk(KERN_WARNING "hdaps: supported laptop not found!\n");
+ pr_warn("supported laptop not found!\n");
ret = -ENODEV;
goto out;
}
@@ -595,7 +596,7 @@ static int __init hdaps_init(void)
if (ret)
goto out_idev;
- printk(KERN_INFO "hdaps: driver successfully loaded.\n");
+ pr_info("driver successfully loaded\n");
return 0;
out_idev:
@@ -609,7 +610,7 @@ out_driver:
out_region:
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
out:
- printk(KERN_WARNING "hdaps: driver init failed (ret=%d)!\n", ret);
+ pr_warn("driver init failed (ret=%d)!\n", ret);
return ret;
}
@@ -622,7 +623,7 @@ static void __exit hdaps_exit(void)
platform_driver_unregister(&hdaps_driver);
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
- printk(KERN_INFO "hdaps: driver unloaded.\n");
+ pr_info("driver unloaded\n");
}
module_init(hdaps_init);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1bc4a7539ba9..f94017bcdd6e 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -24,6 +24,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -54,9 +56,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_WIRELESS2_QUERY 0x1b
-#define PREFIX "HP WMI: "
-#define UNIMP "Unimplemented "
-
enum hp_wmi_radio {
HPWMI_WIFI = 0,
HPWMI_BLUETOOTH = 1,
@@ -228,9 +227,8 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
if (bios_return->return_code) {
if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE)
- printk(KERN_WARNING PREFIX "query 0x%x returned "
- "error 0x%x\n",
- query, bios_return->return_code);
+ pr_warn("query 0x%x returned error 0x%x\n",
+ query, bios_return->return_code);
kfree(obj);
return bios_return->return_code;
}
@@ -384,8 +382,7 @@ static int hp_wmi_rfkill2_refresh(void)
if (num >= state.count ||
devstate->rfkill_id != rfkill2[i].id) {
- printk(KERN_WARNING PREFIX "power configuration of "
- "the wireless devices unexpectedly changed\n");
+ pr_warn("power configuration of the wireless devices unexpectedly changed\n");
continue;
}
@@ -471,7 +468,7 @@ static void hp_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO PREFIX "bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -480,8 +477,7 @@ static void hp_wmi_notify(u32 value, void *context)
if (!obj)
return;
if (obj->type != ACPI_TYPE_BUFFER) {
- printk(KERN_INFO "hp-wmi: Unknown response received %d\n",
- obj->type);
+ pr_info("Unknown response received %d\n", obj->type);
kfree(obj);
return;
}
@@ -498,8 +494,7 @@ static void hp_wmi_notify(u32 value, void *context)
event_id = *location;
event_data = *(location + 2);
} else {
- printk(KERN_INFO "hp-wmi: Unknown buffer length %d\n",
- obj->buffer.length);
+ pr_info("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return;
}
@@ -527,8 +522,7 @@ static void hp_wmi_notify(u32 value, void *context)
if (!sparse_keymap_report_event(hp_wmi_input_dev,
key_code, 1, true))
- printk(KERN_INFO PREFIX "Unknown key code - 0x%x\n",
- key_code);
+ pr_info("Unknown key code - 0x%x\n", key_code);
break;
case HPWMI_WIRELESS:
if (rfkill2_count) {
@@ -550,14 +544,12 @@ static void hp_wmi_notify(u32 value, void *context)
hp_wmi_get_hw_state(HPWMI_WWAN));
break;
case HPWMI_CPU_BATTERY_THROTTLE:
- printk(KERN_INFO PREFIX UNIMP "CPU throttle because of 3 Cell"
- " battery event detected\n");
+ pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
break;
case HPWMI_LOCK_SWITCH:
break;
default:
- printk(KERN_INFO PREFIX "Unknown event_id - %d - 0x%x\n",
- event_id, event_data);
+ pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
break;
}
}
@@ -705,7 +697,7 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
return err;
if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
- printk(KERN_WARNING PREFIX "unable to parse 0x1b query output\n");
+ pr_warn("unable to parse 0x1b query output\n");
return -EINVAL;
}
@@ -727,14 +719,14 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
name = "hp-wwan";
break;
default:
- printk(KERN_WARNING PREFIX "unknown device type 0x%x\n",
- state.device[i].radio_type);
+ pr_warn("unknown device type 0x%x\n",
+ state.device[i].radio_type);
continue;
}
if (!state.device[i].vendor_id) {
- printk(KERN_WARNING PREFIX "zero device %d while %d "
- "reported\n", i, state.count);
+ pr_warn("zero device %d while %d reported\n",
+ i, state.count);
continue;
}
@@ -755,8 +747,7 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
IS_HWBLOCKED(state.device[i].power));
if (!(state.device[i].power & HPWMI_POWER_BIOS))
- printk(KERN_INFO PREFIX "device %s blocked by BIOS\n",
- name);
+ pr_info("device %s blocked by BIOS\n", name);
err = rfkill_register(rfkill);
if (err) {
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index b1396e5b2953..811d436cd677 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -22,6 +22,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -69,9 +71,10 @@ struct ibm_rtl_table {
#define RTL_SIGNATURE 0x0000005f4c54525fULL
#define RTL_MASK 0x000000ffffffffffULL
-#define RTL_DEBUG(A, ...) do { \
- if (debug) \
- pr_info("ibm-rtl: " A, ##__VA_ARGS__ ); \
+#define RTL_DEBUG(fmt, ...) \
+do { \
+ if (debug) \
+ pr_info(fmt, ##__VA_ARGS__); \
} while (0)
static DEFINE_MUTEX(rtl_lock);
@@ -114,7 +117,7 @@ static int ibm_rtl_write(u8 value)
int ret = 0, count = 0;
static u32 cmd_port_val;
- RTL_DEBUG("%s(%d)\n", __FUNCTION__, value);
+ RTL_DEBUG("%s(%d)\n", __func__, value);
value = value == 1 ? RTL_CMD_ENTER_PRTM : RTL_CMD_EXIT_PRTM;
@@ -144,8 +147,8 @@ static int ibm_rtl_write(u8 value)
while (ioread8(&rtl_table->command)) {
msleep(10);
if (count++ > 500) {
- pr_err("ibm-rtl: Hardware not responding to "
- "mode switch request\n");
+ pr_err("Hardware not responding to "
+ "mode switch request\n");
ret = -EIO;
break;
}
@@ -250,7 +253,7 @@ static int __init ibm_rtl_init(void) {
int ret = -ENODEV, i;
if (force)
- pr_warning("ibm-rtl: module loaded by force\n");
+ pr_warn("module loaded by force\n");
/* first ensure that we are running on IBM HW */
else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
return -ENODEV;
@@ -288,19 +291,19 @@ static int __init ibm_rtl_init(void) {
if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) {
phys_addr_t addr;
unsigned int plen;
- RTL_DEBUG("found RTL_SIGNATURE at %#llx\n", (u64)tmp);
+ RTL_DEBUG("found RTL_SIGNATURE at %p\n", tmp);
rtl_table = tmp;
/* The address, value, width and offset are platform
* dependent and found in the ibm_rtl_table */
rtl_cmd_width = ioread8(&rtl_table->cmd_granularity);
rtl_cmd_type = ioread8(&rtl_table->cmd_address_type);
RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n",
- rtl_cmd_width, rtl_cmd_type);
+ rtl_cmd_width, rtl_cmd_type);
addr = ioread32(&rtl_table->cmd_port_address);
RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr);
plen = rtl_cmd_width/sizeof(char);
rtl_cmd_addr = rtl_port_map(addr, plen);
- RTL_DEBUG("rtl_cmd_addr = %#llx\n", (u64)rtl_cmd_addr);
+ RTL_DEBUG("rtl_cmd_addr = %p\n", rtl_cmd_addr);
if (!rtl_cmd_addr) {
ret = -ENOMEM;
break;
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 21b101899bae..bfdda33feb26 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -20,6 +20,8 @@
* 02110-1301, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index eacd5da7dd24..809adea4965f 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -27,6 +27,8 @@
* to get/set bandwidth.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -135,8 +137,7 @@ static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list,
&temp);
- printk(KERN_INFO
- "Bandwidth value was %ld: status is %d\n", state, status);
+ pr_info("Bandwidth value was %ld: status is %d\n", state, status);
if (ACPI_FAILURE(status))
return -EFAULT;
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 213e79ba68d5..f1ae5078b7ec 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -23,58 +23,48 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/input.h>
+
#include <asm/intel_scu_ipc.h>
#define DRIVER_NAME "msic_power_btn"
-#define MSIC_IRQ_STAT 0x02
- #define MSIC_IRQ_PB (1 << 0)
-#define MSIC_PB_CONFIG 0x3e
#define MSIC_PB_STATUS 0x3f
- #define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
-
-struct mfld_pb_priv {
- struct input_dev *input;
- unsigned int irq;
-};
+#define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
{
- struct mfld_pb_priv *priv = dev_id;
+ struct input_dev *input = dev_id;
int ret;
u8 pbstat;
ret = intel_scu_ipc_ioread8(MSIC_PB_STATUS, &pbstat);
- if (ret < 0)
- return IRQ_HANDLED;
-
- input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & MSIC_PB_LEVEL));
- input_sync(priv->input);
+ if (ret < 0) {
+ dev_err(input->dev.parent, "Read error %d while reading"
+ " MSIC_PB_STATUS\n", ret);
+ } else {
+ input_event(input, EV_KEY, KEY_POWER,
+ !(pbstat & MSIC_PB_LEVEL));
+ input_sync(input);
+ }
return IRQ_HANDLED;
}
static int __devinit mfld_pb_probe(struct platform_device *pdev)
{
- struct mfld_pb_priv *priv;
struct input_dev *input;
- int irq;
+ int irq = platform_get_irq(pdev, 0);
int error;
- irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
- priv = kzalloc(sizeof(struct mfld_pb_priv), GFP_KERNEL);
input = input_allocate_device();
- if (!priv || !input) {
- error = -ENOMEM;
- goto err_free_mem;
+ if (!input) {
+ dev_err(&pdev->dev, "Input device allocation error\n");
+ return -ENOMEM;
}
- priv->input = input;
- priv->irq = irq;
-
input->name = pdev->name;
input->phys = "power-button/input0";
input->id.bustype = BUS_HOST;
@@ -82,42 +72,40 @@ static int __devinit mfld_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(priv->irq, NULL, mfld_pb_isr,
- 0, DRIVER_NAME, priv);
+ error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+ DRIVER_NAME, input);
if (error) {
- dev_err(&pdev->dev,
- "unable to request irq %d for mfld power button\n",
- irq);
- goto err_free_mem;
+		dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
+				" button\n", irq);
+ goto err_free_input;
}
error = input_register_device(input);
if (error) {
- dev_err(&pdev->dev,
- "unable to register input dev, error %d\n", error);
+ dev_err(&pdev->dev, "Unable to register input dev, error "
+ "%d\n", error);
goto err_free_irq;
}
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, input);
return 0;
err_free_irq:
- free_irq(priv->irq, priv);
-err_free_mem:
+ free_irq(irq, input);
+err_free_input:
input_free_device(input);
- kfree(priv);
return error;
}
static int __devexit mfld_pb_remove(struct platform_device *pdev)
{
- struct mfld_pb_priv *priv = platform_get_drvdata(pdev);
-
- free_irq(priv->irq, priv);
- input_unregister_device(priv->input);
- kfree(priv);
+ struct input_dev *input = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+ free_irq(irq, input);
+ input_unregister_device(input);
platform_set_drvdata(pdev, NULL);
+
return 0;
}
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index c2f4bd8013b5..3a578323122b 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -37,49 +37,50 @@
#include <asm/intel_scu_ipc.h>
/* Number of thermal sensors */
-#define MSIC_THERMAL_SENSORS 4
+#define MSIC_THERMAL_SENSORS 4
/* ADC1 - thermal registers */
-#define MSIC_THERM_ADC1CNTL1 0x1C0
-#define MSIC_ADC_ENBL 0x10
-#define MSIC_ADC_START 0x08
+#define MSIC_THERM_ADC1CNTL1 0x1C0
+#define MSIC_ADC_ENBL 0x10
+#define MSIC_ADC_START 0x08
-#define MSIC_THERM_ADC1CNTL3 0x1C2
-#define MSIC_ADCTHERM_ENBL 0x04
-#define MSIC_ADCRRDATA_ENBL 0x05
-#define MSIC_CHANL_MASK_VAL 0x0F
+#define MSIC_THERM_ADC1CNTL3 0x1C2
+#define MSIC_ADCTHERM_ENBL 0x04
+#define MSIC_ADCRRDATA_ENBL 0x05
+#define MSIC_CHANL_MASK_VAL 0x0F
-#define MSIC_STOPBIT_MASK 16
-#define MSIC_ADCTHERM_MASK 4
-#define ADC_CHANLS_MAX 15 /* Number of ADC channels */
-#define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
+#define MSIC_STOPBIT_MASK 16
+#define MSIC_ADCTHERM_MASK 4
+/* Number of ADC channels */
+#define ADC_CHANLS_MAX 15
+#define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
/* ADC channel code values */
-#define SKIN_SENSOR0_CODE 0x08
-#define SKIN_SENSOR1_CODE 0x09
-#define SYS_SENSOR_CODE 0x0A
-#define MSIC_DIE_SENSOR_CODE 0x03
+#define SKIN_SENSOR0_CODE 0x08
+#define SKIN_SENSOR1_CODE 0x09
+#define SYS_SENSOR_CODE 0x0A
+#define MSIC_DIE_SENSOR_CODE 0x03
-#define SKIN_THERM_SENSOR0 0
-#define SKIN_THERM_SENSOR1 1
-#define SYS_THERM_SENSOR2 2
-#define MSIC_DIE_THERM_SENSOR3 3
+#define SKIN_THERM_SENSOR0 0
+#define SKIN_THERM_SENSOR1 1
+#define SYS_THERM_SENSOR2 2
+#define MSIC_DIE_THERM_SENSOR3 3
/* ADC code range */
-#define ADC_MAX 977
-#define ADC_MIN 162
-#define ADC_VAL0C 887
-#define ADC_VAL20C 720
-#define ADC_VAL40C 508
-#define ADC_VAL60C 315
+#define ADC_MAX 977
+#define ADC_MIN 162
+#define ADC_VAL0C 887
+#define ADC_VAL20C 720
+#define ADC_VAL40C 508
+#define ADC_VAL60C 315
/* ADC base addresses */
-#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
-#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
+#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
+#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
/* MSIC die attributes */
-#define MSIC_DIE_ADC_MIN 488
-#define MSIC_DIE_ADC_MAX 1004
+#define MSIC_DIE_ADC_MIN 488
+#define MSIC_DIE_ADC_MAX 1004
/* This holds the address of the first free ADC channel,
* among the 15 channels
@@ -87,15 +88,15 @@
static int channel_index;
struct platform_info {
- struct platform_device *pdev;
- struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
+ struct platform_device *pdev;
+ struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
};
struct thermal_device_info {
- unsigned int chnl_addr;
- int direct;
- /* This holds the current temperature in millidegree celsius */
- long curr_temp;
+ unsigned int chnl_addr;
+ int direct;
+ /* This holds the current temperature in millidegree celsius */
+ long curr_temp;
};
/**
@@ -106,7 +107,7 @@ struct thermal_device_info {
*/
static int to_msic_die_temp(uint16_t adc_val)
{
- return (368 * (adc_val) / 1000) - 220;
+ return (368 * (adc_val) / 1000) - 220;
}
/**
@@ -118,7 +119,7 @@ static int to_msic_die_temp(uint16_t adc_val)
*/
static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
{
- return (adc_val >= min) && (adc_val <= max);
+ return (adc_val >= min) && (adc_val <= max);
}
/**
@@ -136,35 +137,35 @@ static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
*/
static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
{
- int temp;
-
- /* Direct conversion for die temperature */
- if (direct) {
- if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
- *tp = to_msic_die_temp(adc_val) * 1000;
- return 0;
- }
- return -ERANGE;
- }
-
- if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
- return -ERANGE;
-
- /* Linear approximation for skin temperature */
- if (adc_val > ADC_VAL0C)
- temp = 177 - (adc_val/5);
- else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
- temp = 111 - (adc_val/8);
- else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
- temp = 92 - (adc_val/10);
- else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
- temp = 91 - (adc_val/10);
- else
- temp = 112 - (adc_val/6);
-
- /* Convert temperature in celsius to milli degree celsius */
- *tp = temp * 1000;
- return 0;
+ int temp;
+
+ /* Direct conversion for die temperature */
+ if (direct) {
+ if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
+ *tp = to_msic_die_temp(adc_val) * 1000;
+ return 0;
+ }
+ return -ERANGE;
+ }
+
+ if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
+ return -ERANGE;
+
+ /* Linear approximation for skin temperature */
+ if (adc_val > ADC_VAL0C)
+ temp = 177 - (adc_val/5);
+ else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
+ temp = 111 - (adc_val/8);
+ else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
+ temp = 92 - (adc_val/10);
+ else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
+ temp = 91 - (adc_val/10);
+ else
+ temp = 112 - (adc_val/6);
+
+ /* Convert temperature in celsius to milli degree celsius */
+ *tp = temp * 1000;
+ return 0;
}
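
A quick worked check of the skin-sensor approximation above, using the ADC_VAL* constants defined earlier in this file; the example function is only an illustrative sketch, not part of the patch.

/* Sketch only: two sample conversions through adc_to_temp(). */
static void adc_to_temp_example(void)
{
	unsigned long mdeg;

	/* 887 == ADC_VAL0C: 111 - 887/8 = 1, i.e. about 0 C */
	if (!adc_to_temp(0, 887, &mdeg))
		pr_debug("adc 887 -> %lu mC\n", mdeg);	/* prints 1000 */

	/* 720 == ADC_VAL20C: 92 - 720/10 = 20 C */
	if (!adc_to_temp(0, 720, &mdeg))
		pr_debug("adc 720 -> %lu mC\n", mdeg);	/* prints 20000 */
}
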
/**
@@ -178,47 +179,47 @@ static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
*/
static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
{
- struct thermal_device_info *td_info = tzd->devdata;
- uint16_t adc_val, addr;
- uint8_t data = 0;
- int ret;
- unsigned long curr_temp;
-
-
- addr = td_info->chnl_addr;
-
- /* Enable the msic for conversion before reading */
- ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
- if (ret)
- return ret;
-
- /* Re-toggle the RRDATARD bit (temporary workaround) */
- ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
- if (ret)
- return ret;
-
- /* Read the higher bits of data */
- ret = intel_scu_ipc_ioread8(addr, &data);
- if (ret)
- return ret;
-
- /* Shift bits to accommodate the lower two data bits */
- adc_val = (data << 2);
- addr++;
-
- ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */
- if (ret)
- return ret;
-
- /* Adding lower two bits to the higher bits */
- data &= 03;
- adc_val += data;
-
- /* Convert ADC value to temperature */
- ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
- if (ret == 0)
- *temp = td_info->curr_temp = curr_temp;
- return ret;
+ struct thermal_device_info *td_info = tzd->devdata;
+ uint16_t adc_val, addr;
+ uint8_t data = 0;
+ int ret;
+ unsigned long curr_temp;
+
+
+ addr = td_info->chnl_addr;
+
+ /* Enable the msic for conversion before reading */
+ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
+ if (ret)
+ return ret;
+
+ /* Re-toggle the RRDATARD bit (temporary workaround) */
+ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
+ if (ret)
+ return ret;
+
+ /* Read the higher bits of data */
+ ret = intel_scu_ipc_ioread8(addr, &data);
+ if (ret)
+ return ret;
+
+ /* Shift bits to accommodate the lower two data bits */
+ adc_val = (data << 2);
+ addr++;
+
+ ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */
+ if (ret)
+ return ret;
+
+ /* Adding lower two bits to the higher bits */
+ data &= 03;
+ adc_val += data;
+
+ /* Convert ADC value to temperature */
+ ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
+ if (ret == 0)
+ *temp = td_info->curr_temp = curr_temp;
+ return ret;
}
/**
@@ -231,22 +232,21 @@ static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
*/
static int configure_adc(int val)
{
- int ret;
- uint8_t data;
-
- ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if (val) {
- /* Enable and start the ADC */
- data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
- } else {
- /* Just stop the ADC */
- data &= (~MSIC_ADC_START);
- }
-
- return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data);
+ int ret;
+ uint8_t data;
+
+ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
+ if (ret)
+ return ret;
+
+ if (val) {
+ /* Enable and start the ADC */
+ data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
+ } else {
+ /* Just stop the ADC */
+ data &= (~MSIC_ADC_START);
+ }
+ return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data);
}
/**
@@ -259,30 +259,30 @@ static int configure_adc(int val)
*/
static int set_up_therm_channel(u16 base_addr)
{
- int ret;
-
- /* Enable all the sensor channels */
- ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE);
- if (ret)
- return ret;
-
- ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE);
- if (ret)
- return ret;
-
- ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE);
- if (ret)
- return ret;
-
- /* Since this is the last channel, set the stop bit
- to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
- ret = intel_scu_ipc_iowrite8(base_addr + 3,
- (MSIC_DIE_SENSOR_CODE | 0x10));
- if (ret)
- return ret;
-
- /* Enable ADC and start it */
- return configure_adc(1);
+ int ret;
+
+ /* Enable all the sensor channels */
+ ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE);
+ if (ret)
+ return ret;
+
+ ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE);
+ if (ret)
+ return ret;
+
+ ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE);
+ if (ret)
+ return ret;
+
+ /* Since this is the last channel, set the stop bit
+ * to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
+ ret = intel_scu_ipc_iowrite8(base_addr + 3,
+ (MSIC_DIE_SENSOR_CODE | 0x10));
+ if (ret)
+ return ret;
+
+ /* Enable ADC and start it */
+ return configure_adc(1);
}
/**
@@ -293,13 +293,13 @@ static int set_up_therm_channel(u16 base_addr)
*/
static int reset_stopbit(uint16_t addr)
{
- int ret;
- uint8_t data;
- ret = intel_scu_ipc_ioread8(addr, &data);
- if (ret)
- return ret;
- /* Set the stop bit to zero */
- return intel_scu_ipc_iowrite8(addr, (data & 0xEF));
+ int ret;
+ uint8_t data;
+ ret = intel_scu_ipc_ioread8(addr, &data);
+ if (ret)
+ return ret;
+ /* Set the stop bit to zero */
+ return intel_scu_ipc_iowrite8(addr, (data & 0xEF));
}
/**
@@ -317,30 +317,30 @@ static int reset_stopbit(uint16_t addr)
*/
static int find_free_channel(void)
{
- int ret;
- int i;
- uint8_t data;
-
- /* check whether ADC is enabled */
- ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if ((data & MSIC_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < ADC_CHANLS_MAX; i++) {
- ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data);
- if (ret)
- return ret;
-
- if (data & MSIC_STOPBIT_MASK) {
- ret = i;
- break;
- }
- }
- return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
+ int ret;
+ int i;
+ uint8_t data;
+
+ /* check whether ADC is enabled */
+ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
+ if (ret)
+ return ret;
+
+ if ((data & MSIC_ADC_ENBL) == 0)
+ return 0;
+
+ /* ADC is already enabled; Looking for an empty channel */
+ for (i = 0; i < ADC_CHANLS_MAX; i++) {
+ ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data);
+ if (ret)
+ return ret;
+
+ if (data & MSIC_STOPBIT_MASK) {
+ ret = i;
+ break;
+ }
+ }
+ return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
}
/**
@@ -351,48 +351,48 @@ static int find_free_channel(void)
*/
static int mid_initialize_adc(struct device *dev)
{
- u8 data;
- u16 base_addr;
- int ret;
-
- /*
- * Ensure that adctherm is disabled before we
- * initialize the ADC
- */
- ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data);
- if (ret)
- return ret;
-
- if (data & MSIC_ADCTHERM_MASK)
- dev_warn(dev, "ADCTHERM already set");
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel();
- if (channel_index < 0) {
- dev_err(dev, "No free ADC channels");
- return channel_index;
- }
-
- base_addr = ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- ret = reset_stopbit(base_addr);
- if (ret)
- return ret;
-
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- ret = set_up_therm_channel(base_addr);
- if (ret) {
- dev_err(dev, "unable to enable ADC");
- return ret;
- }
- dev_dbg(dev, "ADC initialization successful");
- return ret;
+ u8 data;
+ u16 base_addr;
+ int ret;
+
+ /*
+ * Ensure that adctherm is disabled before we
+ * initialize the ADC
+ */
+ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data);
+ if (ret)
+ return ret;
+
+ if (data & MSIC_ADCTHERM_MASK)
+ dev_warn(dev, "ADCTHERM already set");
+
+ /* Index of the first channel in which the stop bit is set */
+ channel_index = find_free_channel();
+ if (channel_index < 0) {
+ dev_err(dev, "No free ADC channels");
+ return channel_index;
+ }
+
+ base_addr = ADC_CHNL_START_ADDR + channel_index;
+
+ if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
+ /* Reset stop bit for channels other than 0 and 12 */
+ ret = reset_stopbit(base_addr);
+ if (ret)
+ return ret;
+
+ /* Index of the first free channel */
+ base_addr++;
+ channel_index++;
+ }
+
+ ret = set_up_therm_channel(base_addr);
+ if (ret) {
+ dev_err(dev, "unable to enable ADC");
+ return ret;
+ }
+ dev_dbg(dev, "ADC initialization successful");
+ return ret;
}
/**
@@ -403,18 +403,18 @@ static int mid_initialize_adc(struct device *dev)
*/
static struct thermal_device_info *initialize_sensor(int index)
{
- struct thermal_device_info *td_info =
- kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
-
- if (!td_info)
- return NULL;
-
- /* Set the base addr of the channel for this sensor */
- td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
- /* Sensor 3 is direct conversion */
- if (index == 3)
- td_info->direct = 1;
- return td_info;
+ struct thermal_device_info *td_info =
+ kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+ if (!td_info)
+ return NULL;
+
+ /* Set the base addr of the channel for this sensor */
+ td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
+ /* Sensor 3 is direct conversion */
+ if (index == 3)
+ td_info->direct = 1;
+ return td_info;
}
/**
@@ -425,7 +425,7 @@ static struct thermal_device_info *initialize_sensor(int index)
*/
static int mid_thermal_resume(struct platform_device *pdev)
{
- return mid_initialize_adc(&pdev->dev);
+ return mid_initialize_adc(&pdev->dev);
}
/**
@@ -437,12 +437,12 @@ static int mid_thermal_resume(struct platform_device *pdev)
*/
static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
{
- /*
- * This just stops the ADC and does not disable it.
- * temporary workaround until we have a generic ADC driver.
- * If 0 is passed, it disables the ADC.
- */
- return configure_adc(0);
+ /*
+ * This just stops the ADC and does not disable it.
+ * temporary workaround until we have a generic ADC driver.
+ * If 0 is passed, it disables the ADC.
+ */
+ return configure_adc(0);
}
/**
@@ -453,16 +453,15 @@ static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
*/
static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
{
- WARN_ON(tzd == NULL);
- return mid_read_temp(tzd, temp);
+ WARN_ON(tzd == NULL);
+ return mid_read_temp(tzd, temp);
}
/* Can't be const */
static struct thermal_zone_device_ops tzd_ops = {
- .get_temp = read_curr_temp,
+ .get_temp = read_curr_temp,
};
-
/**
* mid_thermal_probe - mfld thermal initialize
* @pdev: platform device structure
@@ -472,46 +471,45 @@ static struct thermal_zone_device_ops tzd_ops = {
*/
static int mid_thermal_probe(struct platform_device *pdev)
{
- static char *name[MSIC_THERMAL_SENSORS] = {
- "skin0", "skin1", "sys", "msicdie"
- };
-
- int ret;
- int i;
- struct platform_info *pinfo;
-
- pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
-
- /* Initializing the hardware */
- ret = mid_initialize_adc(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "ADC init failed");
- kfree(pinfo);
- return ret;
- }
-
- /* Register each sensor with the generic thermal framework*/
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
- pinfo->tzd[i] = thermal_zone_device_register(name[i],
- 0, initialize_sensor(i),
- &tzd_ops, 0, 0, 0, 0);
- if (IS_ERR(pinfo->tzd[i]))
- goto reg_fail;
- }
-
- pinfo->pdev = pdev;
- platform_set_drvdata(pdev, pinfo);
- return 0;
+ static char *name[MSIC_THERMAL_SENSORS] = {
+ "skin0", "skin1", "sys", "msicdie"
+ };
+
+ int ret;
+ int i;
+ struct platform_info *pinfo;
+
+ pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ /* Initializing the hardware */
+ ret = mid_initialize_adc(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "ADC init failed");
+ kfree(pinfo);
+ return ret;
+ }
+
+ /* Register each sensor with the generic thermal framework*/
+ for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
+ pinfo->tzd[i] = thermal_zone_device_register(name[i],
+ 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0);
+ if (IS_ERR(pinfo->tzd[i]))
+ goto reg_fail;
+ }
+
+ pinfo->pdev = pdev;
+ platform_set_drvdata(pdev, pinfo);
+ return 0;
reg_fail:
- ret = PTR_ERR(pinfo->tzd[i]);
- while (--i >= 0)
- thermal_zone_device_unregister(pinfo->tzd[i]);
- configure_adc(0);
- kfree(pinfo);
- return ret;
+ ret = PTR_ERR(pinfo->tzd[i]);
+ while (--i >= 0)
+ thermal_zone_device_unregister(pinfo->tzd[i]);
+ configure_adc(0);
+ kfree(pinfo);
+ return ret;
}
/**
@@ -523,49 +521,46 @@ reg_fail:
*/
static int mid_thermal_remove(struct platform_device *pdev)
{
- int i;
- struct platform_info *pinfo = platform_get_drvdata(pdev);
+ int i;
+ struct platform_info *pinfo = platform_get_drvdata(pdev);
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
- thermal_zone_device_unregister(pinfo->tzd[i]);
+ for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
+ thermal_zone_device_unregister(pinfo->tzd[i]);
- platform_set_drvdata(pdev, NULL);
+ kfree(pinfo);
+ platform_set_drvdata(pdev, NULL);
- /* Stop the ADC */
- return configure_adc(0);
+ /* Stop the ADC */
+ return configure_adc(0);
}
-/*********************************************************************
- * Driver initialisation and finalization
- *********************************************************************/
-
#define DRIVER_NAME "msic_sensor"
static const struct platform_device_id therm_id_table[] = {
- { DRIVER_NAME, 1 },
- { }
+ { DRIVER_NAME, 1 },
+ { }
};
static struct platform_driver mid_thermal_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
- .probe = mid_thermal_probe,
- .suspend = mid_thermal_suspend,
- .resume = mid_thermal_resume,
- .remove = __devexit_p(mid_thermal_remove),
- .id_table = therm_id_table,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = mid_thermal_probe,
+ .suspend = mid_thermal_suspend,
+ .resume = mid_thermal_resume,
+ .remove = __devexit_p(mid_thermal_remove),
+ .id_table = therm_id_table,
};
static int __init mid_thermal_module_init(void)
{
- return platform_driver_register(&mid_thermal_driver);
+ return platform_driver_register(&mid_thermal_driver);
}
static void __exit mid_thermal_module_exit(void)
{
- platform_driver_unregister(&mid_thermal_driver);
+ platform_driver_unregister(&mid_thermal_driver);
}
module_init(mid_thermal_module_init);
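
The mid_read_temp() hunk above assembles a 10-bit ADC sample from two consecutive
MSIC data registers: the first register carries the upper eight bits, the next one
carries the two least-significant bits. A minimal stand-alone sketch of that
shift-and-mask step (register values are invented for the example; only the bit
arithmetic mirrors the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t high = 0xa7;	/* upper 8 bits, first data register            */
	uint8_t low  = 0x02;	/* lower 2 bits live in the following register  */
	uint16_t adc_val;

	adc_val = (uint16_t)high << 2;	/* make room for the two LSBs */
	adc_val |= low & 0x03;		/* append them                */

	printf("10-bit ADC sample: 0x%03x (%u)\n", adc_val, adc_val);
	return 0;
}
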
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
new file mode 100644
index 000000000000..e936364a609d
--- /dev/null
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -0,0 +1,396 @@
+/*
+ * intel_oaktrail.c - Intel OakTrail Platform support.
+ *
+ * Copyright (C) 2010-2011 Intel Corporation
+ * Author: Yin Kangkai (kangkai.yin@intel.com)
+ *
+ * based on Compal driver, Copyright (C) 2008 Cezary Jackiewicz
+ * <cezary.jackiewicz (at) gmail.com>, based on MSI driver
+ * Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ * This driver does the following:
+ * 1. registers itself with the Linux backlight subsystem at
+ * /sys/class/backlight/intel_oaktrail/
+ *
+ * 2. registers with the rfkill subsystem at /sys/class/rfkill/rfkillX/
+ * for these components: wifi, bluetooth, wwan (3g), gps
+ *
+ * This driver might work on other products based on Oaktrail. If you
+ * want to try it, you can pass force=1 as an argument to the module,
+ * which will force it to load even when the DMI data doesn't identify
+ * the product as compatible.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/rfkill.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+
+#define DRIVER_NAME "intel_oaktrail"
+#define DRIVER_VERSION "0.4ac1"
+
+/*
+ * This is the device status address in EC space, and the control bit
+ * definitions:
+ *
+ * (1 << 0): Camera enable/disable, RW.
+ * (1 << 1): Bluetooth enable/disable, RW.
+ * (1 << 2): GPS enable/disable, RW.
+ * (1 << 3): WiFi enable/disable, RW.
+ * (1 << 4): WWAN (3G) enable/disable, RW.
+ * (1 << 5): Touchscreen enable/disable, Read Only.
+ */
+#define OT_EC_DEVICE_STATE_ADDRESS 0xD6
+
+#define OT_EC_CAMERA_MASK (1 << 0)
+#define OT_EC_BT_MASK (1 << 1)
+#define OT_EC_GPS_MASK (1 << 2)
+#define OT_EC_WIFI_MASK (1 << 3)
+#define OT_EC_WWAN_MASK (1 << 4)
+#define OT_EC_TS_MASK (1 << 5)
+
+/*
+ * These are the addresses in EC space and the commands used to control the LCD backlight:
+ *
+ * Two steps needed to change the LCD backlight:
+ * 1. write the backlight percentage into OT_EC_BL_BRIGHTNESS_ADDRESS;
+ * 2. write OT_EC_BL_CONTROL_ON_DATA into OT_EC_BL_CONTROL_ADDRESS.
+ *
+ * To read the LCD backlight, just read out the value from
+ * OT_EC_BL_BRIGHTNESS_ADDRESS.
+ *
+ * LCD backlight brightness range: 0 - 100 (OT_EC_BL_BRIGHTNESS_MAX)
+ */
+#define OT_EC_BL_BRIGHTNESS_ADDRESS 0x44
+#define OT_EC_BL_BRIGHTNESS_MAX 100
+#define OT_EC_BL_CONTROL_ADDRESS 0x3A
+#define OT_EC_BL_CONTROL_ON_DATA 0x1A
+
+
+static int force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+static struct platform_device *oaktrail_device;
+static struct backlight_device *oaktrail_bl_device;
+static struct rfkill *bt_rfkill;
+static struct rfkill *gps_rfkill;
+static struct rfkill *wifi_rfkill;
+static struct rfkill *wwan_rfkill;
+
+
+/* rfkill */
+static int oaktrail_rfkill_set(void *data, bool blocked)
+{
+ u8 value;
+ u8 result;
+ unsigned long radio = (unsigned long) data;
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &result);
+
+ if (!blocked)
+ value = (u8) (result | radio);
+ else
+ value = (u8) (result & ~radio);
+
+ ec_write(OT_EC_DEVICE_STATE_ADDRESS, value);
+
+ return 0;
+}
+
+static const struct rfkill_ops oaktrail_rfkill_ops = {
+ .set_block = oaktrail_rfkill_set,
+};
+
+static struct rfkill *oaktrail_rfkill_new(char *name, enum rfkill_type type,
+ unsigned long mask)
+{
+ struct rfkill *rfkill_dev;
+ u8 value;
+ int err;
+
+ rfkill_dev = rfkill_alloc(name, &oaktrail_device->dev, type,
+ &oaktrail_rfkill_ops, (void *)mask);
+ if (!rfkill_dev)
+ return ERR_PTR(-ENOMEM);
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &value);
+	rfkill_init_sw_state(rfkill_dev, !(value & mask));
+
+ err = rfkill_register(rfkill_dev);
+ if (err) {
+ rfkill_destroy(rfkill_dev);
+ return ERR_PTR(err);
+ }
+
+ return rfkill_dev;
+}
+
+static inline void __oaktrail_rfkill_cleanup(struct rfkill *rf)
+{
+ if (rf) {
+ rfkill_unregister(rf);
+ rfkill_destroy(rf);
+ }
+}
+
+static void oaktrail_rfkill_cleanup(void)
+{
+ __oaktrail_rfkill_cleanup(wifi_rfkill);
+ __oaktrail_rfkill_cleanup(bt_rfkill);
+ __oaktrail_rfkill_cleanup(gps_rfkill);
+ __oaktrail_rfkill_cleanup(wwan_rfkill);
+}
+
+static int oaktrail_rfkill_init(void)
+{
+ int ret;
+
+ wifi_rfkill = oaktrail_rfkill_new("oaktrail-wifi",
+ RFKILL_TYPE_WLAN,
+ OT_EC_WIFI_MASK);
+ if (IS_ERR(wifi_rfkill)) {
+ ret = PTR_ERR(wifi_rfkill);
+ wifi_rfkill = NULL;
+ goto cleanup;
+ }
+
+ bt_rfkill = oaktrail_rfkill_new("oaktrail-bluetooth",
+ RFKILL_TYPE_BLUETOOTH,
+ OT_EC_BT_MASK);
+ if (IS_ERR(bt_rfkill)) {
+ ret = PTR_ERR(bt_rfkill);
+ bt_rfkill = NULL;
+ goto cleanup;
+ }
+
+ gps_rfkill = oaktrail_rfkill_new("oaktrail-gps",
+ RFKILL_TYPE_GPS,
+ OT_EC_GPS_MASK);
+ if (IS_ERR(gps_rfkill)) {
+ ret = PTR_ERR(gps_rfkill);
+ gps_rfkill = NULL;
+ goto cleanup;
+ }
+
+ wwan_rfkill = oaktrail_rfkill_new("oaktrail-wwan",
+ RFKILL_TYPE_WWAN,
+ OT_EC_WWAN_MASK);
+ if (IS_ERR(wwan_rfkill)) {
+ ret = PTR_ERR(wwan_rfkill);
+ wwan_rfkill = NULL;
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ oaktrail_rfkill_cleanup();
+ return ret;
+}
+
+
+/* backlight */
+static int get_backlight_brightness(struct backlight_device *b)
+{
+ u8 value;
+ ec_read(OT_EC_BL_BRIGHTNESS_ADDRESS, &value);
+
+ return value;
+}
+
+static int set_backlight_brightness(struct backlight_device *b)
+{
+ u8 percent = (u8) b->props.brightness;
+ if (percent < 0 || percent > OT_EC_BL_BRIGHTNESS_MAX)
+ return -EINVAL;
+
+ ec_write(OT_EC_BL_BRIGHTNESS_ADDRESS, percent);
+ ec_write(OT_EC_BL_CONTROL_ADDRESS, OT_EC_BL_CONTROL_ON_DATA);
+
+ return 0;
+}
+
+static const struct backlight_ops oaktrail_bl_ops = {
+ .get_brightness = get_backlight_brightness,
+ .update_status = set_backlight_brightness,
+};
+
+static int oaktrail_backlight_init(void)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
+ bd = backlight_device_register(DRIVER_NAME,
+ &oaktrail_device->dev, NULL,
+ &oaktrail_bl_ops,
+ &props);
+
+ if (IS_ERR(bd)) {
+ oaktrail_bl_device = NULL;
+ pr_warning("Unable to register backlight device\n");
+ return PTR_ERR(bd);
+ }
+
+ oaktrail_bl_device = bd;
+
+ bd->props.brightness = get_backlight_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static void oaktrail_backlight_exit(void)
+{
+ if (oaktrail_bl_device)
+ backlight_device_unregister(oaktrail_bl_device);
+}
+
+static int __devinit oaktrail_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int __devexit oaktrail_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver oaktrail_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = oaktrail_probe,
+ .remove = __devexit_p(oaktrail_remove)
+};
+
+static int dmi_check_cb(const struct dmi_system_id *id)
+{
+ pr_info("Identified model '%s'\n", id->ident);
+ return 0;
+}
+
+static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
+ {
+ .ident = "OakTrail platform",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "OakTrail platform"),
+ },
+ .callback = dmi_check_cb
+ },
+ { }
+};
+
+static int __init oaktrail_init(void)
+{
+ int ret;
+
+ if (acpi_disabled) {
+ pr_err("ACPI needs to be enabled for this driver to work!\n");
+ return -ENODEV;
+ }
+
+ if (!force && !dmi_check_system(oaktrail_dmi_table)) {
+ pr_err("Platform not recognized (You could try the module's force-parameter)");
+ return -ENODEV;
+ }
+
+ ret = platform_driver_register(&oaktrail_driver);
+ if (ret) {
+ pr_warning("Unable to register platform driver\n");
+ goto err_driver_reg;
+ }
+
+ oaktrail_device = platform_device_alloc(DRIVER_NAME, -1);
+ if (!oaktrail_device) {
+ pr_warning("Unable to allocate platform device\n");
+ ret = -ENOMEM;
+ goto err_device_alloc;
+ }
+
+ ret = platform_device_add(oaktrail_device);
+ if (ret) {
+ pr_warning("Unable to add platform device\n");
+ goto err_device_add;
+ }
+
+ if (!acpi_video_backlight_support()) {
+ ret = oaktrail_backlight_init();
+ if (ret)
+ goto err_backlight;
+
+ } else
+ pr_info("Backlight controlled by ACPI video driver\n");
+
+ ret = oaktrail_rfkill_init();
+ if (ret) {
+ pr_warning("Setup rfkill failed\n");
+ goto err_rfkill;
+ }
+
+ pr_info("Driver "DRIVER_VERSION" successfully loaded\n");
+ return 0;
+
+err_rfkill:
+ oaktrail_backlight_exit();
+err_backlight:
+ platform_device_del(oaktrail_device);
+err_device_add:
+ platform_device_put(oaktrail_device);
+err_device_alloc:
+ platform_driver_unregister(&oaktrail_driver);
+err_driver_reg:
+
+ return ret;
+}
+
+static void __exit oaktrail_cleanup(void)
+{
+ oaktrail_backlight_exit();
+ oaktrail_rfkill_cleanup();
+ platform_device_unregister(oaktrail_device);
+ platform_driver_unregister(&oaktrail_driver);
+
+ pr_info("Driver unloaded\n");
+}
+
+module_init(oaktrail_init);
+module_exit(oaktrail_cleanup);
+
+MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)");
+MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("dmi:*:svnIntelCorporation:pnOakTrailplatform:*");
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 464bb3fc4d88..1686c1e07d5d 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -19,6 +19,8 @@
* Moorestown platform PMIC chip
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -90,8 +92,7 @@ static void pmic_program_irqtype(int gpio, int type)
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
if (offset > 8) {
- printk(KERN_ERR
- "%s: only pin 0-7 support input\n", __func__);
+ pr_err("only pin 0-7 support input\n");
return -1;/* we only have 8 GPIO can use as input */
}
return intel_scu_ipc_update_register(GPIO0 + offset,
@@ -116,8 +117,7 @@ static int pmic_gpio_direction_output(struct gpio_chip *chip,
value ? 1 << (offset - 16) : 0,
1 << (offset - 16));
else {
- printk(KERN_ERR
- "%s: invalid PMIC GPIO pin %d!\n", __func__, offset);
+ pr_err("invalid PMIC GPIO pin %d!\n", offset);
WARN_ON(1);
}
@@ -260,7 +260,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
/* setting up SRAM mapping for GPIOINT register */
pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
if (!pg->gpiointr) {
- printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__);
+ pr_err("Can not map GPIOINT\n");
retval = -EINVAL;
goto err2;
}
@@ -281,13 +281,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
pg->chip.dev = dev;
retval = gpiochip_add(&pg->chip);
if (retval) {
- printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
+ pr_err("Can not add pmic gpio chip\n");
goto err;
}
retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
if (retval) {
- printk(KERN_WARNING "pmic: Interrupt request failed\n");
+ pr_warn("Interrupt request failed\n");
goto err;
}
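
Most hunks in this series replace hand-rolled "driver: " or "%s: " printk prefixes
with pr_err()/pr_warn() plus a single pr_fmt() definition near the top of each file,
so the prefix is attached once instead of at every call site. A user-space mock-up of
the mechanism (hypothetical, not part of the patch; in the kernel, pr_err(fmt, ...)
expands to printk(KERN_ERR pr_fmt(fmt), ...)):

#include <stdio.h>

/* Same trick as the intel_pmic_gpio.c hunk: prefix every message with __func__ */
#define pr_fmt(fmt) "%s: " fmt, __func__
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("Can not map GPIOINT\n");		/* "main: Can not map GPIOINT"       */
	pr_err("invalid PMIC GPIO pin %d!\n", 42);	/* "main: invalid PMIC GPIO pin 42!" */
	return 0;
}

Other files in the section use the KBUILD_MODNAME variant of pr_fmt(), which prepends
the module name instead of the function name; the call sites look the same either way.
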
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 23fb2afda00b..3ff629df9f01 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -135,7 +135,7 @@ static int set_lcd_level(int level)
buf[1] = (u8) (level*31);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
- NULL, 0, 1);
+ NULL, 0);
}
static int get_lcd_level(void)
@@ -144,7 +144,7 @@ static int get_lcd_level(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -157,7 +157,7 @@ static int get_auto_brightness(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -172,7 +172,7 @@ static int set_auto_brightness(int enable)
wdata[0] = 4;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -180,7 +180,7 @@ static int set_auto_brightness(int enable)
wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
- NULL, 0, 1);
+ NULL, 0);
}
static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
@@ -217,7 +217,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
u8 wdata = 0, rdata;
int result;
- result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1);
+ result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
if (result < 0)
return -1;
@@ -447,7 +447,7 @@ static struct platform_device *msipf_device;
static int dmi_check_cb(const struct dmi_system_id *id)
{
- pr_info("Identified laptop model '%s'.\n", id->ident);
+ pr_info("Identified laptop model '%s'\n", id->ident);
return 1;
}
@@ -800,7 +800,7 @@ static void msi_laptop_input_destroy(void)
input_unregister_device(msi_laptop_input_dev);
}
-static int load_scm_model_init(struct platform_device *sdev)
+static int __init load_scm_model_init(struct platform_device *sdev)
{
u8 data;
int result;
@@ -875,8 +875,7 @@ static int __init msi_init(void)
/* Register backlight stuff */
if (acpi_video_backlight_support()) {
- pr_info("Brightness ignored, must be controlled "
- "by ACPI video driver\n");
+ pr_info("Brightness ignored, must be controlled by ACPI video driver\n");
} else {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -930,7 +929,7 @@ static int __init msi_init(void)
if (auto_brightness != 2)
set_auto_brightness(auto_brightness);
- pr_info("driver "MSI_DRIVER_VERSION" successfully loaded.\n");
+ pr_info("driver " MSI_DRIVER_VERSION " successfully loaded\n");
return 0;
@@ -978,7 +977,7 @@ static void __exit msi_cleanup(void)
if (auto_brightness != 2)
set_auto_brightness(1);
- pr_info("driver unloaded.\n");
+ pr_info("driver unloaded\n");
}
module_init(msi_init);
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index d5419c9ec07a..c832e3356cd6 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/input.h>
@@ -36,13 +37,10 @@ MODULE_ALIAS("wmi:551A1F84-FBDD-4125-91DB-3EA8F44F1D45");
MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
#define DRV_NAME "msi-wmi"
-#define DRV_PFX DRV_NAME ": "
#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
-#define dprintk(msg...) pr_debug(DRV_PFX msg)
-
#define SCANCODE_BASE 0xD0
#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE
#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1)
@@ -78,7 +76,7 @@ static int msi_wmi_query_block(int instance, int *ret)
if (!obj || obj->type != ACPI_TYPE_INTEGER) {
if (obj) {
- printk(KERN_ERR DRV_PFX "query block returned object "
+ pr_err("query block returned object "
"type: %d - buffer length:%d\n", obj->type,
obj->type == ACPI_TYPE_BUFFER ?
obj->buffer.length : 0);
@@ -97,8 +95,8 @@ static int msi_wmi_set_block(int instance, int value)
struct acpi_buffer input = { sizeof(int), &value };
- dprintk("Going to set block of instance: %d - value: %d\n",
- instance, value);
+ pr_debug("Going to set block of instance: %d - value: %d\n",
+ instance, value);
status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input);
@@ -112,20 +110,19 @@ static int bl_get(struct backlight_device *bd)
/* Instance 1 is "get backlight", cmp with DSDT */
err = msi_wmi_query_block(1, &ret);
if (err) {
- printk(KERN_ERR DRV_PFX "Could not query backlight: %d\n", err);
+ pr_err("Could not query backlight: %d\n", err);
return -EINVAL;
}
- dprintk("Get: Query block returned: %d\n", ret);
+ pr_debug("Get: Query block returned: %d\n", ret);
for (level = 0; level < ARRAY_SIZE(backlight_map); level++) {
if (backlight_map[level] == ret) {
- dprintk("Current backlight level: 0x%X - index: %d\n",
- backlight_map[level], level);
+ pr_debug("Current backlight level: 0x%X - index: %d\n",
+ backlight_map[level], level);
break;
}
}
if (level == ARRAY_SIZE(backlight_map)) {
- printk(KERN_ERR DRV_PFX "get: Invalid brightness value: 0x%X\n",
- ret);
+ pr_err("get: Invalid brightness value: 0x%X\n", ret);
return -EINVAL;
}
return level;
@@ -156,7 +153,7 @@ static void msi_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO DRV_PFX "bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -164,7 +161,7 @@ static void msi_wmi_notify(u32 value, void *context)
if (obj && obj->type == ACPI_TYPE_INTEGER) {
int eventcode = obj->integer.value;
- dprintk("Eventcode: 0x%x\n", eventcode);
+ pr_debug("Eventcode: 0x%x\n", eventcode);
key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
eventcode);
if (key) {
@@ -175,8 +172,8 @@ static void msi_wmi_notify(u32 value, void *context)
/* Ignore event if the same event happened in a 50 ms
timeframe -> Key press may result in 10-20 GPEs */
if (ktime_to_us(diff) < 1000 * 50) {
- dprintk("Suppressed key event 0x%X - "
- "Last press was %lld us ago\n",
+ pr_debug("Suppressed key event 0x%X - "
+ "Last press was %lld us ago\n",
key->code, ktime_to_us(diff));
return;
}
@@ -187,17 +184,16 @@ static void msi_wmi_notify(u32 value, void *context)
(!acpi_video_backlight_support() ||
(key->code != MSI_WMI_BRIGHTNESSUP &&
key->code != MSI_WMI_BRIGHTNESSDOWN))) {
- dprintk("Send key: 0x%X - "
- "Input layer keycode: %d\n", key->code,
- key->keycode);
+ pr_debug("Send key: 0x%X - "
+ "Input layer keycode: %d\n",
+ key->code, key->keycode);
sparse_keymap_report_entry(msi_wmi_input_dev,
key, 1, true);
}
} else
- printk(KERN_INFO "Unknown key pressed - %x\n",
- eventcode);
+ pr_info("Unknown key pressed - %x\n", eventcode);
} else
- printk(KERN_INFO DRV_PFX "Unknown event received\n");
+ pr_info("Unknown event received\n");
kfree(response.pointer);
}
@@ -238,8 +234,7 @@ static int __init msi_wmi_init(void)
int err;
if (!wmi_has_guid(MSIWMI_EVENT_GUID)) {
- printk(KERN_ERR
- "This machine doesn't have MSI-hotkeys through WMI\n");
+ pr_err("This machine doesn't have MSI-hotkeys through WMI\n");
return -ENODEV;
}
err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
@@ -270,7 +265,7 @@ static int __init msi_wmi_init(void)
backlight->props.brightness = err;
}
- dprintk("Event handler installed\n");
+ pr_debug("Event handler installed\n");
return 0;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 6fe8cd6e23b5..bbd182e178cb 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -42,6 +42,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -70,10 +72,10 @@
#include <linux/miscdevice.h>
#endif
-#define DRV_PFX "sony-laptop: "
-#define dprintk(msg...) do { \
- if (debug) \
- pr_warn(DRV_PFX msg); \
+#define dprintk(fmt, ...) \
+do { \
+ if (debug) \
+ pr_warn(fmt, ##__VA_ARGS__); \
} while (0)
#define SONY_LAPTOP_DRIVER_VERSION "0.6"
@@ -418,7 +420,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
error = kfifo_alloc(&sony_laptop_input.fifo,
SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
if (error) {
- pr_err(DRV_PFX "kfifo_alloc failed\n");
+ pr_err("kfifo_alloc failed\n");
goto err_dec_users;
}
@@ -702,7 +704,7 @@ static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
return 0;
}
- pr_warn(DRV_PFX "acpi_callreadfunc failed\n");
+ pr_warn("acpi_callreadfunc failed\n");
return -1;
}
@@ -728,8 +730,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
if (status == AE_OK) {
if (result != NULL) {
if (out_obj.type != ACPI_TYPE_INTEGER) {
- pr_warn(DRV_PFX "acpi_evaluate_object bad "
- "return type\n");
+ pr_warn("acpi_evaluate_object bad return type\n");
return -1;
}
*result = out_obj.integer.value;
@@ -737,7 +738,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
return 0;
}
- pr_warn(DRV_PFX "acpi_evaluate_object failed\n");
+ pr_warn("acpi_evaluate_object failed\n");
return -1;
}
@@ -961,7 +962,6 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
static int sony_nc_get_brightness_ng(struct backlight_device *bd)
{
int result;
- int *handle = (int *)bl_get_data(bd);
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
@@ -973,7 +973,6 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
static int sony_nc_update_status_ng(struct backlight_device *bd)
{
int value, result;
- int *handle = (int *)bl_get_data(bd);
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
@@ -1104,10 +1103,8 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
}
if (!key_event->data)
- pr_info(DRV_PFX
- "Unknown event: 0x%x 0x%x\n",
- key_handle,
- ev);
+ pr_info("Unknown event: 0x%x 0x%x\n",
+ key_handle, ev);
else
sony_laptop_report_input_event(ev);
}
@@ -1128,7 +1125,7 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
struct acpi_device_info *info;
if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) {
- pr_warn(DRV_PFX "method: name: %4.4s, args %X\n",
+ pr_warn("method: name: %4.4s, args %X\n",
(char *)&info->name, info->param_count);
kfree(info);
@@ -1169,7 +1166,7 @@ static int sony_nc_resume(struct acpi_device *device)
ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
item->value, NULL);
if (ret < 0) {
- pr_err(DRV_PFX "%s: %d\n", __func__, ret);
+ pr_err("%s: %d\n", __func__, ret);
break;
}
}
@@ -1336,12 +1333,12 @@ static void sony_nc_rfkill_setup(struct acpi_device *device)
device_enum = (union acpi_object *) buffer.pointer;
if (!device_enum) {
- pr_err(DRV_PFX "No SN06 return object.");
+ pr_err("No SN06 return object\n");
goto out_no_enum;
}
if (device_enum->type != ACPI_TYPE_BUFFER) {
- pr_err(DRV_PFX "Invalid SN06 return object 0x%.2x\n",
- device_enum->type);
+ pr_err("Invalid SN06 return object 0x%.2x\n",
+ device_enum->type);
goto out_no_enum;
}
@@ -1662,7 +1659,7 @@ static void sony_nc_backlight_setup(void)
ops, &props);
if (IS_ERR(sony_bl_props.dev)) {
- pr_warn(DRV_PFX "unable to register backlight device\n");
+ pr_warn("unable to register backlight device\n");
sony_bl_props.dev = NULL;
} else
sony_bl_props.dev->props.brightness =
@@ -1682,8 +1679,7 @@ static int sony_nc_add(struct acpi_device *device)
acpi_handle handle;
struct sony_nc_value *item;
- pr_info(DRV_PFX "%s v%s.\n", SONY_NC_DRIVER_NAME,
- SONY_LAPTOP_DRIVER_VERSION);
+ pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
sony_nc_acpi_device = device;
strcpy(acpi_device_class(device), "sony/hotkey");
@@ -1708,7 +1704,7 @@ static int sony_nc_add(struct acpi_device *device)
sony_nc_acpi_handle, 1, sony_walk_callback,
NULL, NULL, NULL);
if (ACPI_FAILURE(status)) {
- pr_warn(DRV_PFX "unable to walk acpi resources\n");
+ pr_warn("unable to walk acpi resources\n");
result = -ENODEV;
goto outpresent;
}
@@ -1736,13 +1732,12 @@ static int sony_nc_add(struct acpi_device *device)
/* setup input devices and helper fifo */
result = sony_laptop_setup_input(device);
if (result) {
- pr_err(DRV_PFX "Unable to create input devices.\n");
+ pr_err("Unable to create input devices\n");
goto outkbdbacklight;
}
if (acpi_video_backlight_support()) {
- pr_info(DRV_PFX "brightness ignored, must be "
- "controlled by ACPI video driver\n");
+ pr_info("brightness ignored, must be controlled by ACPI video driver\n");
} else {
sony_nc_backlight_setup();
}
@@ -2265,9 +2260,9 @@ out:
if (pcidev)
pci_dev_put(pcidev);
- pr_info(DRV_PFX "detected Type%d model\n",
- dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
- dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
+ pr_info("detected Type%d model\n",
+ dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
+ dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
}
/* camera tests and poweron/poweroff */
@@ -2313,7 +2308,7 @@ static int __sony_pic_camera_ready(void)
static int __sony_pic_camera_off(void)
{
if (!camera) {
- pr_warn(DRV_PFX "camera control not enabled\n");
+ pr_warn("camera control not enabled\n");
return -ENODEV;
}
@@ -2333,7 +2328,7 @@ static int __sony_pic_camera_on(void)
int i, j, x;
if (!camera) {
- pr_warn(DRV_PFX "camera control not enabled\n");
+ pr_warn("camera control not enabled\n");
return -ENODEV;
}
@@ -2356,7 +2351,7 @@ static int __sony_pic_camera_on(void)
}
if (j == 0) {
- pr_warn(DRV_PFX "failed to power on camera\n");
+ pr_warn("failed to power on camera\n");
return -ENODEV;
}
@@ -2412,8 +2407,7 @@ int sony_pic_camera_command(int command, u8 value)
ITERATIONS_SHORT);
break;
default:
- pr_err(DRV_PFX "sony_pic_camera_command invalid: %d\n",
- command);
+ pr_err("sony_pic_camera_command invalid: %d\n", command);
break;
}
mutex_unlock(&spic_dev.lock);
@@ -2819,7 +2813,7 @@ static int sonypi_compat_init(void)
error =
kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
if (error) {
- pr_err(DRV_PFX "kfifo_alloc failed\n");
+ pr_err("kfifo_alloc failed\n");
return error;
}
@@ -2829,12 +2823,12 @@ static int sonypi_compat_init(void)
sonypi_misc_device.minor = minor;
error = misc_register(&sonypi_misc_device);
if (error) {
- pr_err(DRV_PFX "misc_register failed\n");
+ pr_err("misc_register failed\n");
goto err_free_kfifo;
}
if (minor == -1)
- pr_info(DRV_PFX "device allocated minor is %d\n",
- sonypi_misc_device.minor);
+ pr_info("device allocated minor is %d\n",
+ sonypi_misc_device.minor);
return 0;
@@ -2893,8 +2887,8 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
}
for (i = 0; i < p->interrupt_count; i++) {
if (!p->interrupts[i]) {
- pr_warn(DRV_PFX "Invalid IRQ %d\n",
- p->interrupts[i]);
+ pr_warn("Invalid IRQ %d\n",
+ p->interrupts[i]);
continue;
}
interrupt = kzalloc(sizeof(*interrupt),
@@ -2932,14 +2926,14 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
ioport->io2.address_length);
}
else {
- pr_err(DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n");
+ pr_err("Unknown SPIC Type, more than 2 IO Ports\n");
return AE_ERROR;
}
return AE_OK;
}
default:
dprintk("Resource %d isn't an IRQ nor an IO port\n",
- resource->type);
+ resource->type);
case ACPI_RESOURCE_TYPE_END_TAG:
return AE_OK;
@@ -2960,7 +2954,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
dprintk("Evaluating _STA\n");
result = acpi_bus_get_status(device);
if (result) {
- pr_warn(DRV_PFX "Unable to read status\n");
+ pr_warn("Unable to read status\n");
goto end;
}
@@ -2976,8 +2970,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
status = acpi_walk_resources(device->handle, METHOD_NAME__PRS,
sony_pic_read_possible_resource, &spic_dev);
if (ACPI_FAILURE(status)) {
- pr_warn(DRV_PFX "Failure evaluating %s\n",
- METHOD_NAME__PRS);
+ pr_warn("Failure evaluating %s\n", METHOD_NAME__PRS);
result = -ENODEV;
}
end:
@@ -3090,7 +3083,7 @@ static int sony_pic_enable(struct acpi_device *device,
/* check for total failure */
if (ACPI_FAILURE(status)) {
- pr_err(DRV_PFX "Error evaluating _SRS\n");
+ pr_err("Error evaluating _SRS\n");
result = -ENODEV;
goto end;
}
@@ -3182,7 +3175,7 @@ static int sony_pic_remove(struct acpi_device *device, int type)
struct sony_pic_irq *irq, *tmp_irq;
if (sony_pic_disable(device)) {
- pr_err(DRV_PFX "Couldn't disable device.\n");
+ pr_err("Couldn't disable device\n");
return -ENXIO;
}
@@ -3222,8 +3215,7 @@ static int sony_pic_add(struct acpi_device *device)
struct sony_pic_ioport *io, *tmp_io;
struct sony_pic_irq *irq, *tmp_irq;
- pr_info(DRV_PFX "%s v%s.\n", SONY_PIC_DRIVER_NAME,
- SONY_LAPTOP_DRIVER_VERSION);
+ pr_info("%s v%s\n", SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
spic_dev.acpi_dev = device;
strcpy(acpi_device_class(device), "sony/hotkey");
@@ -3233,14 +3225,14 @@ static int sony_pic_add(struct acpi_device *device)
/* read _PRS resources */
result = sony_pic_possible_resources(device);
if (result) {
- pr_err(DRV_PFX "Unable to read possible resources.\n");
+ pr_err("Unable to read possible resources\n");
goto err_free_resources;
}
/* setup input devices and helper fifo */
result = sony_laptop_setup_input(device);
if (result) {
- pr_err(DRV_PFX "Unable to create input devices.\n");
+ pr_err("Unable to create input devices\n");
goto err_free_resources;
}
@@ -3281,7 +3273,7 @@ static int sony_pic_add(struct acpi_device *device)
}
}
if (!spic_dev.cur_ioport) {
- pr_err(DRV_PFX "Failed to request_region.\n");
+ pr_err("Failed to request_region\n");
result = -ENODEV;
goto err_remove_compat;
}
@@ -3301,7 +3293,7 @@ static int sony_pic_add(struct acpi_device *device)
}
}
if (!spic_dev.cur_irq) {
- pr_err(DRV_PFX "Failed to request_irq.\n");
+ pr_err("Failed to request_irq\n");
result = -ENODEV;
goto err_release_region;
}
@@ -3309,7 +3301,7 @@ static int sony_pic_add(struct acpi_device *device)
/* set resource status _SRS */
result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
if (result) {
- pr_err(DRV_PFX "Couldn't enable device.\n");
+ pr_err("Couldn't enable device\n");
goto err_free_irq;
}
@@ -3418,7 +3410,7 @@ static int __init sony_laptop_init(void)
if (!no_spic && dmi_check_system(sonypi_dmi_table)) {
result = acpi_bus_register_driver(&sony_pic_driver);
if (result) {
- pr_err(DRV_PFX "Unable to register SPIC driver.");
+ pr_err("Unable to register SPIC driver\n");
goto out;
}
spic_drv_registered = 1;
@@ -3426,7 +3418,7 @@ static int __init sony_laptop_init(void)
result = acpi_bus_register_driver(&sony_nc_driver);
if (result) {
- pr_err(DRV_PFX "Unable to register SNC driver.");
+ pr_err("Unable to register SNC driver\n");
goto out_unregister_pic;
}
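
The sony-laptop hunk above rewrites dprintk() as a proper variadic macro. The ## in
front of __VA_ARGS__ (a GNU extension the kernel relies on) drops the trailing comma
when the macro is invoked with no extra arguments, so both plain and formatted calls
compile. A small stand-alone sketch of the same pattern (the prefix and messages are
only illustrative):

#include <stdio.h>

static int debug = 1;

#define dprintk(fmt, ...)						\
do {									\
	if (debug)							\
		fprintf(stderr, "sony-laptop: " fmt, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	dprintk("Evaluating _STA\n");			/* no extra arguments     */
	dprintk("Resource %d isn't an IRQ\n", 4);	/* with a format argument */
	return 0;
}
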
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 865ef78d6f1a..e24f5ae475af 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -25,6 +25,8 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -40,9 +42,6 @@
#define TC1100_INSTANCE_WIRELESS 1
#define TC1100_INSTANCE_JOGDIAL 2
-#define TC1100_LOGPREFIX "tc1100-wmi: "
-#define TC1100_INFO KERN_INFO TC1100_LOGPREFIX
-
MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
MODULE_LICENSE("GPL");
@@ -264,7 +263,7 @@ static int __init tc1100_init(void)
if (error)
goto err_device_del;
- printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n");
+ pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n");
return 0;
err_device_del:
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 562fcf0dd2b5..77f6e707a2a9 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -21,6 +21,8 @@
* 02110-1301, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define TPACPI_VERSION "0.24"
#define TPACPI_SYSFS_VERSION 0x020700
@@ -224,17 +226,6 @@ enum tpacpi_hkey_event_t {
#define TPACPI_MAX_ACPI_ARGS 3
-/* printk headers */
-#define TPACPI_LOG TPACPI_FILE ": "
-#define TPACPI_EMERG KERN_EMERG TPACPI_LOG
-#define TPACPI_ALERT KERN_ALERT TPACPI_LOG
-#define TPACPI_CRIT KERN_CRIT TPACPI_LOG
-#define TPACPI_ERR KERN_ERR TPACPI_LOG
-#define TPACPI_WARN KERN_WARNING TPACPI_LOG
-#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG
-#define TPACPI_INFO KERN_INFO TPACPI_LOG
-#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG
-
/* Debugging printk groups */
#define TPACPI_DBG_ALL 0xffff
#define TPACPI_DBG_DISCLOSETASK 0x8000
@@ -389,34 +380,36 @@ static int tpacpi_uwb_emulstate;
* Debugging helpers
*/
-#define dbg_printk(a_dbg_level, format, arg...) \
- do { if (dbg_level & (a_dbg_level)) \
- printk(TPACPI_DEBUG "%s: " format, __func__ , ## arg); \
- } while (0)
+#define dbg_printk(a_dbg_level, format, arg...) \
+do { \
+ if (dbg_level & (a_dbg_level)) \
+ printk(KERN_DEBUG pr_fmt("%s: " format), \
+ __func__, ##arg); \
+} while (0)
#ifdef CONFIG_THINKPAD_ACPI_DEBUG
#define vdbg_printk dbg_printk
static const char *str_supported(int is_supported);
#else
-#define vdbg_printk(a_dbg_level, format, arg...) \
- do { } while (0)
+static inline const char *str_supported(int is_supported) { return ""; }
+#define vdbg_printk(a_dbg_level, format, arg...) \
+ no_printk(format, ##arg)
#endif
static void tpacpi_log_usertask(const char * const what)
{
- printk(TPACPI_DEBUG "%s: access by process with PID %d\n",
- what, task_tgid_vnr(current));
+ printk(KERN_DEBUG pr_fmt("%s: access by process with PID %d\n"),
+ what, task_tgid_vnr(current));
}
-#define tpacpi_disclose_usertask(what, format, arg...) \
- do { \
- if (unlikely( \
- (dbg_level & TPACPI_DBG_DISCLOSETASK) && \
- (tpacpi_lifecycle == TPACPI_LIFE_RUNNING))) { \
- printk(TPACPI_DEBUG "%s: PID %d: " format, \
- what, task_tgid_vnr(current), ## arg); \
- } \
- } while (0)
+#define tpacpi_disclose_usertask(what, format, arg...) \
+do { \
+ if (unlikely((dbg_level & TPACPI_DBG_DISCLOSETASK) && \
+ (tpacpi_lifecycle == TPACPI_LIFE_RUNNING))) { \
+ printk(KERN_DEBUG pr_fmt("%s: PID %d: " format), \
+ what, task_tgid_vnr(current), ## arg); \
+ } \
+} while (0)
/*
* Quirk handling helpers
@@ -535,15 +528,6 @@ TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
"HKEY", /* all others */
); /* 570 */
-TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
- "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
- "\\_SB.PCI0.VID0", /* 770e */
- "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
- "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
- "\\_SB.PCI0.AGP.VID", /* all others */
- ); /* R30, R31 */
-
-
/*************************************************************************
* ACPI helpers
*/
@@ -563,7 +547,7 @@ static int acpi_evalf(acpi_handle handle,
int quiet;
if (!*fmt) {
- printk(TPACPI_ERR "acpi_evalf() called with empty format\n");
+ pr_err("acpi_evalf() called with empty format\n");
return 0;
}
@@ -588,7 +572,7 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(TPACPI_ERR "acpi_evalf() called "
+ pr_err("acpi_evalf() called "
"with invalid format character '%c'\n", c);
va_end(ap);
return 0;
@@ -617,13 +601,13 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(TPACPI_ERR "acpi_evalf() called "
+ pr_err("acpi_evalf() called "
"with invalid format character '%c'\n", res_type);
return 0;
}
if (!success && !quiet)
- printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %s\n",
+ pr_err("acpi_evalf(%s, %s, ...) failed: %s\n",
method, fmt0, acpi_format_exception(status));
return success;
@@ -767,8 +751,7 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device);
if (rc < 0) {
- printk(TPACPI_ERR "acpi_bus_get_device(%s) failed: %d\n",
- ibm->name, rc);
+ pr_err("acpi_bus_get_device(%s) failed: %d\n", ibm->name, rc);
return -ENODEV;
}
@@ -781,12 +764,10 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
ibm->acpi->type, dispatch_acpi_notify, ibm);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
- printk(TPACPI_NOTICE
- "another device driver is already "
- "handling %s events\n", ibm->name);
+ pr_notice("another device driver is already "
+ "handling %s events\n", ibm->name);
} else {
- printk(TPACPI_ERR
- "acpi_install_notify_handler(%s) failed: %s\n",
+ pr_err("acpi_install_notify_handler(%s) failed: %s\n",
ibm->name, acpi_format_exception(status));
}
return -ENODEV;
@@ -811,8 +792,7 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
if (!ibm->acpi->driver) {
- printk(TPACPI_ERR
- "failed to allocate memory for ibm->acpi->driver\n");
+ pr_err("failed to allocate memory for ibm->acpi->driver\n");
return -ENOMEM;
}
@@ -823,7 +803,7 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
rc = acpi_bus_register_driver(ibm->acpi->driver);
if (rc < 0) {
- printk(TPACPI_ERR "acpi_bus_register_driver(%s) failed: %d\n",
+ pr_err("acpi_bus_register_driver(%s) failed: %d\n",
ibm->name, rc);
kfree(ibm->acpi->driver);
ibm->acpi->driver = NULL;
@@ -1081,15 +1061,14 @@ static int parse_strtoul(const char *buf,
static void tpacpi_disable_brightness_delay(void)
{
if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0))
- printk(TPACPI_NOTICE
- "ACPI backlight control delay disabled\n");
+ pr_notice("ACPI backlight control delay disabled\n");
}
static void printk_deprecated_attribute(const char * const what,
const char * const details)
{
tpacpi_log_usertask("deprecated sysfs attribute");
- printk(TPACPI_WARN "WARNING: sysfs attribute %s is deprecated and "
+ pr_warn("WARNING: sysfs attribute %s is deprecated and "
"will be removed. %s\n",
what, details);
}
@@ -1264,8 +1243,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
&tpacpi_rfk_rfkill_ops,
atp_rfk);
if (!atp_rfk || !atp_rfk->rfkill) {
- printk(TPACPI_ERR
- "failed to allocate memory for rfkill class\n");
+ pr_err("failed to allocate memory for rfkill class\n");
kfree(atp_rfk);
return -ENOMEM;
}
@@ -1275,9 +1253,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
sw_status = (tp_rfkops->get_status)();
if (sw_status < 0) {
- printk(TPACPI_ERR
- "failed to read initial state for %s, error %d\n",
- name, sw_status);
+ pr_err("failed to read initial state for %s, error %d\n",
+ name, sw_status);
} else {
sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
if (set_default) {
@@ -1291,9 +1268,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
res = rfkill_register(atp_rfk->rfkill);
if (res < 0) {
- printk(TPACPI_ERR
- "failed to register %s rfkill switch: %d\n",
- name, res);
+ pr_err("failed to register %s rfkill switch: %d\n", name, res);
rfkill_destroy(atp_rfk->rfkill);
kfree(atp_rfk);
return res;
@@ -1301,7 +1276,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
tpacpi_rfkill_switches[id] = atp_rfk;
- printk(TPACPI_INFO "rfkill switch %s: radio is %sblocked\n",
+ pr_info("rfkill switch %s: radio is %sblocked\n",
name, (sw_state || hw_state) ? "" : "un");
return 0;
}
@@ -1825,10 +1800,8 @@ static void __init tpacpi_check_outdated_fw(void)
* broken, or really stable to begin with, so it is
* best if the user upgrades the firmware anyway.
*/
- printk(TPACPI_WARN
- "WARNING: Outdated ThinkPad BIOS/EC firmware\n");
- printk(TPACPI_WARN
- "WARNING: This firmware may be missing critical bug "
+ pr_warn("WARNING: Outdated ThinkPad BIOS/EC firmware\n");
+ pr_warn("WARNING: This firmware may be missing critical bug "
"fixes and/or important features\n");
}
}
@@ -2117,9 +2090,7 @@ void static hotkey_mask_warn_incomplete_mask(void)
(hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
if (wantedmask)
- printk(TPACPI_NOTICE
- "required events 0x%08x not enabled!\n",
- wantedmask);
+ pr_notice("required events 0x%08x not enabled!\n", wantedmask);
}
/*
@@ -2157,10 +2128,9 @@ static int hotkey_mask_set(u32 mask)
* a given event.
*/
if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
- printk(TPACPI_NOTICE
- "asked for hotkey mask 0x%08x, but "
- "firmware forced it to 0x%08x\n",
- fwmask, hotkey_acpi_mask);
+ pr_notice("asked for hotkey mask 0x%08x, but "
+ "firmware forced it to 0x%08x\n",
+ fwmask, hotkey_acpi_mask);
}
if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
@@ -2184,13 +2154,11 @@ static int hotkey_user_mask_set(const u32 mask)
(mask == 0xffff || mask == 0xffffff ||
mask == 0xffffffff)) {
tp_warned.hotkey_mask_ff = 1;
- printk(TPACPI_NOTICE
- "setting the hotkey mask to 0x%08x is likely "
- "not the best way to go about it\n", mask);
- printk(TPACPI_NOTICE
- "please consider using the driver defaults, "
- "and refer to up-to-date thinkpad-acpi "
- "documentation\n");
+ pr_notice("setting the hotkey mask to 0x%08x is likely "
+ "not the best way to go about it\n", mask);
+ pr_notice("please consider using the driver defaults, "
+ "and refer to up-to-date thinkpad-acpi "
+ "documentation\n");
}
/* Try to enable what the user asked for, plus whatever we need.
@@ -2574,8 +2542,7 @@ static void hotkey_poll_setup(const bool may_warn)
NULL, TPACPI_NVRAM_KTHREAD_NAME);
if (IS_ERR(tpacpi_hotkey_task)) {
tpacpi_hotkey_task = NULL;
- printk(TPACPI_ERR
- "could not create kernel thread "
+ pr_err("could not create kernel thread "
"for hotkey polling\n");
}
}
@@ -2583,11 +2550,10 @@ static void hotkey_poll_setup(const bool may_warn)
hotkey_poll_stop_sync();
if (may_warn && (poll_driver_mask || poll_user_mask) &&
hotkey_poll_freq == 0) {
- printk(TPACPI_NOTICE
- "hot keys 0x%08x and/or events 0x%08x "
- "require polling, which is currently "
- "disabled\n",
- poll_user_mask, poll_driver_mask);
+ pr_notice("hot keys 0x%08x and/or events 0x%08x "
+ "require polling, which is currently "
+ "disabled\n",
+ poll_user_mask, poll_driver_mask);
}
}
}
@@ -2811,13 +2777,13 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
mutex_unlock(&hotkey_mutex);
if (rc < 0)
- printk(TPACPI_ERR "hotkey_source_mask: failed to update the"
- "firmware event mask!\n");
+ pr_err("hotkey_source_mask: "
+ "failed to update the firmware event mask!\n");
if (r_ev)
- printk(TPACPI_NOTICE "hotkey_source_mask: "
- "some important events were disabled: "
- "0x%04x\n", r_ev);
+ pr_notice("hotkey_source_mask: "
+ "some important events were disabled: 0x%04x\n",
+ r_ev);
tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
@@ -3048,8 +3014,7 @@ static void hotkey_exit(void)
if (((tp_features.hotkey_mask &&
hotkey_mask_set(hotkey_orig_mask)) |
hotkey_status_set(false)) != 0)
- printk(TPACPI_ERR
- "failed to restore hot key mask "
+ pr_err("failed to restore hot key mask "
"to BIOS defaults\n");
}
@@ -3288,10 +3253,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
for HKEY interface version 0x100 */
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
if ((hkeyv >> 8) != 1) {
- printk(TPACPI_ERR "unknown version of the "
- "HKEY interface: 0x%x\n", hkeyv);
- printk(TPACPI_ERR "please report this to %s\n",
- TPACPI_MAIL);
+ pr_err("unknown version of the HKEY interface: 0x%x\n",
+ hkeyv);
+ pr_err("please report this to %s\n", TPACPI_MAIL);
} else {
/*
* MHKV 0x100 in A31, R40, R40e,
@@ -3304,8 +3268,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Paranoia check AND init hotkey_all_mask */
if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
"MHKA", "qd")) {
- printk(TPACPI_ERR
- "missing MHKA handler, "
+ pr_err("missing MHKA handler, "
"please report this to %s\n",
TPACPI_MAIL);
/* Fallback: pre-init for FN+F3,F4,F12 */
@@ -3343,16 +3306,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (dbg_wlswemul) {
tp_features.hotkey_wlsw = 1;
radiosw_state = !!tpacpi_wlsw_emulstate;
- printk(TPACPI_INFO
- "radio switch emulation enabled\n");
+ pr_info("radio switch emulation enabled\n");
} else
#endif
/* Not all thinkpads have a hardware radio switch */
if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
radiosw_state = !!status;
- printk(TPACPI_INFO
- "radio switch found; radios are %s\n",
+ pr_info("radio switch found; radios are %s\n",
enabled(status, 0));
}
if (tp_features.hotkey_wlsw)
@@ -3363,8 +3324,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
tp_features.hotkey_tablet = 1;
tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
- printk(TPACPI_INFO
- "possible tablet mode switch found; "
+ pr_info("possible tablet mode switch found; "
"ThinkPad in %s mode\n",
(tabletsw_state) ? "tablet" : "laptop");
res = add_to_attr_set(hotkey_dev_attributes,
@@ -3382,8 +3342,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
GFP_KERNEL);
if (!hotkey_keycode_map) {
- printk(TPACPI_ERR
- "failed to allocate memory for key map\n");
+ pr_err("failed to allocate memory for key map\n");
res = -ENOMEM;
goto err_exit;
}
@@ -3426,13 +3385,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
* userspace. tpacpi_detect_brightness_capabilities() must have
* been called before this point */
if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
- printk(TPACPI_INFO
- "This ThinkPad has standard ACPI backlight "
- "brightness control, supported by the ACPI "
- "video driver\n");
- printk(TPACPI_NOTICE
- "Disabling thinkpad-acpi brightness events "
- "by default...\n");
+ pr_info("This ThinkPad has standard ACPI backlight "
+ "brightness control, supported by the ACPI "
+ "video driver\n");
+ pr_notice("Disabling thinkpad-acpi brightness events "
+ "by default...\n");
/* Disable brightness up/down on Lenovo thinkpads when
* ACPI is handling them, otherwise it is plain impossible
@@ -3539,8 +3496,7 @@ static bool hotkey_notify_wakeup(const u32 hkey,
case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
- printk(TPACPI_ALERT
- "EMERGENCY WAKEUP: battery almost empty\n");
+ pr_alert("EMERGENCY WAKEUP: battery almost empty\n");
/* how to auto-heal: */
/* 2313: woke up from S3, go to S4/S5 */
/* 2413: woke up from S4, go to S5 */
@@ -3551,9 +3507,7 @@ static bool hotkey_notify_wakeup(const u32 hkey,
}
if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
- printk(TPACPI_INFO
- "woke up due to a hot-unplug "
- "request...\n");
+ pr_info("woke up due to a hot-unplug request...\n");
hotkey_wakeup_reason_notify_change();
}
return true;
@@ -3605,37 +3559,31 @@ static bool hotkey_notify_thermal(const u32 hkey,
switch (hkey) {
case TP_HKEY_EV_THM_TABLE_CHANGED:
- printk(TPACPI_INFO
- "EC reports that Thermal Table has changed\n");
+ pr_info("EC reports that Thermal Table has changed\n");
/* recommended action: do nothing, we don't have
* Lenovo ATM information */
return true;
case TP_HKEY_EV_ALARM_BAT_HOT:
- printk(TPACPI_CRIT
- "THERMAL ALARM: battery is too hot!\n");
+ pr_crit("THERMAL ALARM: battery is too hot!\n");
/* recommended action: warn user through gui */
break;
case TP_HKEY_EV_ALARM_BAT_XHOT:
- printk(TPACPI_ALERT
- "THERMAL EMERGENCY: battery is extremely hot!\n");
+ pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
case TP_HKEY_EV_ALARM_SENSOR_HOT:
- printk(TPACPI_CRIT
- "THERMAL ALARM: "
+ pr_crit("THERMAL ALARM: "
"a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
/* some internal component is too hot */
break;
case TP_HKEY_EV_ALARM_SENSOR_XHOT:
- printk(TPACPI_ALERT
- "THERMAL EMERGENCY: "
- "a sensor reports something is extremely hot!\n");
+ pr_alert("THERMAL EMERGENCY: "
+ "a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
default:
- printk(TPACPI_ALERT
- "THERMAL ALERT: unknown thermal alarm received\n");
+ pr_alert("THERMAL ALERT: unknown thermal alarm received\n");
known = false;
}
@@ -3652,8 +3600,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
bool known_ev;
if (event != 0x80) {
- printk(TPACPI_ERR
- "unknown HKEY notification event %d\n", event);
+ pr_err("unknown HKEY notification event %d\n", event);
/* forward it to userspace, maybe it knows how to handle it */
acpi_bus_generate_netlink_event(
ibm->acpi->device->pnp.device_class,
@@ -3664,7 +3611,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
while (1) {
if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
- printk(TPACPI_ERR "failed to retrieve HKEY event\n");
+ pr_err("failed to retrieve HKEY event\n");
return;
}
@@ -3692,8 +3639,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
switch (hkey) {
case TP_HKEY_EV_BAYEJ_ACK:
hotkey_autosleep_ack = 1;
- printk(TPACPI_INFO
- "bay ejected\n");
+ pr_info("bay ejected\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
break;
@@ -3709,8 +3655,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
/* 0x4000-0x4FFF: dock-related wakeups */
if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
hotkey_autosleep_ack = 1;
- printk(TPACPI_INFO
- "undocked\n");
+ pr_info("undocked\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
} else {
@@ -3741,11 +3686,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
known_ev = false;
}
if (!known_ev) {
- printk(TPACPI_NOTICE
- "unhandled HKEY event 0x%04x\n", hkey);
- printk(TPACPI_NOTICE
- "please report the conditions when this "
- "event happened to %s\n", TPACPI_MAIL);
+ pr_notice("unhandled HKEY event 0x%04x\n", hkey);
+ pr_notice("please report the conditions when this "
+ "event happened to %s\n", TPACPI_MAIL);
}
/* Legacy events */
@@ -3778,8 +3721,7 @@ static void hotkey_resume(void)
if (hotkey_status_set(true) < 0 ||
hotkey_mask_set(hotkey_acpi_mask) < 0)
- printk(TPACPI_ERR
- "error while attempting to reset the event "
+ pr_err("error while attempting to reset the event "
"firmware interface\n");
tpacpi_send_radiosw_update();
@@ -3824,14 +3766,12 @@ static void hotkey_enabledisable_warn(bool enable)
{
tpacpi_log_usertask("procfs hotkey enable/disable");
if (!WARN((tpacpi_lifecycle == TPACPI_LIFE_RUNNING || !enable),
- TPACPI_WARN
- "hotkey enable/disable functionality has been "
- "removed from the driver. Hotkeys are always "
- "enabled\n"))
- printk(TPACPI_ERR
- "Please remove the hotkey=enable module "
- "parameter, it is deprecated. Hotkeys are always "
- "enabled\n");
+ pr_fmt("hotkey enable/disable functionality has been "
+ "removed from the driver. "
+ "Hotkeys are always enabled.\n")))
+ pr_err("Please remove the hotkey=enable module "
+ "parameter, it is deprecated. "
+ "Hotkeys are always enabled.\n");
}
static int hotkey_write(char *buf)
@@ -4011,8 +3951,7 @@ static void bluetooth_shutdown(void)
/* Order firmware to save current state to NVRAM */
if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
TP_ACPI_BLTH_SAVE_STATE))
- printk(TPACPI_NOTICE
- "failed to save bluetooth state to NVRAM\n");
+ pr_notice("failed to save bluetooth state to NVRAM\n");
else
vdbg_printk(TPACPI_DBG_RFKILL,
"bluestooth state saved to NVRAM\n");
@@ -4051,8 +3990,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_bluetoothemul) {
tp_features.bluetooth = 1;
- printk(TPACPI_INFO
- "bluetooth switch emulation enabled\n");
+ pr_info("bluetooth switch emulation enabled\n");
} else
#endif
if (tp_features.bluetooth &&
@@ -4203,8 +4141,7 @@ static void wan_shutdown(void)
/* Order firmware to save current state to NVRAM */
if (!acpi_evalf(NULL, NULL, "\\WGSV", "vd",
TP_ACPI_WGSV_SAVE_STATE))
- printk(TPACPI_NOTICE
- "failed to save WWAN state to NVRAM\n");
+ pr_notice("failed to save WWAN state to NVRAM\n");
else
vdbg_printk(TPACPI_DBG_RFKILL,
"WWAN state saved to NVRAM\n");
@@ -4241,8 +4178,7 @@ static int __init wan_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wwanemul) {
tp_features.wan = 1;
- printk(TPACPI_INFO
- "wwan switch emulation enabled\n");
+ pr_info("wwan switch emulation enabled\n");
} else
#endif
if (tp_features.wan &&
@@ -4382,8 +4318,7 @@ static int __init uwb_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_uwbemul) {
tp_features.uwb = 1;
- printk(TPACPI_INFO
- "uwb switch emulation enabled\n");
+ pr_info("uwb switch emulation enabled\n");
} else
#endif
if (tp_features.uwb &&
@@ -4444,6 +4379,15 @@ static int video_orig_autosw;
static int video_autosw_get(void);
static int video_autosw_set(int enable);
+TPACPI_HANDLE(vid, root,
+ "\\_SB.PCI.AGP.VGA", /* 570 */
+ "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
+ "\\_SB.PCI0.VID0", /* 770e */
+ "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
+ "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
+ "\\_SB.PCI0.AGP.VID", /* all others */
+ ); /* R30, R31 */
+
TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
static int __init video_init(struct ibm_init_struct *iibm)
@@ -4487,7 +4431,7 @@ static void video_exit(void)
dbg_printk(TPACPI_DBG_EXIT,
"restoring original video autoswitch mode\n");
if (video_autosw_set(video_orig_autosw))
- printk(TPACPI_ERR "error while trying to restore original "
+ pr_err("error while trying to restore original "
"video autoswitch mode\n");
}
@@ -4560,8 +4504,7 @@ static int video_outputsw_set(int status)
res = acpi_evalf(vid_handle, NULL,
"ASWT", "vdd", status * 0x100, 0);
if (!autosw && video_autosw_set(autosw)) {
- printk(TPACPI_ERR
- "video auto-switch left enabled due to error\n");
+ pr_err("video auto-switch left enabled due to error\n");
return -EIO;
}
break;
@@ -4630,8 +4573,7 @@ static int video_outputsw_cycle(void)
return -ENOSYS;
}
if (!autosw && video_autosw_set(autosw)) {
- printk(TPACPI_ERR
- "video auto-switch left enabled due to error\n");
+ pr_err("video auto-switch left enabled due to error\n");
return -EIO;
}
@@ -5348,7 +5290,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
GFP_KERNEL);
if (!tpacpi_leds) {
- printk(TPACPI_ERR "Out of memory for LED data\n");
+ pr_err("Out of memory for LED data\n");
return -ENOMEM;
}
@@ -5367,9 +5309,8 @@ static int __init led_init(struct ibm_init_struct *iibm)
}
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
- printk(TPACPI_NOTICE
- "warning: userspace override of important "
- "firmware LEDs is enabled\n");
+ pr_notice("warning: userspace override of important "
+ "firmware LEDs is enabled\n");
#endif
return 0;
}
@@ -5639,17 +5580,16 @@ static void thermal_dump_all_sensors(void)
if (n <= 0)
return;
- printk(TPACPI_NOTICE
- "temperatures (Celsius):");
+ pr_notice("temperatures (Celsius):");
for (i = 0; i < n; i++) {
if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
- printk(KERN_CONT " %d", (int)(t.temp[i] / 1000));
+ pr_cont(" %d", (int)(t.temp[i] / 1000));
else
- printk(KERN_CONT " N/A");
+ pr_cont(" N/A");
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
/* sysfs temp##_input -------------------------------------------------- */
@@ -5769,14 +5709,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
if (ta1 == 0) {
/* This is sheer paranoia, but we handle it anyway */
if (acpi_tmp7) {
- printk(TPACPI_ERR
- "ThinkPad ACPI EC access misbehaving, "
+ pr_err("ThinkPad ACPI EC access misbehaving, "
"falling back to ACPI TMPx access "
"mode\n");
thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
} else {
- printk(TPACPI_ERR
- "ThinkPad ACPI EC access misbehaving, "
+ pr_err("ThinkPad ACPI EC access misbehaving, "
"disabling thermal sensors access\n");
thermal_read_mode = TPACPI_THERMAL_NONE;
}
@@ -6129,8 +6067,8 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
- printk(TPACPI_ERR "Unknown _BCL data, "
- "please report this to %s\n", TPACPI_MAIL);
+ pr_err("Unknown _BCL data, please report this to %s\n",
+ TPACPI_MAIL);
rc = 0;
} else {
rc = obj->package.count;
@@ -6214,18 +6152,15 @@ static void __init tpacpi_detect_brightness_capabilities(void)
switch (b) {
case 16:
bright_maxlvl = 15;
- printk(TPACPI_INFO
- "detected a 16-level brightness capable ThinkPad\n");
+ pr_info("detected a 16-level brightness capable ThinkPad\n");
break;
case 8:
case 0:
bright_maxlvl = 7;
- printk(TPACPI_INFO
- "detected a 8-level brightness capable ThinkPad\n");
+ pr_info("detected a 8-level brightness capable ThinkPad\n");
break;
default:
- printk(TPACPI_ERR
- "Unsupported brightness interface, "
+ pr_err("Unsupported brightness interface, "
"please contact %s\n", TPACPI_MAIL);
tp_features.bright_unkfw = 1;
bright_maxlvl = b - 1;
@@ -6260,22 +6195,19 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (acpi_video_backlight_support()) {
if (brightness_enable > 1) {
- printk(TPACPI_INFO
- "Standard ACPI backlight interface "
- "available, not loading native one.\n");
+ pr_info("Standard ACPI backlight interface "
+ "available, not loading native one\n");
return 1;
} else if (brightness_enable == 1) {
- printk(TPACPI_WARN
- "Cannot enable backlight brightness support, "
+ pr_warn("Cannot enable backlight brightness support, "
"ACPI is already handling it. Refer to the "
- "acpi_backlight kernel parameter\n");
+ "acpi_backlight kernel parameter.\n");
return 1;
}
} else if (tp_features.bright_acpimode && brightness_enable > 1) {
- printk(TPACPI_NOTICE
- "Standard ACPI backlight interface not "
- "available, thinkpad_acpi native "
- "brightness control enabled\n");
+ pr_notice("Standard ACPI backlight interface not "
+ "available, thinkpad_acpi native "
+ "brightness control enabled\n");
}
/*
@@ -6319,19 +6251,17 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (IS_ERR(ibm_backlight_device)) {
int rc = PTR_ERR(ibm_backlight_device);
ibm_backlight_device = NULL;
- printk(TPACPI_ERR "Could not register backlight device\n");
+ pr_err("Could not register backlight device\n");
return rc;
}
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
"brightness is supported\n");
if (quirks & TPACPI_BRGHT_Q_ASK) {
- printk(TPACPI_NOTICE
- "brightness: will use unverified default: "
- "brightness_mode=%d\n", brightness_mode);
- printk(TPACPI_NOTICE
- "brightness: please report to %s whether it works well "
- "or not on your ThinkPad\n", TPACPI_MAIL);
+ pr_notice("brightness: will use unverified default: "
+ "brightness_mode=%d\n", brightness_mode);
+ pr_notice("brightness: please report to %s whether it works well "
+ "or not on your ThinkPad\n", TPACPI_MAIL);
}
/* Added by mistake in early 2007. Probably useless, but it could
@@ -6804,8 +6734,7 @@ static int __init volume_create_alsa_mixer(void)
rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
sizeof(struct tpacpi_alsa_data), &card);
if (rc < 0 || !card) {
- printk(TPACPI_ERR
- "Failed to create ALSA card structures: %d\n", rc);
+ pr_err("Failed to create ALSA card structures: %d\n", rc);
return 1;
}
@@ -6839,9 +6768,8 @@ static int __init volume_create_alsa_mixer(void)
ctl_vol = snd_ctl_new1(&volume_alsa_control_vol, NULL);
rc = snd_ctl_add(card, ctl_vol);
if (rc < 0) {
- printk(TPACPI_ERR
- "Failed to create ALSA volume control: %d\n",
- rc);
+ pr_err("Failed to create ALSA volume control: %d\n",
+ rc);
goto err_exit;
}
data->ctl_vol_id = &ctl_vol->id;
@@ -6850,8 +6778,7 @@ static int __init volume_create_alsa_mixer(void)
ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL);
rc = snd_ctl_add(card, ctl_mute);
if (rc < 0) {
- printk(TPACPI_ERR "Failed to create ALSA mute control: %d\n",
- rc);
+ pr_err("Failed to create ALSA mute control: %d\n", rc);
goto err_exit;
}
data->ctl_mute_id = &ctl_mute->id;
@@ -6859,7 +6786,7 @@ static int __init volume_create_alsa_mixer(void)
snd_card_set_dev(card, &tpacpi_pdev->dev);
rc = snd_card_register(card);
if (rc < 0) {
- printk(TPACPI_ERR "Failed to register ALSA card: %d\n", rc);
+ pr_err("Failed to register ALSA card: %d\n", rc);
goto err_exit;
}
@@ -6915,9 +6842,8 @@ static int __init volume_init(struct ibm_init_struct *iibm)
return -EINVAL;
if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) {
- printk(TPACPI_ERR
- "UCMS step volume mode not implemented, "
- "please contact %s\n", TPACPI_MAIL);
+ pr_err("UCMS step volume mode not implemented, "
+ "please contact %s\n", TPACPI_MAIL);
return 1;
}
@@ -6981,13 +6907,11 @@ static int __init volume_init(struct ibm_init_struct *iibm)
rc = volume_create_alsa_mixer();
if (rc) {
- printk(TPACPI_ERR
- "Could not create the ALSA mixer interface\n");
+ pr_err("Could not create the ALSA mixer interface\n");
return rc;
}
- printk(TPACPI_INFO
- "Console audio control enabled, mode: %s\n",
+ pr_info("Console audio control enabled, mode: %s\n",
(volume_control_allowed) ?
"override (read/write)" :
"monitor (read only)");
@@ -7049,12 +6973,10 @@ static int volume_write(char *buf)
if (!volume_control_allowed && tpacpi_lifecycle != TPACPI_LIFE_INIT) {
if (unlikely(!tp_warned.volume_ctrl_forbidden)) {
tp_warned.volume_ctrl_forbidden = 1;
- printk(TPACPI_NOTICE
- "Console audio control in monitor mode, "
- "changes are not allowed.\n");
- printk(TPACPI_NOTICE
- "Use the volume_control=1 module parameter "
- "to enable volume control\n");
+ pr_notice("Console audio control in monitor mode, "
+ "changes are not allowed\n");
+ pr_notice("Use the volume_control=1 module parameter "
+ "to enable volume control\n");
}
return -EPERM;
}
@@ -7129,8 +7051,7 @@ static void inline volume_alsa_notify_change(void)
static int __init volume_init(struct ibm_init_struct *iibm)
{
- printk(TPACPI_INFO
- "volume: disabled as there is no ALSA support in this kernel\n");
+ pr_info("volume: disabled as there is no ALSA support in this kernel\n");
return 1;
}
@@ -7337,9 +7258,8 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
static void fan_quirk1_setup(void)
{
if (fan_control_initial_status == 0x07) {
- printk(TPACPI_NOTICE
- "fan_init: initial fan status is unknown, "
- "assuming it is in auto mode\n");
+ pr_notice("fan_init: initial fan status is unknown, "
+ "assuming it is in auto mode\n");
tp_features.fan_ctrl_status_undef = 1;
}
}
@@ -7726,8 +7646,7 @@ static void fan_watchdog_reset(void)
if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
msecs_to_jiffies(fan_watchdog_maxinterval
* 1000))) {
- printk(TPACPI_ERR
- "failed to queue the fan watchdog, "
+ pr_err("failed to queue the fan watchdog, "
"watchdog will not trigger\n");
}
} else
@@ -7741,11 +7660,11 @@ static void fan_watchdog_fire(struct work_struct *ignored)
if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
return;
- printk(TPACPI_NOTICE "fan watchdog: enabling fan\n");
+ pr_notice("fan watchdog: enabling fan\n");
rc = fan_set_enable();
if (rc < 0) {
- printk(TPACPI_ERR "fan watchdog: error %d while enabling fan, "
- "will try again later...\n", -rc);
+ pr_err("fan watchdog: error %d while enabling fan, "
+ "will try again later...\n", -rc);
/* reschedule for later */
fan_watchdog_reset();
}
@@ -8049,8 +7968,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
"secondary fan support enabled\n");
}
} else {
- printk(TPACPI_ERR
- "ThinkPad ACPI EC access misbehaving, "
+ pr_err("ThinkPad ACPI EC access misbehaving, "
"fan status and control unavailable\n");
return 1;
}
@@ -8150,9 +8068,8 @@ static void fan_suspend(pm_message_t state)
fan_control_resume_level = 0;
rc = fan_get_status_safe(&fan_control_resume_level);
if (rc < 0)
- printk(TPACPI_NOTICE
- "failed to read fan level for later "
- "restore during resume: %d\n", rc);
+ pr_notice("failed to read fan level for later "
+ "restore during resume: %d\n", rc);
/* if it is undefined, don't attempt to restore it.
* KEEP THIS LAST */
@@ -8207,13 +8124,11 @@ static void fan_resume(void)
return;
}
if (do_set) {
- printk(TPACPI_NOTICE
- "restoring fan level to 0x%02x\n",
- fan_control_resume_level);
+ pr_notice("restoring fan level to 0x%02x\n",
+ fan_control_resume_level);
rc = fan_set_level_safe(fan_control_resume_level);
if (rc < 0)
- printk(TPACPI_NOTICE
- "failed to restore fan level: %d\n", rc);
+ pr_notice("failed to restore fan level: %d\n", rc);
}
}
@@ -8305,8 +8220,8 @@ static int fan_write_cmd_level(const char *cmd, int *rc)
*rc = fan_set_level_safe(level);
if (*rc == -ENXIO)
- printk(TPACPI_ERR "level command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("level command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan",
"set level to %d\n", level);
@@ -8321,8 +8236,8 @@ static int fan_write_cmd_enable(const char *cmd, int *rc)
*rc = fan_set_enable();
if (*rc == -ENXIO)
- printk(TPACPI_ERR "enable command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("enable command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan", "enable\n");
@@ -8336,8 +8251,8 @@ static int fan_write_cmd_disable(const char *cmd, int *rc)
*rc = fan_set_disable();
if (*rc == -ENXIO)
- printk(TPACPI_ERR "disable command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("disable command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan", "disable\n");
@@ -8356,8 +8271,8 @@ static int fan_write_cmd_speed(const char *cmd, int *rc)
*rc = fan_set_speed(speed);
if (*rc == -ENXIO)
- printk(TPACPI_ERR "speed command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("speed command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan",
"set speed to %d\n", speed);
@@ -8560,8 +8475,8 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
if (ibm->acpi->notify) {
ret = setup_acpi_notify(ibm);
if (ret == -ENODEV) {
- printk(TPACPI_NOTICE "disabling subdriver %s\n",
- ibm->name);
+ pr_notice("disabling subdriver %s\n",
+ ibm->name);
ret = 0;
goto err_out;
}
@@ -8583,8 +8498,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
entry = proc_create_data(ibm->name, mode, proc_dir,
&dispatch_proc_fops, ibm);
if (!entry) {
- printk(TPACPI_ERR "unable to create proc entry %s\n",
- ibm->name);
+ pr_err("unable to create proc entry %s\n", ibm->name);
ret = -ENODEV;
goto err_out;
}
@@ -8683,13 +8597,11 @@ static int __must_check __init get_thinkpad_model_data(
tp->ec_release = (ec_fw_string[4] << 8)
| ec_fw_string[5];
} else {
- printk(TPACPI_NOTICE
- "ThinkPad firmware release %s "
- "doesn't match the known patterns\n",
- ec_fw_string);
- printk(TPACPI_NOTICE
- "please report this to %s\n",
- TPACPI_MAIL);
+ pr_notice("ThinkPad firmware release %s "
+ "doesn't match the known patterns\n",
+ ec_fw_string);
+ pr_notice("please report this to %s\n",
+ TPACPI_MAIL);
}
break;
}
@@ -8733,8 +8645,7 @@ static int __init probe_for_thinkpad(void)
tpacpi_acpi_handle_locate("ec", TPACPI_ACPI_EC_HID, &ec_handle);
if (!ec_handle) {
if (is_thinkpad)
- printk(TPACPI_ERR
- "Not yet supported ThinkPad detected!\n");
+ pr_err("Not yet supported ThinkPad detected!\n");
return -ENODEV;
}
@@ -8746,10 +8657,10 @@ static int __init probe_for_thinkpad(void)
static void __init thinkpad_acpi_init_banner(void)
{
- printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
- printk(TPACPI_INFO "%s\n", TPACPI_URL);
+ pr_info("%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
+ pr_info("%s\n", TPACPI_URL);
- printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n",
+ pr_info("ThinkPad BIOS %s, EC %s\n",
(thinkpad_id.bios_version_str) ?
thinkpad_id.bios_version_str : "unknown",
(thinkpad_id.ec_version_str) ?
@@ -8758,7 +8669,7 @@ static void __init thinkpad_acpi_init_banner(void)
BUG_ON(!thinkpad_id.vendor);
if (thinkpad_id.model_str)
- printk(TPACPI_INFO "%s %s, model %s\n",
+ pr_info("%s %s, model %s\n",
(thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
"IBM" : ((thinkpad_id.vendor ==
PCI_VENDOR_ID_LENOVO) ?
@@ -9024,8 +8935,7 @@ static int __init thinkpad_acpi_module_init(void)
ret = get_thinkpad_model_data(&thinkpad_id);
if (ret) {
- printk(TPACPI_ERR
- "unable to get DMI data: %d\n", ret);
+ pr_err("unable to get DMI data: %d\n", ret);
thinkpad_acpi_module_exit();
return ret;
}
@@ -9051,16 +8961,14 @@ static int __init thinkpad_acpi_module_init(void)
proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
if (!proc_dir) {
- printk(TPACPI_ERR
- "unable to create proc dir " TPACPI_PROC_DIR);
+ pr_err("unable to create proc dir " TPACPI_PROC_DIR "\n");
thinkpad_acpi_module_exit();
return -ENODEV;
}
ret = platform_driver_register(&tpacpi_pdriver);
if (ret) {
- printk(TPACPI_ERR
- "unable to register main platform driver\n");
+ pr_err("unable to register main platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9068,8 +8976,7 @@ static int __init thinkpad_acpi_module_init(void)
ret = platform_driver_register(&tpacpi_hwmon_pdriver);
if (ret) {
- printk(TPACPI_ERR
- "unable to register hwmon platform driver\n");
+ pr_err("unable to register hwmon platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9082,8 +8989,7 @@ static int __init thinkpad_acpi_module_init(void)
&tpacpi_hwmon_pdriver.driver);
}
if (ret) {
- printk(TPACPI_ERR
- "unable to create sysfs driver attributes\n");
+ pr_err("unable to create sysfs driver attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9096,7 +9002,7 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
tpacpi_pdev = NULL;
- printk(TPACPI_ERR "unable to register platform device\n");
+ pr_err("unable to register platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9106,16 +9012,14 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_sensors_pdev)) {
ret = PTR_ERR(tpacpi_sensors_pdev);
tpacpi_sensors_pdev = NULL;
- printk(TPACPI_ERR
- "unable to register hwmon platform device\n");
+ pr_err("unable to register hwmon platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
ret = device_create_file(&tpacpi_sensors_pdev->dev,
&dev_attr_thinkpad_acpi_pdev_name);
if (ret) {
- printk(TPACPI_ERR
- "unable to create sysfs hwmon device attributes\n");
+ pr_err("unable to create sysfs hwmon device attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9124,14 +9028,14 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_hwmon)) {
ret = PTR_ERR(tpacpi_hwmon);
tpacpi_hwmon = NULL;
- printk(TPACPI_ERR "unable to register hwmon device\n");
+ pr_err("unable to register hwmon device\n");
thinkpad_acpi_module_exit();
return ret;
}
mutex_init(&tpacpi_inputdev_send_mutex);
tpacpi_inputdev = input_allocate_device();
if (!tpacpi_inputdev) {
- printk(TPACPI_ERR "unable to allocate input device\n");
+ pr_err("unable to allocate input device\n");
thinkpad_acpi_module_exit();
return -ENOMEM;
} else {
@@ -9163,7 +9067,7 @@ static int __init thinkpad_acpi_module_init(void)
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
- printk(TPACPI_ERR "unable to register input device\n");
+ pr_err("unable to register input device\n");
thinkpad_acpi_module_exit();
return ret;
} else {
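
The thinkpad_acpi hunks above all follow one pattern: the driver's private TPACPI_ERR/TPACPI_INFO/... printk prefixes are replaced by the generic pr_err()/pr_info()/... helpers, which pick up their prefix from a single pr_fmt() definition near the top of the file. A minimal sketch of the mechanism (illustration only, not part of the patch; the literal prefix string below stands in for the driver's real pr_fmt()):

	/* pr_fmt() must be defined before the printk headers are pulled in. */
	#define pr_fmt(fmt) "thinkpad_acpi: " fmt

	#include <linux/kernel.h>

	static void pr_fmt_demo(void)
	{
		/* Old style: level and prefix spelled out at every call site. */
		printk(KERN_ERR "thinkpad_acpi: " "unable to get DMI data\n");

		/* New style: pr_err(fmt, ...) expands to
		 * printk(KERN_ERR pr_fmt(fmt), ...), so the prefix comes from
		 * the single pr_fmt() definition above. */
		pr_err("unable to get DMI data\n");
	}
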
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 1d07d6d09f27..4c20447ddbb7 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -194,7 +194,7 @@ static int __init topstar_laptop_init(void)
if (ret < 0)
return ret;
- printk(KERN_INFO "Topstar Laptop ACPI extras driver loaded\n");
+ pr_info("ACPI extras driver loaded\n");
return 0;
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 63f42a22e102..cb009b2629ee 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -35,6 +35,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define TOSHIBA_ACPI_VERSION "0.19"
#define PROC_INTERFACE_VERSION 1
@@ -60,11 +62,6 @@ MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
MODULE_LICENSE("GPL");
-#define MY_LOGPREFIX "toshiba_acpi: "
-#define MY_ERR KERN_ERR MY_LOGPREFIX
-#define MY_NOTICE KERN_NOTICE MY_LOGPREFIX
-#define MY_INFO KERN_INFO MY_LOGPREFIX
-
/* Toshiba ACPI method paths */
#define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM"
#define TOSH_INTERFACE_1 "\\_SB_.VALD"
@@ -301,7 +298,7 @@ static int toshiba_illumination_available(void)
in[0] = 0xf100;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Illumination device not available\n");
+ pr_info("Illumination device not available\n");
return 0;
}
in[0] = 0xf400;
@@ -320,7 +317,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[0] = 0xf100;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Illumination device not available\n");
+ pr_info("Illumination device not available\n");
return;
}
@@ -331,7 +328,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[2] = 1;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "ACPI call for illumination failed.\n");
+ pr_info("ACPI call for illumination failed\n");
return;
}
} else {
@@ -341,7 +338,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[2] = 0;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "ACPI call for illumination failed.\n");
+ pr_info("ACPI call for illumination failed.\n");
return;
}
}
@@ -364,7 +361,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
in[0] = 0xf100;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Illumination device not available\n");
+ pr_info("Illumination device not available\n");
return LED_OFF;
}
@@ -373,7 +370,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
in[1] = 0x14e;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "ACPI call for illumination failed.\n");
+ pr_info("ACPI call for illumination failed.\n");
return LED_OFF;
}
@@ -517,7 +514,7 @@ static int lcd_proc_show(struct seq_file *m, void *v)
seq_printf(m, "brightness_levels: %d\n",
HCI_LCD_BRIGHTNESS_LEVELS);
} else {
- printk(MY_ERR "Error reading LCD brightness\n");
+ pr_err("Error reading LCD brightness\n");
}
return 0;
@@ -592,7 +589,7 @@ static int video_proc_show(struct seq_file *m, void *v)
seq_printf(m, "crt_out: %d\n", is_crt);
seq_printf(m, "tv_out: %d\n", is_tv);
} else {
- printk(MY_ERR "Error reading video out status\n");
+ pr_err("Error reading video out status\n");
}
return 0;
@@ -686,7 +683,7 @@ static int fan_proc_show(struct seq_file *m, void *v)
seq_printf(m, "running: %d\n", (value > 0));
seq_printf(m, "force_on: %d\n", force_fan);
} else {
- printk(MY_ERR "Error reading fan status\n");
+ pr_err("Error reading fan status\n");
}
return 0;
@@ -750,9 +747,9 @@ static int keys_proc_show(struct seq_file *m, void *v)
* some machines where system events sporadically
* become disabled. */
hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
- printk(MY_NOTICE "Re-enabled hotkeys\n");
+ pr_notice("Re-enabled hotkeys\n");
} else {
- printk(MY_ERR "Error reading hotkey status\n");
+ pr_err("Error reading hotkey status\n");
goto end;
}
}
@@ -863,7 +860,7 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
if (!sparse_keymap_report_event(toshiba_acpi.hotkey_dev,
value, 1, true)) {
- printk(MY_INFO "Unknown key %x\n",
+ pr_info("Unknown key %x\n",
value);
}
} else if (hci_result == HCI_NOT_SUPPORTED) {
@@ -871,7 +868,7 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
* some machines where system events sporadically
* become disabled. */
hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
- printk(MY_NOTICE "Re-enabled hotkeys\n");
+ pr_notice("Re-enabled hotkeys\n");
}
} while (hci_result != HCI_EMPTY);
}
@@ -883,13 +880,13 @@ static int __init toshiba_acpi_setup_keyboard(char *device)
status = acpi_get_handle(NULL, device, &toshiba_acpi.handle);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to get notification device\n");
+ pr_info("Unable to get notification device\n");
return -ENODEV;
}
toshiba_acpi.hotkey_dev = input_allocate_device();
if (!toshiba_acpi.hotkey_dev) {
- printk(MY_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
return -ENOMEM;
}
@@ -905,21 +902,21 @@ static int __init toshiba_acpi_setup_keyboard(char *device)
status = acpi_install_notify_handler(toshiba_acpi.handle,
ACPI_DEVICE_NOTIFY, toshiba_acpi_notify, NULL);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to install hotkey notification\n");
+ pr_info("Unable to install hotkey notification\n");
error = -ENODEV;
goto err_free_keymap;
}
status = acpi_evaluate_object(toshiba_acpi.handle, "ENAB", NULL, NULL);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to enable hotkeys\n");
+ pr_info("Unable to enable hotkeys\n");
error = -ENODEV;
goto err_remove_notify;
}
error = input_register_device(toshiba_acpi.hotkey_dev);
if (error) {
- printk(MY_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
goto err_remove_notify;
}
@@ -980,17 +977,17 @@ static int __init toshiba_acpi_init(void)
if (is_valid_acpi_path(TOSH_INTERFACE_1 GHCI_METHOD)) {
method_hci = TOSH_INTERFACE_1 GHCI_METHOD;
if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_1))
- printk(MY_INFO "Unable to activate hotkeys\n");
+ pr_info("Unable to activate hotkeys\n");
} else if (is_valid_acpi_path(TOSH_INTERFACE_2 GHCI_METHOD)) {
method_hci = TOSH_INTERFACE_2 GHCI_METHOD;
if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_2))
- printk(MY_INFO "Unable to activate hotkeys\n");
+ pr_info("Unable to activate hotkeys\n");
} else
return -ENODEV;
- printk(MY_INFO "Toshiba Laptop ACPI Extras version %s\n",
+ pr_info("Toshiba Laptop ACPI Extras version %s\n",
TOSHIBA_ACPI_VERSION);
- printk(MY_INFO " HCI method: %s\n", method_hci);
+ pr_info(" HCI method: %s\n", method_hci);
mutex_init(&toshiba_acpi.mutex);
@@ -998,7 +995,7 @@ static int __init toshiba_acpi_init(void)
-1, NULL, 0);
if (IS_ERR(toshiba_acpi.p_dev)) {
ret = PTR_ERR(toshiba_acpi.p_dev);
- printk(MY_ERR "unable to register platform device\n");
+ pr_err("unable to register platform device\n");
toshiba_acpi.p_dev = NULL;
toshiba_acpi_exit();
return ret;
@@ -1028,7 +1025,7 @@ static int __init toshiba_acpi_init(void)
if (IS_ERR(toshiba_backlight_device)) {
ret = PTR_ERR(toshiba_backlight_device);
- printk(KERN_ERR "Could not register toshiba backlight device\n");
+ pr_err("Could not register toshiba backlight device\n");
toshiba_backlight_device = NULL;
toshiba_acpi_exit();
return ret;
@@ -1042,14 +1039,14 @@ static int __init toshiba_acpi_init(void)
&toshiba_rfk_ops,
&toshiba_acpi);
if (!toshiba_acpi.bt_rfk) {
- printk(MY_ERR "unable to allocate rfkill device\n");
+ pr_err("unable to allocate rfkill device\n");
toshiba_acpi_exit();
return -ENOMEM;
}
ret = rfkill_register(toshiba_acpi.bt_rfk);
if (ret) {
- printk(MY_ERR "unable to register rfkill device\n");
+ pr_err("unable to register rfkill device\n");
rfkill_destroy(toshiba_acpi.bt_rfk);
toshiba_acpi_exit();
return ret;
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 944068611919..5fb7186694df 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -17,6 +17,8 @@
* delivered.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -70,14 +72,13 @@ static int toshiba_bluetooth_enable(acpi_handle handle)
if (!(result & 0x01))
return 0;
- printk(KERN_INFO "toshiba_bluetooth: Re-enabling Toshiba Bluetooth\n");
+ pr_info("Re-enabling Toshiba Bluetooth\n");
res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
if (!ACPI_FAILURE(res1) || !ACPI_FAILURE(res2))
return 0;
- printk(KERN_WARNING "toshiba_bluetooth: Failed to re-enable "
- "Toshiba Bluetooth\n");
+ pr_warn("Failed to re-enable Toshiba Bluetooth\n");
return -ENODEV;
}
@@ -107,8 +108,8 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
&bt_present);
if (!ACPI_FAILURE(status) && bt_present) {
- printk(KERN_INFO "Detected Toshiba ACPI Bluetooth device - "
- "installing RFKill handler\n");
+ pr_info("Detected Toshiba ACPI Bluetooth device - "
+ "installing RFKill handler\n");
result = toshiba_bluetooth_enable(device->handle);
}
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 05cc79672a8b..f23d5a84e7b1 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -486,16 +486,16 @@ static void wmi_dump_wdg(const struct guid_block *g)
pr_info("\tnotify_id: %02X\n", g->notify_id);
pr_info("\treserved: %02X\n", g->reserved);
pr_info("\tinstance_count: %d\n", g->instance_count);
- pr_info("\tflags: %#x ", g->flags);
+ pr_info("\tflags: %#x", g->flags);
if (g->flags) {
if (g->flags & ACPI_WMI_EXPENSIVE)
- pr_cont("ACPI_WMI_EXPENSIVE ");
+ pr_cont(" ACPI_WMI_EXPENSIVE");
if (g->flags & ACPI_WMI_METHOD)
- pr_cont("ACPI_WMI_METHOD ");
+ pr_cont(" ACPI_WMI_METHOD");
if (g->flags & ACPI_WMI_STRING)
- pr_cont("ACPI_WMI_STRING ");
+ pr_cont(" ACPI_WMI_STRING");
if (g->flags & ACPI_WMI_EVENT)
- pr_cont("ACPI_WMI_EVENT ");
+ pr_cont(" ACPI_WMI_EVENT");
}
pr_cont("\n");
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index c1372ed9d2e9..fad153dc0355 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -11,6 +11,8 @@
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -20,7 +22,6 @@
#include <acpi/acpi_drivers.h>
#define MODULE_NAME "xo15-ebook"
-#define PREFIX MODULE_NAME ": "
#define XO15_EBOOK_CLASS MODULE_NAME
#define XO15_EBOOK_TYPE_UNKNOWN 0x00
@@ -105,7 +106,7 @@ static int ebook_switch_add(struct acpi_device *device)
class = acpi_device_class(device);
if (strcmp(hid, XO15_EBOOK_HID)) {
- printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid);
+ pr_err("Unsupported hid [%s]\n", hid);
error = -ENODEV;
goto err_free_input;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index dc8c531ed276..e57b50b38565 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -210,6 +210,15 @@ config CHARGER_ISP1704
Say Y to enable support for USB Charger Detection with
ISP1707/ISP1704 USB transceivers.
+config CHARGER_MAX8903
+ tristate "MAX8903 Battery DC-DC Charger for USB and Adapter Power"
+ depends on GENERIC_HARDIRQS
+ help
+ Say Y to enable support for the MAX8903 DC-DC charger. The driver
+ controls the charger-enable and current-limit pins based on the
+ status of the charger connections, using interrupt handlers, and
+ reports charger status through the power supply class.
+
config CHARGER_TWL4030
tristate "OMAP TWL4030 BCI charger driver"
depends on TWL4030_CORE
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 8224990b933d..009a90fa8ac9 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -33,5 +33,6 @@ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
+obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 59e68dbd028b..bb16f5b7e167 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -4,6 +4,7 @@
* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
* Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
+ * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
*
* Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
*
@@ -76,7 +77,7 @@ struct bq27x00_reg_cache {
int time_to_empty_avg;
int time_to_full;
int charge_full;
- int charge_counter;
+ int cycle_count;
int capacity;
int flags;
@@ -115,7 +116,7 @@ static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_ENERGY_NOW,
};
@@ -267,7 +268,7 @@ static void bq27x00_update(struct bq27x00_device_info *di)
cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
cache.charge_full = bq27x00_battery_read_lmd(di);
- cache.charge_counter = bq27x00_battery_read_cyct(di);
+ cache.cycle_count = bq27x00_battery_read_cyct(di);
if (!is_bq27500)
cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
@@ -496,8 +497,8 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
ret = bq27x00_simple_value(di->charge_design_full, val);
break;
- case POWER_SUPPLY_PROP_CHARGE_COUNTER:
- ret = bq27x00_simple_value(di->cache.charge_counter, val);
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ ret = bq27x00_simple_value(di->cache.cycle_count, val);
break;
case POWER_SUPPLY_PROP_ENERGY_NOW:
ret = bq27x00_battery_energy(di, val);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index e534290f3256..f2c9cc33c0f9 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -86,7 +86,11 @@ static int rated_capacities[] = {
920, /* NEC */
1440, /* Samsung */
1440, /* BYD */
+#ifdef CONFIG_MACH_H4700
+ 1800, /* HP iPAQ hx4700 3.7V 1800mAh (359113-001) */
+#else
1440, /* Lishen */
+#endif
1440, /* NEC */
2880, /* Samsung */
2880, /* BYD */
@@ -186,7 +190,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
scale[0] = di->full_active_uAh;
for (i = 1; i < 5; i++)
- scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 2 + i];
+ scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 1 + i];
di->full_active_uAh = battery_interpolate(scale, di->temp_C / 10);
di->full_active_uAh *= 1000; /* convert to µAh */
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 25b88ac1d44c..718f2c537827 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -161,12 +161,27 @@ static int __devexit gpio_charger_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int gpio_charger_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+
+ power_supply_changed(&gpio_charger->charger);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops, NULL, gpio_charger_resume);
+
static struct platform_driver gpio_charger_driver = {
.probe = gpio_charger_probe,
.remove = __devexit_p(gpio_charger_remove),
.driver = {
.name = "gpio-charger",
.owner = THIS_MODULE,
+ .pm = &gpio_charger_pm_ops,
},
};
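
For context on the gpio-charger change: only a resume callback is needed because there is nothing to quiesce on suspend; resume merely re-announces a charger state that may have changed while the system was asleep. A rough sketch of what the SIMPLE_DEV_PM_OPS() line expands to when CONFIG_PM_SLEEP is set (simplified; the real macro reuses the same callback pair for the freeze/thaw and poweroff/restore transitions):

	static const struct dev_pm_ops gpio_charger_pm_ops = {
		.suspend  = NULL,                 /* nothing to do on suspend */
		.resume   = gpio_charger_resume,  /* re-check GPIO, notify core */
		.freeze   = NULL,
		.thaw     = gpio_charger_resume,
		.poweroff = NULL,
		.restore  = gpio_charger_resume,
	};
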
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 2ad9b14a5ce3..f6d72b402a8e 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -33,6 +33,7 @@
#include <linux/usb/ulpi.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/power/isp1704_charger.h>
/* Vendor specific Power Control register */
#define ISP1704_PWR_CTRL 0x3d
@@ -71,6 +72,18 @@ struct isp1704_charger {
};
/*
+ * Disable/enable the power from the isp1704 if a function for it
+ * has been provided with platform data.
+ */
+static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on)
+{
+ struct isp1704_charger_data *board = isp->dev->platform_data;
+
+ if (board && board->set_power)
+ board->set_power(on);
+}
+
+/*
* Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB
* chargers).
*
@@ -222,6 +235,9 @@ static void isp1704_charger_work(struct work_struct *data)
mutex_lock(&lock);
+ if (event != USB_EVENT_NONE)
+ isp1704_charger_set_power(isp, 1);
+
switch (event) {
case USB_EVENT_VBUS:
isp->online = true;
@@ -269,6 +285,8 @@ static void isp1704_charger_work(struct work_struct *data)
*/
if (isp->otg->gadget)
usb_gadget_disconnect(isp->otg->gadget);
+
+ isp1704_charger_set_power(isp, 0);
break;
case USB_EVENT_ENUMERATED:
if (isp->present)
@@ -394,6 +412,8 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
isp->dev = &pdev->dev;
platform_set_drvdata(pdev, isp);
+ isp1704_charger_set_power(isp, 1);
+
ret = isp1704_test_ulpi(isp);
if (ret < 0)
goto fail1;
@@ -434,6 +454,7 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
/* Detect charger if VBUS is valid (the cable was already plugged). */
ret = otg_io_read(isp->otg, ULPI_USB_INT_STS);
+ isp1704_charger_set_power(isp, 0);
if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) {
isp->event = USB_EVENT_VBUS;
schedule_work(&isp->work);
@@ -459,6 +480,7 @@ static int __devexit isp1704_charger_remove(struct platform_device *pdev)
otg_unregister_notifier(isp->otg, &isp->nb);
power_supply_unregister(&isp->psy);
otg_put_transceiver(isp->otg);
+ isp1704_charger_set_power(isp, 0);
kfree(isp);
return 0;
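
The new isp1704_charger_set_power() helper expects boards to hand in a set_power callback through the freshly added <linux/power/isp1704_charger.h> platform data. A hypothetical board-file hook-up could look like the sketch below (the device name, the stub body and the exact callback prototype are assumptions for illustration; only the set_power field name is taken from the driver code above):

	#include <linux/platform_device.h>
	#include <linux/power/isp1704_charger.h>

	/* Board-specific: gate the ISP1704 supply on/off (stub). */
	static void board_isp1704_set_power(bool on)
	{
		/* e.g. toggle a regulator or GPIO feeding the transceiver */
	}

	static struct isp1704_charger_data board_isp1704_data = {
		.set_power = board_isp1704_set_power,
	};

	static struct platform_device board_isp1704_device = {
		.name = "isp1704_charger",
		.id   = -1,
		.dev  = {
			.platform_data = &board_isp1704_data,
		},
	};
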
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
new file mode 100644
index 000000000000..33ff0e37809e
--- /dev/null
+++ b/drivers/power/max8903_charger.c
@@ -0,0 +1,391 @@
+/*
+ * max8903_charger.c - Maxim 8903 USB/Adapter Charger Driver
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/platform_device.h>
+#include <linux/power/max8903_charger.h>
+
+struct max8903_data {
+ struct max8903_pdata *pdata;
+ struct device *dev;
+ struct power_supply psy;
+ bool fault;
+ bool usb_in;
+ bool ta_in;
+};
+
+static enum power_supply_property max8903_charger_props[] = {
+ POWER_SUPPLY_PROP_STATUS, /* Charger status output */
+ POWER_SUPPLY_PROP_ONLINE, /* External power source */
+ POWER_SUPPLY_PROP_HEALTH, /* Fault or OK */
+};
+
+static int max8903_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max8903_data *data = container_of(psy,
+ struct max8903_data, psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ if (data->pdata->chg) {
+ if (gpio_get_value(data->pdata->chg) == 0)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (data->usb_in || data->ta_in)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 0;
+ if (data->usb_in || data->ta_in)
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ if (data->fault)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static irqreturn_t max8903_dcin(int irq, void *_data)
+{
+ struct max8903_data *data = _data;
+ struct max8903_pdata *pdata = data->pdata;
+ bool ta_in;
+ enum power_supply_type old_type;
+
+ ta_in = gpio_get_value(pdata->dok) ? false : true;
+
+ if (ta_in == data->ta_in)
+ return IRQ_HANDLED;
+
+ data->ta_in = ta_in;
+
+ /* Set Current-Limit-Mode 1:DC 0:USB */
+ if (pdata->dcm)
+ gpio_set_value(pdata->dcm, ta_in ? 1 : 0);
+
+ /* Charger Enable / Disable (cen is negated) */
+ if (pdata->cen)
+ gpio_set_value(pdata->cen, ta_in ? 0 :
+ (data->usb_in ? 0 : 1));
+
+ dev_dbg(data->dev, "TA(DC-IN) Charger %s.\n", ta_in ?
+ "Connected" : "Disconnected");
+
+ old_type = data->psy.type;
+
+ if (data->ta_in)
+ data->psy.type = POWER_SUPPLY_TYPE_MAINS;
+ else if (data->usb_in)
+ data->psy.type = POWER_SUPPLY_TYPE_USB;
+ else
+ data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+
+ if (old_type != data->psy.type)
+ power_supply_changed(&data->psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t max8903_usbin(int irq, void *_data)
+{
+ struct max8903_data *data = _data;
+ struct max8903_pdata *pdata = data->pdata;
+ bool usb_in;
+ enum power_supply_type old_type;
+
+ usb_in = gpio_get_value(pdata->uok) ? false : true;
+
+ if (usb_in == data->usb_in)
+ return IRQ_HANDLED;
+
+ data->usb_in = usb_in;
+
+ /* Do not touch Current-Limit-Mode */
+
+ /* Charger Enable / Disable (cen is negated) */
+ if (pdata->cen)
+ gpio_set_value(pdata->cen, usb_in ? 0 :
+ (data->ta_in ? 0 : 1));
+
+ dev_dbg(data->dev, "USB Charger %s.\n", usb_in ?
+ "Connected" : "Disconnected");
+
+ old_type = data->psy.type;
+
+ if (data->ta_in)
+ data->psy.type = POWER_SUPPLY_TYPE_MAINS;
+ else if (data->usb_in)
+ data->psy.type = POWER_SUPPLY_TYPE_USB;
+ else
+ data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+
+ if (old_type != data->psy.type)
+ power_supply_changed(&data->psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t max8903_fault(int irq, void *_data)
+{
+ struct max8903_data *data = _data;
+ struct max8903_pdata *pdata = data->pdata;
+ bool fault;
+
+ fault = gpio_get_value(pdata->flt) ? false : true;
+
+ if (fault == data->fault)
+ return IRQ_HANDLED;
+
+ data->fault = fault;
+
+ if (fault)
+ dev_err(data->dev, "Charger suffers a fault and stops.\n");
+ else
+ dev_err(data->dev, "Charger recovered from a fault.\n");
+
+ return IRQ_HANDLED;
+}
+
+static __devinit int max8903_probe(struct platform_device *pdev)
+{
+ struct max8903_data *data;
+ struct device *dev = &pdev->dev;
+ struct max8903_pdata *pdata = pdev->dev.platform_data;
+ int ret = 0;
+ int gpio;
+ int ta_in = 0;
+ int usb_in = 0;
+
+ data = kzalloc(sizeof(struct max8903_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+ data->pdata = pdata;
+ data->dev = dev;
+ platform_set_drvdata(pdev, data);
+
+ if (pdata->dc_valid == false && pdata->usb_valid == false) {
+ dev_err(dev, "No valid power sources.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (pdata->dc_valid) {
+ if (pdata->dok && gpio_is_valid(pdata->dok) &&
+ pdata->dcm && gpio_is_valid(pdata->dcm)) {
+ gpio = pdata->dok; /* PULL_UPed Interrupt */
+ ta_in = gpio_get_value(gpio) ? 0 : 1;
+
+ gpio = pdata->dcm; /* Output */
+ gpio_set_value(gpio, ta_in);
+ } else {
+ dev_err(dev, "When DC is wired, DOK and DCM should"
+ " be wired as well.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ } else {
+ if (pdata->dcm) {
+ if (gpio_is_valid(pdata->dcm))
+ gpio_set_value(pdata->dcm, 0);
+ else {
+ dev_err(dev, "Invalid pin: dcm.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ }
+
+ if (pdata->usb_valid) {
+ if (pdata->uok && gpio_is_valid(pdata->uok)) {
+ gpio = pdata->uok;
+ usb_in = gpio_get_value(gpio) ? 0 : 1;
+ } else {
+ dev_err(dev, "When USB is wired, UOK should be wired."
+ "as well.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (pdata->cen) {
+ if (gpio_is_valid(pdata->cen)) {
+ gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1);
+ } else {
+ dev_err(dev, "Invalid pin: cen.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (pdata->chg) {
+ if (!gpio_is_valid(pdata->chg)) {
+ dev_err(dev, "Invalid pin: chg.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (pdata->flt) {
+ if (!gpio_is_valid(pdata->flt)) {
+ dev_err(dev, "Invalid pin: flt.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (pdata->usus) {
+ if (!gpio_is_valid(pdata->usus)) {
+ dev_err(dev, "Invalid pin: usus.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ data->fault = false;
+ data->ta_in = ta_in;
+ data->usb_in = usb_in;
+
+ data->psy.name = "max8903_charger";
+ data->psy.type = (ta_in) ? POWER_SUPPLY_TYPE_MAINS :
+ ((usb_in) ? POWER_SUPPLY_TYPE_USB :
+ POWER_SUPPLY_TYPE_BATTERY);
+ data->psy.get_property = max8903_get_property;
+ data->psy.properties = max8903_charger_props;
+ data->psy.num_properties = ARRAY_SIZE(max8903_charger_props);
+
+ ret = power_supply_register(dev, &data->psy);
+ if (ret) {
+ dev_err(dev, "failed: power supply register.\n");
+ goto err;
+ }
+
+ if (pdata->dc_valid) {
+ ret = request_threaded_irq(gpio_to_irq(pdata->dok),
+ NULL, max8903_dcin,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "MAX8903 DC IN", data);
+ if (ret) {
+ dev_err(dev, "Cannot request irq %d for DC (%d)\n",
+ gpio_to_irq(pdata->dok), ret);
+ goto err_psy;
+ }
+ }
+
+ if (pdata->usb_valid) {
+ ret = request_threaded_irq(gpio_to_irq(pdata->uok),
+ NULL, max8903_usbin,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "MAX8903 USB IN", data);
+ if (ret) {
+ dev_err(dev, "Cannot request irq %d for USB (%d)\n",
+ gpio_to_irq(pdata->uok), ret);
+ goto err_dc_irq;
+ }
+ }
+
+ if (pdata->flt) {
+ ret = request_threaded_irq(gpio_to_irq(pdata->flt),
+ NULL, max8903_fault,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "MAX8903 Fault", data);
+ if (ret) {
+ dev_err(dev, "Cannot request irq %d for Fault (%d)\n",
+ gpio_to_irq(pdata->flt), ret);
+ goto err_usb_irq;
+ }
+ }
+
+ return 0;
+
+err_usb_irq:
+ if (pdata->usb_valid)
+ free_irq(gpio_to_irq(pdata->uok), data);
+err_dc_irq:
+ if (pdata->dc_valid)
+ free_irq(gpio_to_irq(pdata->dok), data);
+err_psy:
+ power_supply_unregister(&data->psy);
+err:
+ kfree(data);
+ return ret;
+}
+
+static __devexit int max8903_remove(struct platform_device *pdev)
+{
+ struct max8903_data *data = platform_get_drvdata(pdev);
+
+ if (data) {
+ struct max8903_pdata *pdata = data->pdata;
+
+ if (pdata->flt)
+ free_irq(gpio_to_irq(pdata->flt), data);
+ if (pdata->usb_valid)
+ free_irq(gpio_to_irq(pdata->uok), data);
+ if (pdata->dc_valid)
+ free_irq(gpio_to_irq(pdata->dok), data);
+ power_supply_unregister(&data->psy);
+ kfree(data);
+ }
+
+ return 0;
+}
+
+static struct platform_driver max8903_driver = {
+ .probe = max8903_probe,
+ .remove = __devexit_p(max8903_remove),
+ .driver = {
+ .name = "max8903-charger",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init max8903_init(void)
+{
+ return platform_driver_register(&max8903_driver);
+}
+module_init(max8903_init);
+
+static void __exit max8903_exit(void)
+{
+ platform_driver_unregister(&max8903_driver);
+}
+module_exit(max8903_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MAX8903 Charger Driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("max8903-charger");
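
As a usage sketch for the new driver: a board registers a "max8903-charger" platform device (the name matches the platform_driver above) whose platform data describes the GPIO wiring. The field names below mirror how the driver dereferences its pdata; the GPIO numbers are invented and the exact field types live in <linux/power/max8903_charger.h>, which is not part of this diff:

	#include <linux/platform_device.h>
	#include <linux/power/max8903_charger.h>

	static struct max8903_pdata board_max8903_pdata = {
		.dok       = 42,   /* DC (adapter) OK input, active low */
		.uok       = 43,   /* USB OK input, active low */
		.cen       = 44,   /* Charge ENable output, negated */
		.chg       = 45,   /* CHarGing status input, low = charging */
		.flt       = 46,   /* FauLT status input, active low */
		.dcm       = 47,   /* current-limit mode output: 1 = DC, 0 = USB */
		.dc_valid  = true,
		.usb_valid = true,
	};

	static struct platform_device board_max8903_device = {
		.name = "max8903-charger",
		.id   = -1,
		.dev  = {
			.platform_data = &board_max8903_pdata,
		},
	};
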
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c
index 0cd9f67d33e5..b527c93bf2f3 100644
--- a/drivers/power/test_power.c
+++ b/drivers/power/test_power.c
@@ -3,6 +3,12 @@
*
* Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com>
*
+ * Dynamic module parameter code from the Virtual Battery Driver
+ * Copyright (C) 2008 Pylone, Inc.
+ * By: Masashi YOKOTA <yokota@pylone.jp>
+ * Originally found here:
+ * http://downloads.pylone.jp/src/virtual_battery/virtual_battery-0.0.1.tar.bz2
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -15,8 +21,12 @@
#include <linux/delay.h>
#include <linux/vermagic.h>
-static int test_power_ac_online = 1;
-static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING;
+static int ac_online = 1;
+static int battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
+static int battery_health = POWER_SUPPLY_HEALTH_GOOD;
+static int battery_present = 1; /* true */
+static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION;
+static int battery_capacity = 50;
static int test_power_get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
@@ -24,7 +34,7 @@ static int test_power_get_ac_property(struct power_supply *psy,
{
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = test_power_ac_online;
+ val->intval = ac_online;
break;
default:
return -EINVAL;
@@ -47,22 +57,30 @@ static int test_power_get_battery_property(struct power_supply *psy,
val->strval = UTS_RELEASE;
break;
case POWER_SUPPLY_PROP_STATUS:
- val->intval = test_power_battery_status;
+ val->intval = battery_status;
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
break;
case POWER_SUPPLY_PROP_HEALTH:
- val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ val->intval = battery_health;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = battery_present;
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ val->intval = battery_technology;
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = 50;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = battery_capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = 100;
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
@@ -84,9 +102,11 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
- POWER_SUPPLY_PROP_CHARGE_EMPTY,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -118,6 +138,7 @@ static struct power_supply test_power_supplies[] = {
},
};
+
static int __init test_power_init(void)
{
int i;
@@ -145,8 +166,8 @@ static void __exit test_power_exit(void)
int i;
/* Let's see how we handle changes... */
- test_power_ac_online = 0;
- test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ ac_online = 0;
+ battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
power_supply_changed(&test_power_supplies[i]);
pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n",
@@ -158,6 +179,241 @@ static void __exit test_power_exit(void)
}
module_exit(test_power_exit);
+
+
+#define MAX_KEYLENGTH 256
+struct battery_property_map {
+ int value;
+ char const *key;
+};
+
+static struct battery_property_map map_ac_online[] = {
+ { 0, "on" },
+ { 1, "off" },
+ { -1, NULL },
+};
+
+static struct battery_property_map map_status[] = {
+ { POWER_SUPPLY_STATUS_CHARGING, "charging" },
+ { POWER_SUPPLY_STATUS_DISCHARGING, "discharging" },
+ { POWER_SUPPLY_STATUS_NOT_CHARGING, "not-charging" },
+ { POWER_SUPPLY_STATUS_FULL, "full" },
+ { -1, NULL },
+};
+
+static struct battery_property_map map_health[] = {
+ { POWER_SUPPLY_HEALTH_GOOD, "good" },
+ { POWER_SUPPLY_HEALTH_OVERHEAT, "overheat" },
+ { POWER_SUPPLY_HEALTH_DEAD, "dead" },
+ { POWER_SUPPLY_HEALTH_OVERVOLTAGE, "overvoltage" },
+ { POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, "failure" },
+ { -1, NULL },
+};
+
+static struct battery_property_map map_present[] = {
+ { 0, "false" },
+ { 1, "true" },
+ { -1, NULL },
+};
+
+static struct battery_property_map map_technology[] = {
+ { POWER_SUPPLY_TECHNOLOGY_NiMH, "NiMH" },
+ { POWER_SUPPLY_TECHNOLOGY_LION, "LION" },
+ { POWER_SUPPLY_TECHNOLOGY_LIPO, "LIPO" },
+ { POWER_SUPPLY_TECHNOLOGY_LiFe, "LiFe" },
+ { POWER_SUPPLY_TECHNOLOGY_NiCd, "NiCd" },
+ { POWER_SUPPLY_TECHNOLOGY_LiMn, "LiMn" },
+ { -1, NULL },
+};
+
+
+static int map_get_value(struct battery_property_map *map, const char *key,
+ int def_val)
+{
+ char buf[MAX_KEYLENGTH];
+ int cr;
+
+ strncpy(buf, key, MAX_KEYLENGTH);
+ buf[MAX_KEYLENGTH-1] = '\0';
+
+	cr = strnlen(buf, MAX_KEYLENGTH) - 1;
+	if (cr < 0)
+		return def_val;
+	if (buf[cr] == '\n')
+		buf[cr] = '\0';
+
+ while (map->key) {
+ if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0)
+ return map->value;
+ map++;
+ }
+
+ return def_val;
+}
+
+
+static const char *map_get_key(struct battery_property_map *map, int value,
+ const char *def_key)
+{
+ while (map->key) {
+ if (map->value == value)
+ return map->key;
+ map++;
+ }
+
+ return def_key;
+}
+
+static int param_set_ac_online(const char *key, const struct kernel_param *kp)
+{
+ ac_online = map_get_value(map_ac_online, key, ac_online);
+ power_supply_changed(&test_power_supplies[0]);
+ return 0;
+}
+
+static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
+{
+ strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_status(const char *key,
+ const struct kernel_param *kp)
+{
+ battery_status = map_get_value(map_status, key, battery_status);
+ power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+
+static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
+{
+ strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_health(const char *key,
+ const struct kernel_param *kp)
+{
+ battery_health = map_get_value(map_health, key, battery_health);
+ power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+
+static int param_get_battery_health(char *buffer, const struct kernel_param *kp)
+{
+ strcpy(buffer, map_get_key(map_health, battery_health, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_present(const char *key,
+ const struct kernel_param *kp)
+{
+ battery_present = map_get_value(map_present, key, battery_present);
+ power_supply_changed(&test_power_supplies[0]);
+ return 0;
+}
+
+static int param_get_battery_present(char *buffer,
+ const struct kernel_param *kp)
+{
+ strcpy(buffer, map_get_key(map_present, battery_present, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_technology(const char *key,
+ const struct kernel_param *kp)
+{
+ battery_technology = map_get_value(map_technology, key,
+ battery_technology);
+ power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+
+static int param_get_battery_technology(char *buffer,
+ const struct kernel_param *kp)
+{
+ strcpy(buffer,
+ map_get_key(map_technology, battery_technology, "unknown"));
+ return strlen(buffer);
+}
+
+static int param_set_battery_capacity(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_capacity = tmp;
+ power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+
+#define param_get_battery_capacity param_get_int
+
+
+
+static struct kernel_param_ops param_ops_ac_online = {
+ .set = param_set_ac_online,
+ .get = param_get_ac_online,
+};
+
+static struct kernel_param_ops param_ops_battery_status = {
+ .set = param_set_battery_status,
+ .get = param_get_battery_status,
+};
+
+static struct kernel_param_ops param_ops_battery_present = {
+ .set = param_set_battery_present,
+ .get = param_get_battery_present,
+};
+
+static struct kernel_param_ops param_ops_battery_technology = {
+ .set = param_set_battery_technology,
+ .get = param_get_battery_technology,
+};
+
+static struct kernel_param_ops param_ops_battery_health = {
+ .set = param_set_battery_health,
+ .get = param_get_battery_health,
+};
+
+static struct kernel_param_ops param_ops_battery_capacity = {
+ .set = param_set_battery_capacity,
+ .get = param_get_battery_capacity,
+};
+
+
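+/*
+ * Note: module_param(name, type, perm) references param_ops_<type> and
+ * param_check_<type>(), which is why each parameter above gets its own
+ * ops structure and check macro.  Checking against "void" simply
+ * disables the static type check for these textual parameters.
+ */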
+#define param_check_ac_online(name, p) __param_check(name, p, void);
+#define param_check_battery_status(name, p) __param_check(name, p, void);
+#define param_check_battery_present(name, p) __param_check(name, p, void);
+#define param_check_battery_technology(name, p) __param_check(name, p, void);
+#define param_check_battery_health(name, p) __param_check(name, p, void);
+#define param_check_battery_capacity(name, p) __param_check(name, p, void);
+
+
+module_param(ac_online, ac_online, 0644);
+MODULE_PARM_DESC(ac_online, "AC charging state <on|off>");
+
+module_param(battery_status, battery_status, 0644);
+MODULE_PARM_DESC(battery_status,
+ "battery status <charging|discharging|not-charging|full>");
+
+module_param(battery_present, battery_present, 0644);
+MODULE_PARM_DESC(battery_present,
+	"battery presence state <true|false>");
+
+module_param(battery_technology, battery_technology, 0644);
+MODULE_PARM_DESC(battery_technology,
+ "battery technology <NiMH|LION|LIPO|LiFe|NiCd|LiMn>");
+
+module_param(battery_health, battery_health, 0644);
+MODULE_PARM_DESC(battery_health,
+ "battery health state <good|overheat|dead|overvoltage|failure>");
+
+module_param(battery_capacity, battery_capacity, 0644);
+MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
+
+
MODULE_DESCRIPTION("Power supply driver for testing");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index e5ced3a4c1ed..d119c38b3ff6 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -271,24 +271,33 @@ static int __devexit z2_batt_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
+static int z2_batt_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct z2_charger *charger = i2c_get_clientdata(client);
flush_work_sync(&charger->bat_work);
return 0;
}
-static int z2_batt_resume(struct i2c_client *client)
+static int z2_batt_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct z2_charger *charger = i2c_get_clientdata(client);
schedule_work(&charger->bat_work);
return 0;
}
+
+static const struct dev_pm_ops z2_battery_pm_ops = {
+ .suspend = z2_batt_suspend,
+ .resume = z2_batt_resume,
+};
+
+#define Z2_BATTERY_PM_OPS (&z2_battery_pm_ops)
+
#else
-#define z2_batt_suspend NULL
-#define z2_batt_resume NULL
+#define Z2_BATTERY_PM_OPS (NULL)
#endif
static const struct i2c_device_id z2_batt_id[] = {
@@ -301,11 +310,10 @@ static struct i2c_driver z2_batt_driver = {
.driver = {
.name = "z2-battery",
.owner = THIS_MODULE,
+ .pm = Z2_BATTERY_PM_OPS
},
.probe = z2_batt_probe,
.remove = z2_batt_remove,
- .suspend = z2_batt_suspend,
- .resume = z2_batt_resume,
.id_table = z2_batt_id,
};
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index a8d03aeb4051..e7f301da2902 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -46,7 +46,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
caps.n_ext_ts = ptp->info->n_ext_ts;
caps.n_per_out = ptp->info->n_per_out;
caps.pps = ptp->info->pps;
- err = copy_to_user((void __user *)arg, &caps, sizeof(caps));
+ if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
+ err = -EFAULT;
break;
case PTP_EXTTS_REQUEST:
@@ -129,8 +130,10 @@ ssize_t ptp_read(struct posix_clock *pc,
return -ERESTARTSYS;
}
- if (ptp->defunct)
+ if (ptp->defunct) {
+ mutex_unlock(&ptp->tsevq_mux);
return -ENODEV;
+ }
spin_lock_irqsave(&queue->lock, flags);
@@ -150,10 +153,8 @@ ssize_t ptp_read(struct posix_clock *pc,
mutex_unlock(&ptp->tsevq_mux);
- if (copy_to_user(buf, event, cnt)) {
- mutex_unlock(&ptp->tsevq_mux);
+ if (copy_to_user(buf, event, cnt))
return -EFAULT;
- }
return cnt;
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index f0b13a0d1851..d7ed20f293d7 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -297,5 +297,11 @@ config REGULATOR_TPS6524X
serial interface currently supported on the sequencer serial
port controller.
+config REGULATOR_TPS65910
+ tristate "TI TPS65910 Power Regulator"
+ depends on MFD_TPS65910
+ help
+	  This driver supports TPS65910/TPS65911 voltage regulator chips.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 165ff5371e9e..3932d2ec38f3 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -42,5 +42,6 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
+obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 0fae51c4845a..d3e38790906e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -158,6 +158,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
struct regulator *regulator;
list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ /*
+ * Assume consumers that didn't say anything are OK
+ * with anything in the constraint range.
+ */
+ if (!regulator->min_uV && !regulator->max_uV)
+ continue;
+
if (*max_uV > regulator->max_uV)
*max_uV = regulator->max_uV;
if (*min_uV < regulator->min_uV)
@@ -197,9 +204,9 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
}
/* operating mode constraint check */
-static int regulator_check_mode(struct regulator_dev *rdev, int mode)
+static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
{
- switch (mode) {
+ switch (*mode) {
case REGULATOR_MODE_FAST:
case REGULATOR_MODE_NORMAL:
case REGULATOR_MODE_IDLE:
@@ -217,11 +224,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
rdev_err(rdev, "operation not allowed\n");
return -EPERM;
}
- if (!(rdev->constraints->valid_modes_mask & mode)) {
- rdev_err(rdev, "invalid mode %x\n", mode);
- return -EINVAL;
+
+ /* The modes are bitmasks, the most power hungry modes having
+ * the lowest values. If the requested mode isn't supported
+ * try higher modes. */
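+	/* e.g. an unsupported REGULATOR_MODE_IDLE (0x4) request falls back
+	 * to REGULATOR_MODE_NORMAL (0x2) and then REGULATOR_MODE_FAST (0x1)
+	 * before -EINVAL is returned. */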
+ while (*mode) {
+ if (rdev->constraints->valid_modes_mask & *mode)
+ return 0;
+ *mode /= 2;
}
- return 0;
+
+ return -EINVAL;
}
/* dynamic regulator mode switching constraint check */
@@ -612,7 +625,7 @@ static void drms_uA_update(struct regulator_dev *rdev)
output_uV, current_uA);
/* check the new mode is allowed */
- err = regulator_check_mode(rdev, mode);
+ err = regulator_mode_constrain(rdev, &mode);
if (err == 0)
rdev->desc->ops->set_mode(rdev, mode);
}
@@ -718,6 +731,10 @@ static void print_constraints(struct regulator_dev *rdev)
count += sprintf(buf + count, "at %d mV ", ret / 1000);
}
+ if (constraints->uV_offset)
+ count += sprintf(buf, "%dmV offset ",
+ constraints->uV_offset / 1000);
+
if (constraints->min_uA && constraints->max_uA) {
if (constraints->min_uA == constraints->max_uA)
count += sprintf(buf + count, "%d mA ",
@@ -1498,13 +1515,14 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
*/
int regulator_force_disable(struct regulator *regulator)
{
+ struct regulator_dev *rdev = regulator->rdev;
struct regulator_dev *supply_rdev = NULL;
int ret;
- mutex_lock(&regulator->rdev->mutex);
+ mutex_lock(&rdev->mutex);
regulator->uA_load = 0;
- ret = _regulator_force_disable(regulator->rdev, &supply_rdev);
- mutex_unlock(&regulator->rdev->mutex);
+ ret = _regulator_force_disable(rdev, &supply_rdev);
+ mutex_unlock(&rdev->mutex);
if (supply_rdev)
regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev)));
@@ -1634,6 +1652,9 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
+ min_uV += rdev->constraints->uV_offset;
+ max_uV += rdev->constraints->uV_offset;
+
if (rdev->desc->ops->set_voltage) {
ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
&selector);
@@ -1858,18 +1879,22 @@ EXPORT_SYMBOL_GPL(regulator_sync_voltage);
static int _regulator_get_voltage(struct regulator_dev *rdev)
{
- int sel;
+ int sel, ret;
if (rdev->desc->ops->get_voltage_sel) {
sel = rdev->desc->ops->get_voltage_sel(rdev);
if (sel < 0)
return sel;
- return rdev->desc->ops->list_voltage(rdev, sel);
- }
- if (rdev->desc->ops->get_voltage)
- return rdev->desc->ops->get_voltage(rdev);
- else
+ ret = rdev->desc->ops->list_voltage(rdev, sel);
+ } else if (rdev->desc->ops->get_voltage) {
+ ret = rdev->desc->ops->get_voltage(rdev);
+ } else {
return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+ return ret - rdev->constraints->uV_offset;
}
/**
@@ -2005,7 +2030,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
}
/* constraints check */
- ret = regulator_check_mode(rdev, mode);
+ ret = regulator_mode_constrain(rdev, &mode);
if (ret < 0)
goto out;
@@ -2081,16 +2106,26 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
mutex_lock(&rdev->mutex);
+ /*
+ * first check to see if we can set modes at all, otherwise just
+ * tell the consumer everything is OK.
+ */
regulator->uA_load = uA_load;
ret = regulator_check_drms(rdev);
- if (ret < 0)
+ if (ret < 0) {
+ ret = 0;
goto out;
- ret = -EINVAL;
+ }
- /* sanity check */
if (!rdev->desc->ops->get_optimum_mode)
goto out;
+ /*
+ * we can actually do this so any errors are indicators of
+ * potential real failure.
+ */
+ ret = -EINVAL;
+
/* get output voltage */
output_uV = _regulator_get_voltage(rdev);
if (output_uV <= 0) {
@@ -2116,7 +2151,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
mode = rdev->desc->ops->get_optimum_mode(rdev,
input_uV, output_uV,
total_uA_load);
- ret = regulator_check_mode(rdev, mode);
+ ret = regulator_mode_constrain(rdev, &mode);
if (ret < 0) {
rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
total_uA_load, input_uV, output_uV);
@@ -2589,14 +2624,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
if (ret < 0)
goto scrub;
- /* set supply regulator if it exists */
- if (init_data->supply_regulator && init_data->supply_regulator_dev) {
- dev_err(dev,
- "Supply regulator specified by both name and dev\n");
- ret = -EINVAL;
- goto scrub;
- }
-
if (init_data->supply_regulator) {
struct regulator_dev *r;
int found = 0;
@@ -2621,14 +2648,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
goto scrub;
}
- if (init_data->supply_regulator_dev) {
- dev_warn(dev, "Uses supply_regulator_dev instead of regulator_supply\n");
- ret = set_supply(rdev,
- dev_get_drvdata(init_data->supply_regulator_dev));
- if (ret < 0)
- goto scrub;
- }
-
/* add consumers devices */
for (i = 0; i < init_data->num_consumer_supplies; i++) {
ret = set_consumer_device_supply(rdev,
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 77e0cfb30b23..10d5a1d9768e 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -267,7 +267,6 @@ static int max8997_get_enable_register(struct regulator_dev *rdev,
default:
/* Not controllable or not exists */
return -EINVAL;
- break;
}
return 0;
@@ -1033,11 +1032,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
/* For the safety, set max voltage before setting up */
for (i = 0; i < 8; i++) {
- max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
max_buck1, 0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
max_buck2, 0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
max_buck5, 0x3f);
}
@@ -1114,13 +1113,13 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
/* Initialize all the DVS related BUCK registers */
for (i = 0; i < 8; i++) {
- max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
max8997->buck1_vol[i],
0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
max8997->buck2_vol[i],
0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
max8997->buck5_vol[i],
0x3f);
}
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index f57e9c42fdb4..41a1495eec2b 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -732,13 +732,15 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
if (!pdata->buck1_set1) {
printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set1);
- return -EIO;
+ ret = -EIO;
+ goto err_free_mem;
}
/* Check if SET2 is not equal to 0 */
if (!pdata->buck1_set2) {
printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set2);
- return -EIO;
+ ret = -EIO;
+ goto err_free_mem;
}
gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
@@ -758,7 +760,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
if (ret)
- return ret;
+ goto err_free_mem;
/* Set predefined value for BUCK1 register 2 */
i = 0;
@@ -770,7 +772,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_vol[1] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
if (ret)
- return ret;
+ goto err_free_mem;
/* Set predefined value for BUCK1 register 3 */
i = 0;
@@ -782,7 +784,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_vol[2] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
if (ret)
- return ret;
+ goto err_free_mem;
/* Set predefined value for BUCK1 register 4 */
i = 0;
@@ -794,7 +796,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_vol[3] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
if (ret)
- return ret;
+ goto err_free_mem;
}
@@ -803,7 +805,8 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
if (!pdata->buck2_set3) {
printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck2_set3);
- return -EIO;
+ ret = -EIO;
+ goto err_free_mem;
}
gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
gpio_direction_output(pdata->buck2_set3,
@@ -818,7 +821,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck2_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
if (ret)
- return ret;
+ goto err_free_mem;
/* BUCK2 register 2 */
i = 0;
@@ -830,7 +833,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck2_vol[1] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
if (ret)
- return ret;
+ goto err_free_mem;
}
for (i = 0; i < pdata->num_regulators; i++) {
@@ -860,6 +863,7 @@ err:
if (rdev[i])
regulator_unregister(rdev[i]);
+err_free_mem:
kfree(max8998->rdev);
kfree(max8998);
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 1b8f7398a4a8..3285d41842f2 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -431,7 +431,8 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
- int hi, value, val, mask, id = rdev_get_id(rdev);
+ int hi, value, mask, id = rdev_get_id(rdev);
+ u32 valread;
int ret;
dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
@@ -447,15 +448,16 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_read(priv->mc13xxx,
- mc13892_regulators[id].vsel_reg, &val);
+ mc13892_regulators[id].vsel_reg, &valread);
if (ret)
goto err;
- hi = val & MC13892_SWITCHERS0_SWxHI;
- if (value > 1375)
+ if (value > 1375000)
hi = 1;
- if (value < 1100)
+ else if (value < 1100000)
hi = 0;
+ else
+ hi = valread & MC13892_SWITCHERS0_SWxHI;
if (hi) {
value = (value - 1100000) / 25000;
@@ -464,8 +466,10 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
value = (value - 600000) / 25000;
mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
- ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
- mask, value << mc13892_regulators[id].vsel_shift);
+ valread = (valread & ~mask) |
+ (value << mc13892_regulators[id].vsel_shift);
+ ret = mc13xxx_reg_write(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+ valread);
err:
mc13xxx_unlock(priv->mc13xxx);
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 2bb5de1f2421..bc27ab136378 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
- BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
+ BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
return mc13xxx_regulators[id].voltages[val];
}
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index a4d7f4540c18..1011873896dc 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -158,6 +158,7 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
"failed to register regulator\n");
return ret;
}
+ platform_set_drvdata(pdev, tps6105x);
return 0;
}
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 60a7ca5409e9..fbddc15e1811 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -466,7 +466,6 @@ static struct regulator_ops tps65023_ldo_ops = {
static int __devinit tps_65023_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- static int desc_id;
const struct tps_info *info = (void *)id->driver_data;
struct regulator_init_data *init_data;
struct regulator_dev *rdev;
@@ -499,7 +498,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
tps->info[i] = info;
tps->desc[i].name = info->name;
- tps->desc[i].id = desc_id++;
+ tps->desc[i].id = i;
tps->desc[i].n_voltages = num_voltages[i];
tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
&tps65023_ldo_ops : &tps65023_dcdc_ops);
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 064755290599..bfffabc21eda 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -553,7 +553,6 @@ static __devinit
int tps6507x_pmic_probe(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
- static int desc_id;
struct tps_info *info = &tps6507x_pmic_regs[0];
struct regulator_init_data *init_data;
struct regulator_dev *rdev;
@@ -598,7 +597,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
}
tps->desc[i].name = info->name;
- tps->desc[i].id = desc_id++;
+ tps->desc[i].id = i;
tps->desc[i].n_voltages = num_voltages[i];
tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
&tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
new file mode 100644
index 000000000000..55dd4e6650db
--- /dev/null
+++ b/drivers/regulator/tps65910-regulator.c
@@ -0,0 +1,993 @@
+/*
+ * tps65910.c -- TI tps65910
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65910.h>
+
+#define TPS65910_REG_VRTC 0
+#define TPS65910_REG_VIO 1
+#define TPS65910_REG_VDD1 2
+#define TPS65910_REG_VDD2 3
+#define TPS65910_REG_VDD3 4
+#define TPS65910_REG_VDIG1 5
+#define TPS65910_REG_VDIG2 6
+#define TPS65910_REG_VPLL 7
+#define TPS65910_REG_VDAC 8
+#define TPS65910_REG_VAUX1 9
+#define TPS65910_REG_VAUX2 10
+#define TPS65910_REG_VAUX33 11
+#define TPS65910_REG_VMMC 12
+
+#define TPS65911_REG_VDDCTRL 4
+#define TPS65911_REG_LDO1 5
+#define TPS65911_REG_LDO2 6
+#define TPS65911_REG_LDO3 7
+#define TPS65911_REG_LDO4 8
+#define TPS65911_REG_LDO5 9
+#define TPS65911_REG_LDO6 10
+#define TPS65911_REG_LDO7 11
+#define TPS65911_REG_LDO8 12
+
+#define TPS65910_NUM_REGULATOR 13
+#define TPS65910_SUPPLY_STATE_ENABLED 0x1
+
+/* supported VIO voltages in millivolts */
+static const u16 VIO_VSEL_table[] = {
+ 1500, 1800, 2500, 3300,
+};
+
+/* VSEL tables for TPS65910-specific LDOs and DCDCs */
+
+/* supported VDD3 voltages in millivolts */
+static const u16 VDD3_VSEL_table[] = {
+ 5000,
+};
+
+/* supported VDIG1 voltages in millivolts */
+static const u16 VDIG1_VSEL_table[] = {
+ 1200, 1500, 1800, 2700,
+};
+
+/* supported VDIG2 voltages in millivolts */
+static const u16 VDIG2_VSEL_table[] = {
+ 1000, 1100, 1200, 1800,
+};
+
+/* supported VPLL voltages in millivolts */
+static const u16 VPLL_VSEL_table[] = {
+ 1000, 1100, 1800, 2500,
+};
+
+/* supported VDAC voltages in millivolts */
+static const u16 VDAC_VSEL_table[] = {
+ 1800, 2600, 2800, 2850,
+};
+
+/* supported VAUX1 voltages in millivolts */
+static const u16 VAUX1_VSEL_table[] = {
+ 1800, 2500, 2800, 2850,
+};
+
+/* supported VAUX2 voltages in millivolts */
+static const u16 VAUX2_VSEL_table[] = {
+ 1800, 2800, 2900, 3300,
+};
+
+/* supported VAUX33 voltages in millivolts */
+static const u16 VAUX33_VSEL_table[] = {
+ 1800, 2000, 2800, 3300,
+};
+
+/* supported VMMC voltages in millivolts */
+static const u16 VMMC_VSEL_table[] = {
+ 1800, 2800, 3000, 3300,
+};
+
+struct tps_info {
+ const char *name;
+ unsigned min_uV;
+ unsigned max_uV;
+ u8 table_len;
+ const u16 *table;
+};
+
+static struct tps_info tps65910_regs[] = {
+ {
+ .name = "VRTC",
+ },
+ {
+ .name = "VIO",
+ .min_uV = 1500000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VIO_VSEL_table),
+ .table = VIO_VSEL_table,
+ },
+ {
+ .name = "VDD1",
+ .min_uV = 600000,
+ .max_uV = 4500000,
+ },
+ {
+ .name = "VDD2",
+ .min_uV = 600000,
+ .max_uV = 4500000,
+ },
+ {
+ .name = "VDD3",
+ .min_uV = 5000000,
+ .max_uV = 5000000,
+ .table_len = ARRAY_SIZE(VDD3_VSEL_table),
+ .table = VDD3_VSEL_table,
+ },
+ {
+ .name = "VDIG1",
+ .min_uV = 1200000,
+ .max_uV = 2700000,
+ .table_len = ARRAY_SIZE(VDIG1_VSEL_table),
+ .table = VDIG1_VSEL_table,
+ },
+ {
+ .name = "VDIG2",
+ .min_uV = 1000000,
+ .max_uV = 1800000,
+ .table_len = ARRAY_SIZE(VDIG2_VSEL_table),
+ .table = VDIG2_VSEL_table,
+ },
+ {
+ .name = "VPLL",
+ .min_uV = 1000000,
+ .max_uV = 2500000,
+ .table_len = ARRAY_SIZE(VPLL_VSEL_table),
+ .table = VPLL_VSEL_table,
+ },
+ {
+ .name = "VDAC",
+ .min_uV = 1800000,
+ .max_uV = 2850000,
+ .table_len = ARRAY_SIZE(VDAC_VSEL_table),
+ .table = VDAC_VSEL_table,
+ },
+ {
+ .name = "VAUX1",
+ .min_uV = 1800000,
+ .max_uV = 2850000,
+ .table_len = ARRAY_SIZE(VAUX1_VSEL_table),
+ .table = VAUX1_VSEL_table,
+ },
+ {
+ .name = "VAUX2",
+ .min_uV = 1800000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VAUX2_VSEL_table),
+ .table = VAUX2_VSEL_table,
+ },
+ {
+ .name = "VAUX33",
+ .min_uV = 1800000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VAUX33_VSEL_table),
+ .table = VAUX33_VSEL_table,
+ },
+ {
+ .name = "VMMC",
+ .min_uV = 1800000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VMMC_VSEL_table),
+ .table = VMMC_VSEL_table,
+ },
+};
+
+static struct tps_info tps65911_regs[] = {
+	{
+		.name = "VRTC",
+	},
+	{
+ .name = "VIO",
+ .min_uV = 1500000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VIO_VSEL_table),
+ .table = VIO_VSEL_table,
+ },
+ {
+ .name = "VDD1",
+ .min_uV = 600000,
+ .max_uV = 4500000,
+ },
+ {
+ .name = "VDD2",
+ .min_uV = 600000,
+ .max_uV = 4500000,
+ },
+ {
+ .name = "VDDCTRL",
+ .min_uV = 600000,
+ .max_uV = 1400000,
+ },
+ {
+ .name = "LDO1",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO2",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO3",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO4",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO5",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO6",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO7",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+ {
+ .name = "LDO8",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ },
+};
+
+struct tps65910_reg {
+ struct regulator_desc desc[TPS65910_NUM_REGULATOR];
+ struct tps65910 *mfd;
+ struct regulator_dev *rdev[TPS65910_NUM_REGULATOR];
+ struct tps_info *info[TPS65910_NUM_REGULATOR];
+ struct mutex mutex;
+ int mode;
+ int (*get_ctrl_reg)(int);
+};
+
+static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
+{
+ u8 val;
+ int err;
+
+ err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+{
+ return pmic->mfd->write(pmic->mfd, reg, 1, &val);
+}
+
+static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
+ u8 set_mask, u8 clear_mask)
+{
+ int err, data;
+
+ mutex_lock(&pmic->mutex);
+
+ data = tps65910_read(pmic, reg);
+ if (data < 0) {
+ dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
+ err = data;
+ goto out;
+ }
+
+ data &= ~clear_mask;
+ data |= set_mask;
+ err = tps65910_write(pmic, reg, data);
+ if (err)
+ dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&pmic->mutex);
+ return err;
+}
+
+static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
+{
+ int data;
+
+ mutex_lock(&pmic->mutex);
+
+ data = tps65910_read(pmic, reg);
+ if (data < 0)
+ dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
+
+ mutex_unlock(&pmic->mutex);
+ return data;
+}
+
+static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+{
+ int err;
+
+ mutex_lock(&pmic->mutex);
+
+ err = tps65910_write(pmic, reg, val);
+ if (err < 0)
+ dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
+
+ mutex_unlock(&pmic->mutex);
+ return err;
+}
+
+static int tps65910_get_ctrl_register(int id)
+{
+ switch (id) {
+ case TPS65910_REG_VRTC:
+ return TPS65910_VRTC;
+ case TPS65910_REG_VIO:
+ return TPS65910_VIO;
+ case TPS65910_REG_VDD1:
+ return TPS65910_VDD1;
+ case TPS65910_REG_VDD2:
+ return TPS65910_VDD2;
+ case TPS65910_REG_VDD3:
+ return TPS65910_VDD3;
+ case TPS65910_REG_VDIG1:
+ return TPS65910_VDIG1;
+ case TPS65910_REG_VDIG2:
+ return TPS65910_VDIG2;
+ case TPS65910_REG_VPLL:
+ return TPS65910_VPLL;
+ case TPS65910_REG_VDAC:
+ return TPS65910_VDAC;
+ case TPS65910_REG_VAUX1:
+ return TPS65910_VAUX1;
+ case TPS65910_REG_VAUX2:
+ return TPS65910_VAUX2;
+ case TPS65910_REG_VAUX33:
+ return TPS65910_VAUX33;
+ case TPS65910_REG_VMMC:
+ return TPS65910_VMMC;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tps65911_get_ctrl_register(int id)
+{
+ switch (id) {
+ case TPS65910_REG_VRTC:
+ return TPS65910_VRTC;
+ case TPS65910_REG_VIO:
+ return TPS65910_VIO;
+ case TPS65910_REG_VDD1:
+ return TPS65910_VDD1;
+ case TPS65910_REG_VDD2:
+ return TPS65910_VDD2;
+ case TPS65911_REG_VDDCTRL:
+ return TPS65911_VDDCTRL;
+ case TPS65911_REG_LDO1:
+ return TPS65911_LDO1;
+ case TPS65911_REG_LDO2:
+ return TPS65911_LDO2;
+ case TPS65911_REG_LDO3:
+ return TPS65911_LDO3;
+ case TPS65911_REG_LDO4:
+ return TPS65911_LDO4;
+ case TPS65911_REG_LDO5:
+ return TPS65911_LDO5;
+ case TPS65911_REG_LDO6:
+ return TPS65911_LDO6;
+ case TPS65911_REG_LDO7:
+ return TPS65911_LDO7;
+ case TPS65911_REG_LDO8:
+ return TPS65911_LDO8;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tps65910_is_enabled(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, value, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ value = tps65910_reg_read(pmic, reg);
+ if (value < 0)
+ return value;
+
+ return value & TPS65910_SUPPLY_STATE_ENABLED;
+}
+
+static int tps65910_enable(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65910 *mfd = pmic->mfd;
+ int reg, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65910_set_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
+}
+
+static int tps65910_disable(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65910 *mfd = pmic->mfd;
+ int reg, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65910_clear_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
+}
+
+
+static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65910 *mfd = pmic->mfd;
+ int reg, value, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ return tps65910_modify_bits(pmic, reg, LDO_ST_ON_BIT,
+ LDO_ST_MODE_BIT);
+ case REGULATOR_MODE_IDLE:
+ value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
+ return tps65910_set_bits(mfd, reg, value);
+ case REGULATOR_MODE_STANDBY:
+ return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
+ }
+
+ return -EINVAL;
+}
+
+static unsigned int tps65910_get_mode(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, value, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ value = tps65910_reg_read(pmic, reg);
+ if (value < 0)
+ return value;
+
+ if (value & LDO_ST_ON_BIT)
+ return REGULATOR_MODE_STANDBY;
+ else if (value & LDO_ST_MODE_BIT)
+ return REGULATOR_MODE_IDLE;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int tps65910_get_voltage_dcdc(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int id = rdev_get_id(dev), voltage = 0;
+ int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;
+
+ switch (id) {
+ case TPS65910_REG_VDD1:
+ opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
+ mult = tps65910_reg_read(pmic, TPS65910_VDD1);
+ mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
+ srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
+ sr = opvsel & VDD1_OP_CMD_MASK;
+ opvsel &= VDD1_OP_SEL_MASK;
+ srvsel &= VDD1_SR_SEL_MASK;
+ vselmax = 75;
+ break;
+ case TPS65910_REG_VDD2:
+ opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
+ mult = tps65910_reg_read(pmic, TPS65910_VDD2);
+ mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
+ srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
+ sr = opvsel & VDD2_OP_CMD_MASK;
+ opvsel &= VDD2_OP_SEL_MASK;
+ srvsel &= VDD2_SR_SEL_MASK;
+ vselmax = 75;
+ break;
+ case TPS65911_REG_VDDCTRL:
+ opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
+ srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
+ sr = opvsel & VDDCTRL_OP_CMD_MASK;
+ opvsel &= VDDCTRL_OP_SEL_MASK;
+ srvsel &= VDDCTRL_SR_SEL_MASK;
+ vselmax = 64;
+ break;
+ }
+
+	/* a gain-select value of 0 also means a multiplier of 1;
+	 * values 2 and 3 are used as-is */
+	if (!mult)
+		mult = 1;
+
+ if (sr) {
+ /* normalise to valid range */
+ if (srvsel < 3)
+ srvsel = 3;
+ if (srvsel > vselmax)
+ srvsel = vselmax;
+ srvsel -= 3;
+
+ voltage = (srvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
+ } else {
+
+ /* normalise to valid range*/
+ if (opvsel < 3)
+ opvsel = 3;
+ if (opvsel > vselmax)
+ opvsel = vselmax;
+ opvsel -= 3;
+
+ voltage = (opvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
+ }
+
+ voltage *= mult;
+
+ return voltage;
+}
+
+static int tps65910_get_voltage(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, value, id = rdev_get_id(dev), voltage = 0;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ value = tps65910_reg_read(pmic, reg);
+ if (value < 0)
+ return value;
+
+ switch (id) {
+ case TPS65910_REG_VIO:
+ case TPS65910_REG_VDIG1:
+ case TPS65910_REG_VDIG2:
+ case TPS65910_REG_VPLL:
+ case TPS65910_REG_VDAC:
+ case TPS65910_REG_VAUX1:
+ case TPS65910_REG_VAUX2:
+ case TPS65910_REG_VAUX33:
+ case TPS65910_REG_VMMC:
+ value &= LDO_SEL_MASK;
+ value >>= LDO_SEL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ voltage = pmic->info[id]->table[value] * 1000;
+
+ return voltage;
+}
+
+static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
+{
+ return 5 * 1000 * 1000;
+}
+
+static int tps65911_get_voltage(struct regulator_dev *dev)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int step_mv, id = rdev_get_id(dev);
+ u8 value, reg;
+
+ reg = pmic->get_ctrl_reg(id);
+
+ value = tps65910_reg_read(pmic, reg);
+
+ switch (id) {
+ case TPS65911_REG_LDO1:
+ case TPS65911_REG_LDO2:
+ case TPS65911_REG_LDO4:
+ value &= LDO1_SEL_MASK;
+ value >>= LDO_SEL_SHIFT;
+ /* The first 5 values of the selector correspond to 1V */
+ if (value < 5)
+ value = 0;
+ else
+ value -= 4;
+
+ step_mv = 50;
+ break;
+ case TPS65911_REG_LDO3:
+ case TPS65911_REG_LDO5:
+ case TPS65911_REG_LDO6:
+ case TPS65911_REG_LDO7:
+ case TPS65911_REG_LDO8:
+ value &= LDO3_SEL_MASK;
+ value >>= LDO_SEL_SHIFT;
+ /* The first 3 values of the selector correspond to 1V */
+ if (value < 3)
+ value = 0;
+ else
+ value -= 2;
+
+ step_mv = 100;
+ break;
+ case TPS65910_REG_VIO:
+		return pmic->info[id]->table[value] * 1000;
+ default:
+ return -EINVAL;
+ }
+
+ return (LDO_MIN_VOLT + value * step_mv) * 1000;
+}
+
+static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int id = rdev_get_id(dev), vsel;
+ int dcdc_mult = 0;
+
+ switch (id) {
+ case TPS65910_REG_VDD1:
+ dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ if (dcdc_mult == 1)
+ dcdc_mult--;
+ vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+
+ tps65910_modify_bits(pmic, TPS65910_VDD1,
+ (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
+ VDD1_VGAIN_SEL_MASK);
+ tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
+ break;
+ case TPS65910_REG_VDD2:
+ dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ if (dcdc_mult == 1)
+ dcdc_mult--;
+ vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+
+ tps65910_modify_bits(pmic, TPS65910_VDD2,
+ (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
+				VDD2_VGAIN_SEL_MASK);
+ tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
+ break;
+ case TPS65911_REG_VDDCTRL:
+ vsel = selector;
+ tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
+ }
+
+ return 0;
+}
+
+static int tps65910_set_voltage(struct regulator_dev *dev, unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ switch (id) {
+ case TPS65910_REG_VIO:
+ case TPS65910_REG_VDIG1:
+ case TPS65910_REG_VDIG2:
+ case TPS65910_REG_VPLL:
+ case TPS65910_REG_VDAC:
+ case TPS65910_REG_VAUX1:
+ case TPS65910_REG_VAUX2:
+ case TPS65910_REG_VAUX33:
+ case TPS65910_REG_VMMC:
+ return tps65910_modify_bits(pmic, reg,
+ (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
+ }
+
+ return -EINVAL;
+}
+
+static int tps65911_set_voltage(struct regulator_dev *dev, unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int reg, id = rdev_get_id(dev);
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ switch (id) {
+ case TPS65911_REG_LDO1:
+ case TPS65911_REG_LDO2:
+ case TPS65911_REG_LDO4:
+ return tps65910_modify_bits(pmic, reg,
+ (selector << LDO_SEL_SHIFT), LDO1_SEL_MASK);
+ case TPS65911_REG_LDO3:
+ case TPS65911_REG_LDO5:
+ case TPS65911_REG_LDO6:
+ case TPS65911_REG_LDO7:
+ case TPS65911_REG_LDO8:
+ case TPS65910_REG_VIO:
+ return tps65910_modify_bits(pmic, reg,
+ (selector << LDO_SEL_SHIFT), LDO3_SEL_MASK);
+ }
+
+ return -EINVAL;
+}
+
+
+static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ int volt, mult = 1, id = rdev_get_id(dev);
+
+	switch (id) {
+	case TPS65910_REG_VDD1:
+	case TPS65910_REG_VDD2:
+		mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+		volt = VDD1_2_MIN_VOLT +
+				(selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
+		break;
+	case TPS65911_REG_VDDCTRL:
+		volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+ return volt * 100 * mult;
+}
+
+static int tps65910_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int id = rdev_get_id(dev), voltage;
+
+ if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC)
+ return -EINVAL;
+
+ if (selector >= pmic->info[id]->table_len)
+ return -EINVAL;
+ else
+ voltage = pmic->info[id]->table[selector] * 1000;
+
+ return voltage;
+}
+
+static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
+{
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int step_mv = 0, id = rdev_get_id(dev);
+
+ switch(id) {
+ case TPS65911_REG_LDO1:
+ case TPS65911_REG_LDO2:
+ case TPS65911_REG_LDO4:
+ /* The first 5 values of the selector correspond to 1V */
+ if (selector < 5)
+ selector = 0;
+ else
+ selector -= 4;
+
+ step_mv = 50;
+ break;
+ case TPS65911_REG_LDO3:
+ case TPS65911_REG_LDO5:
+ case TPS65911_REG_LDO6:
+ case TPS65911_REG_LDO7:
+ case TPS65911_REG_LDO8:
+ /* The first 3 values of the selector correspond to 1V */
+ if (selector < 3)
+ selector = 0;
+ else
+ selector -= 2;
+
+ step_mv = 100;
+ break;
+ case TPS65910_REG_VIO:
+ return pmic->info[id]->table[selector] * 1000;
+ default:
+ return -EINVAL;
+ }
+
+ return (LDO_MIN_VOLT + selector * step_mv) * 1000;
+}
+
+/* Regulator ops (except VRTC) */
+static struct regulator_ops tps65910_ops_dcdc = {
+ .is_enabled = tps65910_is_enabled,
+ .enable = tps65910_enable,
+ .disable = tps65910_disable,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage = tps65910_get_voltage_dcdc,
+ .set_voltage_sel = tps65910_set_voltage_dcdc,
+ .list_voltage = tps65910_list_voltage_dcdc,
+};
+
+static struct regulator_ops tps65910_ops_vdd3 = {
+ .is_enabled = tps65910_is_enabled,
+ .enable = tps65910_enable,
+ .disable = tps65910_disable,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage = tps65910_get_voltage_vdd3,
+ .list_voltage = tps65910_list_voltage,
+};
+
+static struct regulator_ops tps65910_ops = {
+ .is_enabled = tps65910_is_enabled,
+ .enable = tps65910_enable,
+ .disable = tps65910_disable,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage = tps65910_get_voltage,
+ .set_voltage_sel = tps65910_set_voltage,
+ .list_voltage = tps65910_list_voltage,
+};
+
+static struct regulator_ops tps65911_ops = {
+ .is_enabled = tps65910_is_enabled,
+ .enable = tps65910_enable,
+ .disable = tps65910_disable,
+ .set_mode = tps65910_set_mode,
+ .get_mode = tps65910_get_mode,
+ .get_voltage = tps65911_get_voltage,
+ .set_voltage_sel = tps65911_set_voltage,
+ .list_voltage = tps65911_list_voltage,
+};
+
+static __devinit int tps65910_probe(struct platform_device *pdev)
+{
+ struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+ struct tps_info *info;
+ struct regulator_init_data *reg_data;
+ struct regulator_dev *rdev;
+ struct tps65910_reg *pmic;
+ struct tps65910_board *pmic_plat_data;
+ int i, err;
+
+ pmic_plat_data = dev_get_platdata(tps65910->dev);
+ if (!pmic_plat_data)
+ return -EINVAL;
+
+ reg_data = pmic_plat_data->tps65910_pmic_init_data;
+
+ pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ mutex_init(&pmic->mutex);
+ pmic->mfd = tps65910;
+ platform_set_drvdata(pdev, pmic);
+
+ /* Give control of all register to control port */
+ tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+ DEVCTRL_SR_CTL_I2C_SEL_MASK);
+
+	switch (tps65910_chip_id(tps65910)) {
+	case TPS65910:
+		pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
+		info = tps65910_regs;
+		break;
+	case TPS65911:
+		pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
+		info = tps65911_regs;
+		break;
+	default:
+		pr_err("Invalid tps chip version\n");
+		kfree(pmic);
+		return -ENODEV;
+	}
+
+ for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) {
+ /* Register the regulators */
+ pmic->info[i] = info;
+
+ pmic->desc[i].name = info->name;
+ pmic->desc[i].id = i;
+ pmic->desc[i].n_voltages = info->table_len;
+
+ if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
+ pmic->desc[i].ops = &tps65910_ops_dcdc;
+ } else if (i == TPS65910_REG_VDD3) {
+ if (tps65910_chip_id(tps65910) == TPS65910)
+ pmic->desc[i].ops = &tps65910_ops_vdd3;
+ else
+ pmic->desc[i].ops = &tps65910_ops_dcdc;
+ } else {
+ if (tps65910_chip_id(tps65910) == TPS65910)
+ pmic->desc[i].ops = &tps65910_ops;
+ else
+ pmic->desc[i].ops = &tps65911_ops;
+ }
+
+ pmic->desc[i].type = REGULATOR_VOLTAGE;
+ pmic->desc[i].owner = THIS_MODULE;
+
+ rdev = regulator_register(&pmic->desc[i],
+ tps65910->dev, reg_data, pmic);
+ if (IS_ERR(rdev)) {
+ dev_err(tps65910->dev,
+ "failed to register %s regulator\n",
+ pdev->name);
+ err = PTR_ERR(rdev);
+ goto err;
+ }
+
+ /* Save regulator for cleanup */
+ pmic->rdev[i] = rdev;
+ }
+ return 0;
+
+err:
+ while (--i >= 0)
+ regulator_unregister(pmic->rdev[i]);
+
+ kfree(pmic);
+ return err;
+}
+
+static int __devexit tps65910_remove(struct platform_device *pdev)
+{
+ struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < TPS65910_NUM_REGULATOR; i++)
+ regulator_unregister(tps65910_reg->rdev[i]);
+
+ kfree(tps65910_reg);
+ return 0;
+}
+
+static struct platform_driver tps65910_driver = {
+ .driver = {
+ .name = "tps65910-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65910_probe,
+ .remove = __devexit_p(tps65910_remove),
+};
+
+static int __init tps65910_init(void)
+{
+ return platform_driver_register(&tps65910_driver);
+}
+subsys_initcall(tps65910_init);
+
+static void __exit tps65910_cleanup(void)
+{
+ platform_driver_unregister(&tps65910_driver);
+}
+module_exit(tps65910_cleanup);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65910/TPS65911 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65910-pmic");
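
A minimal board-code sketch of the platform data tps65910_probe() expects; the
tps65910_pmic_init_data field and the regulator ID ordering are taken from the
driver above, while the array name, variable name and constraint values are
illustrative assumptions only, not part of this patch:

#include <linux/regulator/machine.h>
#include <linux/mfd/tps65910.h>

/* One regulator_init_data entry per regulator ID (0..12), in the
 * TPS65910_REG_* order the probe loop above walks with reg_data++. */
static struct regulator_init_data board_tps65910_regulators[13] = {
	[2] = {			/* VDD1 */
		.constraints = {
			.min_uV		= 600000,
			.max_uV		= 1500000,
			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE |
					  REGULATOR_CHANGE_STATUS,
		},
	},
	/* ... entries for the remaining regulators ... */
};

static struct tps65910_board board_tps65910_pdata = {
	.tps65910_pmic_init_data = board_tps65910_regulators,
};
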
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 6a292852a358..87fe0f75a56e 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -51,8 +51,13 @@ struct twlreg_info {
u16 min_mV;
u16 max_mV;
+ u8 flags;
+
/* used by regulator core */
struct regulator_desc desc;
+
+ /* chip specific features */
+ unsigned long features;
};
@@ -70,12 +75,35 @@ struct twlreg_info {
#define VREG_TRANS 1
#define VREG_STATE 2
#define VREG_VOLTAGE 3
+#define VREG_VOLTAGE_SMPS 4
/* TWL6030 Misc register offsets */
#define VREG_BC_ALL 1
#define VREG_BC_REF 2
#define VREG_BC_PROC 3
#define VREG_BC_CLK_RST 4
+/* TWL6030 LDO register values for CFG_STATE */
+#define TWL6030_CFG_STATE_OFF 0x00
+#define TWL6030_CFG_STATE_ON 0x01
+#define TWL6030_CFG_STATE_OFF2 0x02
+#define TWL6030_CFG_STATE_SLEEP 0x03
+#define TWL6030_CFG_STATE_GRP_SHIFT 5
+#define TWL6030_CFG_STATE_APP_SHIFT 2
+#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
+#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
+ TWL6030_CFG_STATE_APP_SHIFT)
+
+/* Flags for SMPS Voltage reading */
+#define SMPS_OFFSET_EN BIT(0)
+#define SMPS_EXTENDED_EN BIT(1)
+
+/* twl6025 SMPS EPROM values */
+#define TWL6030_SMPS_OFFSET 0xB0
+#define TWL6030_SMPS_MULT 0xB3
+#define SMPS_MULTOFFSET_SMPS4 BIT(0)
+#define SMPS_MULTOFFSET_VIO BIT(1)
+#define SMPS_MULTOFFSET_SMPS3 BIT(6)
+
static inline int
twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
{
@@ -118,21 +146,38 @@ static int twlreg_grp(struct regulator_dev *rdev)
#define P2_GRP_6030 BIT(1) /* "peripherals" */
#define P1_GRP_6030 BIT(0) /* CPU/Linux */
-static int twlreg_is_enabled(struct regulator_dev *rdev)
+static int twl4030reg_is_enabled(struct regulator_dev *rdev)
{
int state = twlreg_grp(rdev);
if (state < 0)
return state;
- if (twl_class_is_4030())
- state &= P1_GRP_4030;
+ return state & P1_GRP_4030;
+}
+
+static int twl6030reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0, val;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+ if (grp < 0)
+ return grp;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp &= P1_GRP_6030;
else
- state &= P1_GRP_6030;
- return state;
+ grp = 1;
+
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+ val = TWL6030_CFG_STATE_APP(val);
+
+ return grp && (val == TWL6030_CFG_STATE_ON);
}
-static int twlreg_enable(struct regulator_dev *rdev)
+static int twl4030reg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
@@ -142,10 +187,7 @@ static int twlreg_enable(struct regulator_dev *rdev)
if (grp < 0)
return grp;
- if (twl_class_is_4030())
- grp |= P1_GRP_4030;
- else
- grp |= P1_GRP_6030;
+ grp |= P1_GRP_4030;
ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
@@ -154,29 +196,63 @@ static int twlreg_enable(struct regulator_dev *rdev)
return ret;
}
-static int twlreg_disable(struct regulator_dev *rdev)
+static int twl6030reg_enable(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0;
+ int ret;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+ if (grp < 0)
+ return grp;
+
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ grp << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_ON);
+
+ udelay(info->delay);
+
+ return ret;
+}
+
+static int twl4030reg_disable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
+ int ret;
grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
if (grp < 0)
return grp;
- if (twl_class_is_4030())
- grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
- else
- grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030);
+ grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
- return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
+
+ return ret;
}
-static int twlreg_get_status(struct regulator_dev *rdev)
+static int twl6030reg_disable(struct regulator_dev *rdev)
{
- int state = twlreg_grp(rdev);
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0;
+ int ret;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030;
+
+ /* For 6030, set the off state for all grps enabled */
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ (grp) << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_OFF);
+
+ return ret;
+}
- if (twl_class_is_6030())
- return 0; /* FIXME return for 6030 regulator */
+static int twl4030reg_get_status(struct regulator_dev *rdev)
+{
+ int state = twlreg_grp(rdev);
if (state < 0)
return state;
@@ -190,15 +266,39 @@ static int twlreg_get_status(struct regulator_dev *rdev)
: REGULATOR_STATUS_STANDBY;
}
-static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
+static int twl6030reg_get_status(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ val = twlreg_grp(rdev);
+ if (val < 0)
+ return val;
+
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+
+ switch (TWL6030_CFG_STATE_APP(val)) {
+ case TWL6030_CFG_STATE_ON:
+ return REGULATOR_STATUS_NORMAL;
+
+ case TWL6030_CFG_STATE_SLEEP:
+ return REGULATOR_STATUS_STANDBY;
+
+ case TWL6030_CFG_STATE_OFF:
+ case TWL6030_CFG_STATE_OFF2:
+ default:
+ break;
+ }
+
+ return REGULATOR_STATUS_OFF;
+}
+
+static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
unsigned message;
int status;
- if (twl_class_is_6030())
- return 0; /* FIXME return for 6030 regulator */
-
/* We can only set the mode through state machine commands... */
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -227,6 +327,36 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
}
+static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int grp = 0;
+ int val;
+
+ if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+
+ if (grp < 0)
+ return grp;
+
+ /* Compose the state register settings */
+ val = grp << TWL6030_CFG_STATE_GRP_SHIFT;
+ /* We can only set the mode through state machine commands... */
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val |= TWL6030_CFG_STATE_ON;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val |= TWL6030_CFG_STATE_SLEEP;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val);
+}
+
/*----------------------------------------------------------------------*/
/*
@@ -375,13 +505,13 @@ static struct regulator_ops twl4030ldo_ops = {
.set_voltage = twl4030ldo_set_voltage,
.get_voltage = twl4030ldo_get_voltage,
- .enable = twlreg_enable,
- .disable = twlreg_disable,
- .is_enabled = twlreg_is_enabled,
+ .enable = twl4030reg_enable,
+ .disable = twl4030reg_disable,
+ .is_enabled = twl4030reg_is_enabled,
- .set_mode = twlreg_set_mode,
+ .set_mode = twl4030reg_set_mode,
- .get_status = twlreg_get_status,
+ .get_status = twl4030reg_get_status,
};
static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
@@ -433,13 +563,13 @@ static struct regulator_ops twl6030ldo_ops = {
.set_voltage = twl6030ldo_set_voltage,
.get_voltage = twl6030ldo_get_voltage,
- .enable = twlreg_enable,
- .disable = twlreg_disable,
- .is_enabled = twlreg_is_enabled,
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
- .set_mode = twlreg_set_mode,
+ .set_mode = twl6030reg_set_mode,
- .get_status = twlreg_get_status,
+ .get_status = twl6030reg_get_status,
};
/*----------------------------------------------------------------------*/
@@ -461,25 +591,242 @@ static int twlfixed_get_voltage(struct regulator_dev *rdev)
return info->min_mV * 1000;
}
-static struct regulator_ops twlfixed_ops = {
+static struct regulator_ops twl4030fixed_ops = {
+ .list_voltage = twlfixed_list_voltage,
+
+ .get_voltage = twlfixed_get_voltage,
+
+ .enable = twl4030reg_enable,
+ .disable = twl4030reg_disable,
+ .is_enabled = twl4030reg_is_enabled,
+
+ .set_mode = twl4030reg_set_mode,
+
+ .get_status = twl4030reg_get_status,
+};
+
+static struct regulator_ops twl6030fixed_ops = {
.list_voltage = twlfixed_list_voltage,
.get_voltage = twlfixed_get_voltage,
- .enable = twlreg_enable,
- .disable = twlreg_disable,
- .is_enabled = twlreg_is_enabled,
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
- .set_mode = twlreg_set_mode,
+ .set_mode = twl6030reg_set_mode,
- .get_status = twlreg_get_status,
+ .get_status = twl6030reg_get_status,
};
static struct regulator_ops twl6030_fixed_resource = {
- .enable = twlreg_enable,
- .disable = twlreg_disable,
- .is_enabled = twlreg_is_enabled,
- .get_status = twlreg_get_status,
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
+ .get_status = twl6030reg_get_status,
+};
+
+/*
+ * SMPS status and control
+ */
+
+static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ int voltage = 0;
+
+ switch (info->flags) {
+ case SMPS_OFFSET_EN:
+ voltage = 100000;
+ /* fall through */
+ case 0:
+ switch (index) {
+ case 0:
+ voltage = 0;
+ break;
+ case 58:
+ voltage = 1350 * 1000;
+ break;
+ case 59:
+ voltage = 1500 * 1000;
+ break;
+ case 60:
+ voltage = 1800 * 1000;
+ break;
+ case 61:
+ voltage = 1900 * 1000;
+ break;
+ case 62:
+ voltage = 2100 * 1000;
+ break;
+ default:
+ voltage += (600000 + (12500 * (index - 1)));
+ }
+ break;
+ case SMPS_EXTENDED_EN:
+ switch (index) {
+ case 0:
+ voltage = 0;
+ break;
+ case 58:
+ voltage = 2084 * 1000;
+ break;
+ case 59:
+ voltage = 2315 * 1000;
+ break;
+ case 60:
+ voltage = 2778 * 1000;
+ break;
+ case 61:
+ voltage = 2932 * 1000;
+ break;
+ case 62:
+ voltage = 3241 * 1000;
+ break;
+ default:
+ voltage = (1852000 + (38600 * (index - 1)));
+ }
+ break;
+ case SMPS_OFFSET_EN | SMPS_EXTENDED_EN:
+ switch (index) {
+ case 0:
+ voltage = 0;
+ break;
+ case 58:
+ voltage = 4167 * 1000;
+ break;
+ case 59:
+ voltage = 2315 * 1000;
+ break;
+ case 60:
+ voltage = 2778 * 1000;
+ break;
+ case 61:
+ voltage = 2932 * 1000;
+ break;
+ case 62:
+ voltage = 3241 * 1000;
+ break;
+ default:
+ voltage = (2161000 + (38600 * (index - 1)));
+ }
+ break;
+ }
+
+ return voltage;
+}
+
+static int
+twl6030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = 0;
+
+ switch (info->flags) {
+ case 0:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 600000) && (max_uV <= 1300000)) {
+ vsel = (min_uV - 600000) / 125;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ /* Values 1..57 for vsel are linear and can be calculated;
+ * values 58..62 are non-linear.
+ */
+ else if ((min_uV > 1900000) && (max_uV >= 2100000))
+ vsel = 62;
+ else if ((min_uV > 1800000) && (max_uV >= 1900000))
+ vsel = 61;
+ else if ((min_uV > 1500000) && (max_uV >= 1800000))
+ vsel = 60;
+ else if ((min_uV > 1350000) && (max_uV >= 1500000))
+ vsel = 59;
+ else if ((min_uV > 1300000) && (max_uV >= 1350000))
+ vsel = 58;
+ else
+ return -EINVAL;
+ break;
+ case SMPS_OFFSET_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 700000) && (max_uV <= 1420000)) {
+ vsel = (min_uV - 700000) / 125;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ /* Values 1..57 for vsel are linear and can be calculated;
+ * values 58..62 are non-linear.
+ */
+ else if ((min_uV > 1900000) && (max_uV >= 2100000))
+ vsel = 62;
+ else if ((min_uV > 1800000) && (max_uV >= 1900000))
+ vsel = 61;
+ else if ((min_uV > 1350000) && (max_uV >= 1800000))
+ vsel = 60;
+ else if ((min_uV > 1350000) && (max_uV >= 1500000))
+ vsel = 59;
+ else if ((min_uV > 1300000) && (max_uV >= 1350000))
+ vsel = 58;
+ else
+ return -EINVAL;
+ break;
+ case SMPS_EXTENDED_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
+ vsel = (min_uV - 1852000) / 386;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ break;
+ case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
+ if (min_uV == 0)
+ vsel = 0;
+ else if ((min_uV >= 2161000) && (max_uV <= 4321000)) {
+ vsel = (min_uV - 1852000) / 386;
+ if (vsel % 100)
+ vsel += 100;
+ vsel /= 100;
+ vsel++;
+ }
+ break;
+ }
+
+ *selector = vsel;
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS,
+ vsel);
+}
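The two-stage rounding above (divide by 125, round up to the next multiple of 100, divide by 100, then add one because selector 0 means "off") is easier to see in isolation. Below is a minimal userspace sketch, for illustration only, that mirrors the flags == 0 arithmetic together with the 600 mV base and 12.5 mV step from twl6030smps_list_voltage; the helper names are invented for the example.

	/* Illustrative only: mirrors the flags == 0 selector math above. */
	#include <stdio.h>

	#define BASE_UV  600000   /* selector 1 -> 600 mV, as in twl6030smps_list_voltage */
	#define STEP_UV  12500    /* 12.5 mV per selector step in the linear range */

	static int smps_uv_to_sel(int min_uV)
	{
		int vsel;

		if (min_uV == 0)
			return 0;                 /* selector 0 means "off" */

		vsel = (min_uV - BASE_UV) / 125;  /* same two-stage rounding as the driver */
		if (vsel % 100)
			vsel += 100;
		vsel /= 100;
		return vsel + 1;                  /* +1 because selector 0 is reserved */
	}

	static int smps_sel_to_uv(int sel)
	{
		return sel ? BASE_UV + STEP_UV * (sel - 1) : 0;
	}

	int main(void)
	{
		int uv;

		for (uv = 600000; uv <= 1300000; uv += 100000)
			printf("request %7d uV -> sel %2d -> %7d uV\n",
			       uv, smps_uv_to_sel(uv),
			       smps_sel_to_uv(smps_uv_to_sel(uv)));
		return 0;
	}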
+
+static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
+}
+
+static struct regulator_ops twlsmps_ops = {
+ .list_voltage = twl6030smps_list_voltage,
+
+ .set_voltage = twl6030smps_set_voltage,
+ .get_voltage_sel = twl6030smps_get_voltage_sel,
+
+ .enable = twl6030reg_enable,
+ .disable = twl6030reg_disable,
+ .is_enabled = twl6030reg_is_enabled,
+
+ .set_mode = twl6030reg_set_mode,
+
+ .get_status = twl6030reg_get_status,
};
/*----------------------------------------------------------------------*/
@@ -487,11 +834,10 @@ static struct regulator_ops twl6030_fixed_resource = {
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf, TWL4030)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf) \
+ remap_conf, TWL4030, twl4030fixed_ops)
+#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf, TWL6030)
+ 0x0, TWL6030, twl6030fixed_ops)
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
.base = offset, \
@@ -510,13 +856,11 @@ static struct regulator_ops twl6030_fixed_resource = {
}, \
}
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \
- remap_conf) { \
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
.base = offset, \
.id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
- .remap = remap_conf, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
@@ -527,9 +871,23 @@ static struct regulator_ops twl6030_fixed_resource = {
}, \
}
+#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+ .base = offset, \
+ .id = num, \
+ .min_mV = min_mVolts, \
+ .max_mV = max_mVolts, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6025_REG_##label, \
+ .n_voltages = ((max_mVolts - min_mVolts)/100) + 1, \
+ .ops = &twl6030ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
- family) { \
+ family, operations) { \
.base = offset, \
.id = num, \
.min_mV = mVolts, \
@@ -539,17 +897,16 @@ static struct regulator_ops twl6030_fixed_resource = {
.name = #label, \
.id = family##_REG_##label, \
.n_voltages = 1, \
- .ops = &twlfixed_ops, \
+ .ops = &operations, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
-#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay, remap_conf) { \
+#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \
.base = offset, \
.id = num, \
.delay = turnon_delay, \
- .remap = remap_conf, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
@@ -559,6 +916,21 @@ static struct regulator_ops twl6030_fixed_resource = {
}, \
}
+#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \
+ .base = offset, \
+ .id = num, \
+ .min_mV = 600, \
+ .max_mV = 2100, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6025_REG_##label, \
+ .n_voltages = 63, \
+ .ops = &twlsmps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
/*
* We list regulators here if systems need some level of
* software control over them after boot.
@@ -589,19 +961,52 @@ static struct twlreg_info twl_regs[] = {
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1, 0x21),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2, 0x21),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3, 0x21),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4, 0x21),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5, 0x21),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7, 0x21),
- TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
- TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
- TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21),
- TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0),
+ TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0),
+
+ /* 6025 are renamed compared to 6030 versions */
+ TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1),
+ TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2),
+ TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3),
+ TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4),
+ TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5),
+ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7),
+ TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16),
+ TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17),
+ TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18),
+
+ TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1),
+ TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2),
+ TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3),
};
+static u8 twl_get_smps_offset(void)
+{
+ u8 value;
+
+ twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+ TWL6030_SMPS_OFFSET);
+ return value;
+}
+
+static u8 twl_get_smps_mult(void)
+{
+ u8 value;
+
+ twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+ TWL6030_SMPS_MULT);
+ return value;
+}
+
static int __devinit twlreg_probe(struct platform_device *pdev)
{
int i;
@@ -623,6 +1028,9 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
if (!initdata)
return -EINVAL;
+ /* copy the features into regulator data */
+ info->features = (unsigned long)initdata->driver_data;
+
/* Constrain board-specific capabilities according to what
* this driver and the chip itself can actually do.
*/
@@ -645,6 +1053,27 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
break;
}
+ switch (pdev->id) {
+ case TWL6025_REG_SMPS3:
+ if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3)
+ info->flags |= SMPS_EXTENDED_EN;
+ if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3)
+ info->flags |= SMPS_OFFSET_EN;
+ break;
+ case TWL6025_REG_SMPS4:
+ if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4)
+ info->flags |= SMPS_EXTENDED_EN;
+ if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4)
+ info->flags |= SMPS_OFFSET_EN;
+ break;
+ case TWL6025_REG_VIO:
+ if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO)
+ info->flags |= SMPS_EXTENDED_EN;
+ if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO)
+ info->flags |= SMPS_OFFSET_EN;
+ break;
+ }
+
rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "can't register %s, %ld\n",
@@ -653,7 +1082,8 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rdev);
- twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
+ if (twl_class_is_4030())
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
info->remap);
/* NOTE: many regulators support short-circuit IRQs (presentable
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index e93453b1b978..a0982e809851 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -600,7 +600,6 @@ err:
static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
- struct wm831x *wm831x = dcdc->wm831x;
platform_set_drvdata(pdev, NULL);
@@ -776,7 +775,6 @@ err:
static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
- struct wm831x *wm831x = dcdc->wm831x;
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index b42d01cef35a..0f12c70bebc9 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -55,7 +55,7 @@ static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
return 1600000 + ((selector - 14) * 100000);
}
-static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
+static int wm8400_ldo_get_voltage_sel(struct regulator_dev *dev)
{
struct wm8400 *wm8400 = rdev_get_drvdata(dev);
u16 val;
@@ -63,7 +63,7 @@ static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev));
val &= WM8400_LDO1_VSEL_MASK;
- return wm8400_ldo_list_voltage(dev, val);
+ return val;
}
static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
@@ -104,7 +104,7 @@ static struct regulator_ops wm8400_ldo_ops = {
.enable = wm8400_ldo_enable,
.disable = wm8400_ldo_disable,
.list_voltage = wm8400_ldo_list_voltage,
- .get_voltage = wm8400_ldo_get_voltage,
+ .get_voltage_sel = wm8400_ldo_get_voltage_sel,
.set_voltage = wm8400_ldo_set_voltage,
};
@@ -145,7 +145,7 @@ static int wm8400_dcdc_list_voltage(struct regulator_dev *dev,
return 850000 + (selector * 25000);
}
-static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
+static int wm8400_dcdc_get_voltage_sel(struct regulator_dev *dev)
{
struct wm8400 *wm8400 = rdev_get_drvdata(dev);
u16 val;
@@ -154,7 +154,7 @@ static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset);
val &= WM8400_DC1_VSEL_MASK;
- return 850000 + (25000 * val);
+ return val;
}
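As with the LDO above, the DCDC now reports the raw VSEL field and leaves the selector-to-voltage translation to list_voltage(), which the regulator core applies on top of get_voltage_sel(). A minimal sketch of that mapping, using the 850 mV base and 25 mV step visible in wm8400_dcdc_list_voltage; the helper name is illustrative only.

	/* Illustrative only: the DCDC selector-to-voltage mapping used above. */
	#include <stdio.h>

	static int wm8400_dcdc_sel_to_uv(unsigned int selector)
	{
		return 850000 + (selector * 25000);  /* matches wm8400_dcdc_list_voltage */
	}

	int main(void)
	{
		unsigned int sel;

		/* With .get_voltage_sel the core reads the raw VSEL field and then
		 * translates it via list_voltage(), e.g.: */
		for (sel = 0; sel <= 4; sel++)
			printf("VSEL %u -> %d uV\n", sel, wm8400_dcdc_sel_to_uv(sel));
		return 0;
	}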
static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
@@ -261,7 +261,7 @@ static struct regulator_ops wm8400_dcdc_ops = {
.enable = wm8400_dcdc_enable,
.disable = wm8400_dcdc_disable,
.list_voltage = wm8400_dcdc_list_voltage,
- .get_voltage = wm8400_dcdc_get_voltage,
+ .get_voltage_sel = wm8400_dcdc_get_voltage_sel,
.set_voltage = wm8400_dcdc_set_voltage,
.get_mode = wm8400_dcdc_get_mode,
.set_mode = wm8400_dcdc_set_mode,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f822e13dc04b..ce2aabf5c550 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1051,4 +1051,13 @@ config RTC_DRV_TILE
Enable support for the Linux driver side of the Tilera
hypervisor's real-time clock interface.
+config RTC_DRV_PUV3
+ tristate "PKUnity v3 RTC support"
+ depends on ARCH_PUV3
+ help
+ This enables support for the RTC in the PKUnity-v3 SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-puv3.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 213d725f16d4..0ffefe877bfa 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
+obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index ef6316acec43..df68618f6dbb 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -318,7 +318,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
-int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
struct rtc_time tm;
long now, scheduled;
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index d0e06edb14c5..cace6d3aed9a 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -421,7 +421,8 @@ static long rtc_dev_ioctl(struct file *file,
err = ops->ioctl(rtc->dev.parent, cmd, arg);
if (err == -ENOIOCTLCMD)
err = -ENOTTY;
- }
+ } else
+ err = -ENOTTY;
break;
}
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4724ba3acf1a..b2005b44e4f7 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -149,6 +149,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "ds1340", ds_1340 },
{ "ds3231", ds_3231 },
{ "m41t00", m41t00 },
+ { "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
{ }
};
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 1a84b3e227d1..7317d3b9a3d5 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -189,7 +189,7 @@ static int __devinit m41t93_probe(struct spi_device *spi)
static int __devexit m41t93_remove(struct spi_device *spi)
{
- struct rtc_device *rtc = platform_get_drvdata(spi);
+ struct rtc_device *rtc = spi_get_drvdata(spi);
if (rtc)
rtc_device_unregister(rtc);
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
new file mode 100644
index 000000000000..46f14b82f3ab
--- /dev/null
+++ b/drivers/rtc/rtc-puv3.c
@@ -0,0 +1,359 @@
+/*
+ * RTC driver code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+
+static struct resource *puv3_rtc_mem;
+
+static int puv3_rtc_alarmno = IRQ_RTCAlarm;
+static int puv3_rtc_tickno = IRQ_RTC;
+
+static DEFINE_SPINLOCK(puv3_rtc_pie_lock);
+
+/* IRQ Handlers */
+static irqreturn_t puv3_rtc_alarmirq(int irq, void *id)
+{
+ struct rtc_device *rdev = id;
+
+ writel(readl(RTC_RTSR) | RTC_RTSR_AL, RTC_RTSR);
+ rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t puv3_rtc_tickirq(int irq, void *id)
+{
+ struct rtc_device *rdev = id;
+
+ writel(readl(RTC_RTSR) | RTC_RTSR_HZ, RTC_RTSR);
+ rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+/* Update control registers */
+static void puv3_rtc_setaie(int to)
+{
+ unsigned int tmp;
+
+ pr_debug("%s: aie=%d\n", __func__, to);
+
+ tmp = readl(RTC_RTSR) & ~RTC_RTSR_ALE;
+
+ if (to)
+ tmp |= RTC_RTSR_ALE;
+
+ writel(tmp, RTC_RTSR);
+}
+
+static int puv3_rtc_setpie(struct device *dev, int enabled)
+{
+ unsigned int tmp;
+
+ pr_debug("%s: pie=%d\n", __func__, enabled);
+
+ spin_lock_irq(&puv3_rtc_pie_lock);
+ tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
+
+ if (enabled)
+ tmp |= RTC_RTSR_HZE;
+
+ writel(tmp, RTC_RTSR);
+ spin_unlock_irq(&puv3_rtc_pie_lock);
+
+ return 0;
+}
+
+/* Time read/write */
+static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
+
+ pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
+ rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+ rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
+
+ return 0;
+}
+
+static int puv3_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ unsigned long rtc_count = 0;
+
+ pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
+ tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ rtc_tm_to_time(tm, &rtc_count);
+ writel(rtc_count, RTC_RCNR);
+
+ return 0;
+}
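RTC_RCNR is treated as a plain seconds-since-epoch counter, with rtc_time_to_tm()/rtc_tm_to_time() converting to and from broken-down time. A rough userspace equivalent of that round trip is sketched below (assuming glibc, since timegm() is an extension); the sample count value is arbitrary.

	/* Illustrative only: userspace analogue of the RCNR <-> rtc_time conversion. */
	#define _GNU_SOURCE
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		time_t count = 1309392000;     /* example of what RTC_RCNR might hold */
		struct tm tm;

		gmtime_r(&count, &tm);         /* ~ rtc_time_to_tm(count, &tm) */
		printf("count %ld -> %04d-%02d-%02d %02d:%02d:%02d UTC\n",
		       (long)count, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);

		printf("back to count: %ld\n", (long)timegm(&tm));  /* ~ rtc_tm_to_time() */
		return 0;
	}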
+
+static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_time *alm_tm = &alrm->time;
+
+ rtc_time_to_tm(readl(RTC_RTAR), alm_tm);
+
+ alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE;
+
+ pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+ alrm->enabled,
+ alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+ alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
+
+ return 0;
+}
+
+static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_time *tm = &alrm->time;
+ unsigned long rtcalarm_count = 0;
+
+ pr_debug("puv3_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+ alrm->enabled,
+ tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
+ tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
+
+ rtc_tm_to_time(tm, &rtcalarm_count);
+ writel(rtcalarm_count, RTC_RTAR);
+
+ puv3_rtc_setaie(alrm->enabled);
+
+ if (alrm->enabled)
+ enable_irq_wake(puv3_rtc_alarmno);
+ else
+ disable_irq_wake(puv3_rtc_alarmno);
+
+ return 0;
+}
+
+static int puv3_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ seq_printf(seq, "periodic_IRQ\t: %s\n",
+ (readl(RTC_RTSR) & RTC_RTSR_HZE) ? "yes" : "no");
+ return 0;
+}
+
+static int puv3_rtc_open(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = request_irq(puv3_rtc_alarmno, puv3_rtc_alarmirq,
+ IRQF_DISABLED, "pkunity-rtc alarm", rtc_dev);
+
+ if (ret) {
+ dev_err(dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret);
+ return ret;
+ }
+
+ ret = request_irq(puv3_rtc_tickno, puv3_rtc_tickirq,
+ IRQF_DISABLED, "pkunity-rtc tick", rtc_dev);
+
+ if (ret) {
+ dev_err(dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret);
+ goto tick_err;
+ }
+
+ return ret;
+
+ tick_err:
+ free_irq(puv3_rtc_alarmno, rtc_dev);
+ return ret;
+}
+
+static void puv3_rtc_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+
+ /* do not clear AIE here, it may be needed for wake */
+ puv3_rtc_setpie(dev, 0);
+ free_irq(puv3_rtc_alarmno, rtc_dev);
+ free_irq(puv3_rtc_tickno, rtc_dev);
+}
+
+static const struct rtc_class_ops puv3_rtcops = {
+ .open = puv3_rtc_open,
+ .release = puv3_rtc_release,
+ .read_time = puv3_rtc_gettime,
+ .set_time = puv3_rtc_settime,
+ .read_alarm = puv3_rtc_getalarm,
+ .set_alarm = puv3_rtc_setalarm,
+ .proc = puv3_rtc_proc,
+};
+
+static void puv3_rtc_enable(struct platform_device *pdev, int en)
+{
+ if (!en) {
+ writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR);
+ } else {
+ /* re-enable the device, and check it is ok */
+ if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) {
+ dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
+ writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR);
+ }
+ }
+}
+
+static int puv3_rtc_remove(struct platform_device *dev)
+{
+ struct rtc_device *rtc = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+ rtc_device_unregister(rtc);
+
+ puv3_rtc_setpie(&dev->dev, 0);
+ puv3_rtc_setaie(0);
+
+ release_resource(puv3_rtc_mem);
+ kfree(puv3_rtc_mem);
+
+ return 0;
+}
+
+static int puv3_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+ struct resource *res;
+ int ret;
+
+ pr_debug("%s: probe=%p\n", __func__, pdev);
+
+ /* find the IRQs */
+ puv3_rtc_tickno = platform_get_irq(pdev, 1);
+ if (puv3_rtc_tickno < 0) {
+ dev_err(&pdev->dev, "no irq for rtc tick\n");
+ return -ENOENT;
+ }
+
+ puv3_rtc_alarmno = platform_get_irq(pdev, 0);
+ if (puv3_rtc_alarmno < 0) {
+ dev_err(&pdev->dev, "no irq for alarm\n");
+ return -ENOENT;
+ }
+
+ pr_debug("PKUnity_rtc: tick irq %d, alarm irq %d\n",
+ puv3_rtc_tickno, puv3_rtc_alarmno);
+
+ /* get the memory region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get memory region resource\n");
+ return -ENOENT;
+ }
+
+ puv3_rtc_mem = request_mem_region(res->start,
+ res->end-res->start+1,
+ pdev->name);
+
+ if (puv3_rtc_mem == NULL) {
+ dev_err(&pdev->dev, "failed to reserve memory region\n");
+ ret = -ENOENT;
+ goto err_nores;
+ }
+
+ puv3_rtc_enable(pdev, 1);
+
+ /* register RTC and exit */
+ rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops,
+ THIS_MODULE);
+
+ if (IS_ERR(rtc)) {
+ dev_err(&pdev->dev, "cannot attach rtc\n");
+ ret = PTR_ERR(rtc);
+ goto err_nortc;
+ }
+
+ /* platform setup code should have handled this; sigh */
+ if (!device_can_wakeup(&pdev->dev))
+ device_init_wakeup(&pdev->dev, 1);
+
+ platform_set_drvdata(pdev, rtc);
+ return 0;
+
+ err_nortc:
+ puv3_rtc_enable(pdev, 0);
+ release_resource(puv3_rtc_mem);
+
+ err_nores:
+ return ret;
+}
+
+#ifdef CONFIG_PM
+
+static int ticnt_save;
+
+static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /* save RTAR for anyone using periodic interrupts */
+ ticnt_save = readl(RTC_RTAR);
+ puv3_rtc_enable(pdev, 0);
+ return 0;
+}
+
+static int puv3_rtc_resume(struct platform_device *pdev)
+{
+ puv3_rtc_enable(pdev, 1);
+ writel(ticnt_save, RTC_RTAR);
+ return 0;
+}
+#else
+#define puv3_rtc_suspend NULL
+#define puv3_rtc_resume NULL
+#endif
+
+static struct platform_driver puv3_rtcdrv = {
+ .probe = puv3_rtc_probe,
+ .remove = __devexit_p(puv3_rtc_remove),
+ .suspend = puv3_rtc_suspend,
+ .resume = puv3_rtc_resume,
+ .driver = {
+ .name = "PKUnity-v3-RTC",
+ .owner = THIS_MODULE,
+ }
+};
+
+static char __initdata banner[] = "PKUnity-v3 RTC, (c) 2009 PKUnity Co.\n";
+
+static int __init puv3_rtc_init(void)
+{
+ printk(banner);
+ return platform_driver_register(&puv3_rtcdrv);
+}
+
+static void __exit puv3_rtc_exit(void)
+{
+ platform_driver_unregister(&puv3_rtcdrv);
+}
+
+module_init(puv3_rtc_init);
+module_exit(puv3_rtc_exit);
+
+MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
+MODULE_AUTHOR("Hu Dongliang");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index b8bc862903ae..efd6066b5cd2 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -78,7 +78,6 @@ struct vt8500_rtc {
void __iomem *regbase;
struct resource *res;
int irq_alarm;
- int irq_hz;
struct rtc_device *rtc;
spinlock_t lock; /* Protects this structure */
};
@@ -100,10 +99,6 @@ static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
if (isr & 1)
events |= RTC_AF | RTC_IRQF;
- /* Only second/minute interrupts are supported */
- if (isr & 2)
- events |= RTC_UF | RTC_IRQF;
-
rtc_update_irq(vt8500_rtc->rtc, 1, events);
return IRQ_HANDLED;
@@ -199,27 +194,12 @@ static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
- unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
-
- if (enabled)
- tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
- else
- tmp &= ~VT8500_RTC_CR_SM_ENABLE;
-
- writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
- return 0;
-}
-
static const struct rtc_class_ops vt8500_rtc_ops = {
.read_time = vt8500_rtc_read_time,
.set_time = vt8500_rtc_set_time,
.read_alarm = vt8500_rtc_read_alarm,
.set_alarm = vt8500_rtc_set_alarm,
.alarm_irq_enable = vt8500_alarm_irq_enable,
- .update_irq_enable = vt8500_update_irq_enable,
};
static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
@@ -248,13 +228,6 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
goto err_free;
}
- vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
- if (vt8500_rtc->irq_hz < 0) {
- dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
- ret = -ENXIO;
- goto err_free;
- }
-
vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
resource_size(vt8500_rtc->res),
"vt8500-rtc");
@@ -272,9 +245,8 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
goto err_release;
}
- /* Enable the second/minute interrupt generation and enable RTC */
- writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H
- | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
+ /* Enable RTC and set it to 24-hour mode */
+ writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
vt8500_rtc->regbase + VT8500_RTC_CR);
vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
@@ -286,26 +258,16 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
goto err_unmap;
}
- ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
- "rtc 1Hz", vt8500_rtc);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get irq %i, err %d\n",
- vt8500_rtc->irq_hz, ret);
- goto err_unreg;
- }
-
ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
"rtc alarm", vt8500_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "can't get irq %i, err %d\n",
vt8500_rtc->irq_alarm, ret);
- goto err_free_hz;
+ goto err_unreg;
}
return 0;
-err_free_hz:
- free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
err_unreg:
rtc_device_unregister(vt8500_rtc->rtc);
err_unmap:
@@ -323,7 +285,6 @@ static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
- free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
rtc_device_unregister(vt8500_rtc->rtc);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 85dddb1e4126..46784b83c5c4 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,7 +24,7 @@
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include <asm/vtoc.h>
#include <asm/diag.h>
@@ -642,7 +642,7 @@ dasd_diag_init(void)
}
ASCEBC(dasd_diag_discipline.ebcname, 4);
- ctl_set_bit(0, 9);
+ service_subclass_irq_register();
register_external_interrupt(0x2603, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0;
@@ -652,7 +652,7 @@ static void __exit
dasd_diag_cleanup(void)
{
unregister_external_interrupt(0x2603, dasd_ext_handler);
- ctl_clear_bit(0, 9);
+ service_subclass_irq_unregister();
dasd_diag_discipline_pointer = NULL;
}
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index b76c61f82485..eaa7e78186f9 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -19,7 +19,6 @@
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
-#include <asm/s390_ext.h>
#include <asm/types.h>
#include <asm/irq.h>
@@ -885,12 +884,12 @@ sclp_check_interface(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
- ctl_set_bit(0, 9);
+ service_subclass_irq_register();
/* Wait for signal from interrupt or timeout */
sclp_sync_wait();
/* Disable service-signal interruption - needs to happen
* with IRQs enabled. */
- ctl_clear_bit(0,9);
+ service_subclass_irq_unregister();
spin_lock_irqsave(&sclp_lock, flags);
del_timer(&sclp_request_timer);
if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -1070,7 +1069,7 @@ sclp_init(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal external interruption - needs to happen with
* IRQs enabled. */
- ctl_set_bit(0, 9);
+ service_subclass_irq_register();
sclp_init_mask(1);
return 0;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 55e8f721e38a..570d4da10696 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -416,7 +416,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
/* special handling for no target buffer empty */
if ((!q->is_input_q &&
- (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
+ (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
@@ -427,8 +427,8 @@ static void process_buffer_error(struct qdio_q *q, int count)
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
DBF_ERROR("F14:%2x F15:%2x",
- q->sbal[q->first_to_check]->element[14].flags & 0xff,
- q->sbal[q->first_to_check]->element[15].flags & 0xff);
+ q->sbal[q->first_to_check]->element[14].sflags,
+ q->sbal[q->first_to_check]->element[15].sflags);
/*
* Interrupts may be avoided as long as the error is present
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 607998f0b7d8..aec60d55b10d 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -25,7 +25,6 @@
#include <asm/kvm_para.h>
#include <asm/kvm_virtio.h>
#include <asm/setup.h>
-#include <asm/s390_ext.h>
#include <asm/irq.h>
#define VIRTIO_SUBCODE_64 0x0D00
@@ -441,7 +440,7 @@ static int __init kvm_devices_init(void)
INIT_WORK(&hotplug_work, hotplug_devices);
- ctl_set_bit(0, 9);
+ service_subclass_irq_register();
register_external_interrupt(0x2603, kvm_extint_handler);
scan_devices();
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 55c6aa1c9704..d3cee33e554c 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -361,7 +361,7 @@ enum qeth_header_ids {
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
- return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
+ return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
enum qeth_qdio_buffer_states {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 503678a30981..dd08f7b42fb8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -890,7 +890,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
struct sk_buff *skb;
/* is PCI flag set on buffer? */
- if (buf->buffer->element[0].flags & 0x40)
+ if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
atomic_dec(&queue->set_pci_flags_count);
skb = skb_dequeue(&buf->skb_list);
@@ -906,9 +906,11 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
buf->is_header[i] = 0;
buf->buffer->element[i].length = 0;
buf->buffer->element[i].addr = NULL;
- buf->buffer->element[i].flags = 0;
+ buf->buffer->element[i].eflags = 0;
+ buf->buffer->element[i].sflags = 0;
}
- buf->buffer->element[15].flags = 0;
+ buf->buffer->element[15].eflags = 0;
+ buf->buffer->element[15].sflags = 0;
buf->next_element_to_fill = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
@@ -2368,9 +2370,10 @@ static int qeth_init_input_buffer(struct qeth_card *card,
buf->buffer->element[i].length = PAGE_SIZE;
buf->buffer->element[i].addr = pool_entry->elements[i];
if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
- buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
+ buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
else
- buf->buffer->element[i].flags = 0;
+ buf->buffer->element[i].eflags = 0;
+ buf->buffer->element[i].sflags = 0;
}
return 0;
}
@@ -2718,11 +2721,11 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
if (qdio_error) {
QETH_CARD_TEXT(card, 2, dbftext);
QETH_CARD_TEXT_(card, 2, " F15=%02X",
- buf->element[15].flags & 0xff);
+ buf->element[15].sflags);
QETH_CARD_TEXT_(card, 2, " F14=%02X",
- buf->element[14].flags & 0xff);
+ buf->element[14].sflags);
QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
- if ((buf->element[15].flags & 0xff) == 0x12) {
+ if ((buf->element[15].sflags) == 0x12) {
card->stats.rx_dropped++;
return 0;
} else
@@ -2798,7 +2801,7 @@ EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
static int qeth_handle_send_error(struct qeth_card *card,
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
- int sbalf15 = buffer->buffer->element[15].flags & 0xff;
+ int sbalf15 = buffer->buffer->element[15].sflags;
QETH_CARD_TEXT(card, 6, "hdsnderr");
if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2907,8 +2910,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
for (i = index; i < index + count; ++i) {
buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
- buf->buffer->element[buf->next_element_to_fill - 1].flags |=
- SBAL_FLAGS_LAST_ENTRY;
+ buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
+ SBAL_EFLAGS_LAST_ENTRY;
if (queue->card->info.type == QETH_CARD_TYPE_IQD)
continue;
@@ -2921,7 +2924,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
/* it's likely that we'll go to packing
* mode soon */
atomic_inc(&queue->set_pci_flags_count);
- buf->buffer->element[0].flags |= 0x40;
+ buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
}
} else {
if (!atomic_read(&queue->set_pci_flags_count)) {
@@ -2934,7 +2937,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
* further send was requested by the stack
*/
atomic_inc(&queue->set_pci_flags_count);
- buf->buffer->element[0].flags |= 0x40;
+ buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
}
}
}
@@ -3180,20 +3183,20 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
if (!length) {
if (first_lap)
if (skb_shinfo(skb)->nr_frags)
- buffer->element[element].flags =
- SBAL_FLAGS_FIRST_FRAG;
+ buffer->element[element].eflags =
+ SBAL_EFLAGS_FIRST_FRAG;
else
- buffer->element[element].flags = 0;
+ buffer->element[element].eflags = 0;
else
- buffer->element[element].flags =
- SBAL_FLAGS_MIDDLE_FRAG;
+ buffer->element[element].eflags =
+ SBAL_EFLAGS_MIDDLE_FRAG;
} else {
if (first_lap)
- buffer->element[element].flags =
- SBAL_FLAGS_FIRST_FRAG;
+ buffer->element[element].eflags =
+ SBAL_EFLAGS_FIRST_FRAG;
else
- buffer->element[element].flags =
- SBAL_FLAGS_MIDDLE_FRAG;
+ buffer->element[element].eflags =
+ SBAL_EFLAGS_MIDDLE_FRAG;
}
data += length_here;
element++;
@@ -3205,12 +3208,12 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
buffer->element[element].addr = (char *)page_to_phys(frag->page)
+ frag->page_offset;
buffer->element[element].length = frag->size;
- buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG;
+ buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
element++;
}
- if (buffer->element[element - 1].flags)
- buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG;
+ if (buffer->element[element - 1].eflags)
+ buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
*next_element_to_fill = element;
}
@@ -3234,7 +3237,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
/*fill first buffer entry only with header information */
buffer->element[element].addr = skb->data;
buffer->element[element].length = hdr_len;
- buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
+ buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
buf->next_element_to_fill++;
skb->data += hdr_len;
skb->len -= hdr_len;
@@ -3246,7 +3249,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
buffer->element[element].addr = hdr;
buffer->element[element].length = sizeof(struct qeth_hdr) +
hd_len;
- buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
+ buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
buf->is_header[element] = 1;
buf->next_element_to_fill++;
}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 8512b5c0ef82..022fb6a8cb83 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -640,7 +640,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
}
static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
- u32 fsf_cmd, u32 sbtype,
+ u32 fsf_cmd, u8 sbtype,
mempool_t *pool)
{
struct zfcp_adapter *adapter = qdio->adapter;
@@ -841,7 +841,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.scsi_abort);
if (IS_ERR(req)) {
req = NULL;
@@ -1012,7 +1012,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
- SBAL_FLAGS0_TYPE_WRITE_READ, pool);
+ SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1110,7 +1110,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
- SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
+ SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1156,7 +1156,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1198,7 +1198,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
goto out_unlock;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
- SBAL_FLAGS0_TYPE_READ, NULL);
+ SBAL_SFLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -1250,7 +1250,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1296,7 +1296,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
goto out_unlock;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
- SBAL_FLAGS0_TYPE_READ, NULL);
+ SBAL_SFLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -1412,7 +1412,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1478,7 +1478,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1553,7 +1553,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1606,7 +1606,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1698,7 +1698,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1812,7 +1812,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1901,7 +1901,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
- SBAL_FLAGS0_TYPE_READ,
+ SBAL_SFLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -2161,7 +2161,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
{
struct zfcp_fsf_req *req;
struct fcp_cmnd *fcp_cmnd;
- unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
+ u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
int real_bytes, retval = -EIO, dix_bytes = 0;
struct scsi_device *sdev = scsi_cmnd->device;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -2181,7 +2181,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
}
if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
- sbtype = SBAL_FLAGS0_TYPE_WRITE;
+ sbtype = SBAL_SFLAGS0_TYPE_WRITE;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
sbtype, adapter->pool.scsi_req);
@@ -2280,7 +2280,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
- SBAL_FLAGS0_TYPE_WRITE,
+ SBAL_SFLAGS0_TYPE_WRITE,
qdio->adapter->pool.scsi_req);
if (IS_ERR(req)) {
@@ -2328,17 +2328,18 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req = NULL;
struct fsf_qtcb_bottom_support *bottom;
- int direction, retval = -EIO, bytes;
+ int retval = -EIO, bytes;
+ u8 direction;
if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
return ERR_PTR(-EOPNOTSUPP);
switch (fsf_cfdc->command) {
case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
- direction = SBAL_FLAGS0_TYPE_WRITE;
+ direction = SBAL_SFLAGS0_TYPE_WRITE;
break;
case FSF_QTCB_UPLOAD_CONTROL_FILE:
- direction = SBAL_FLAGS0_TYPE_READ;
+ direction = SBAL_SFLAGS0_TYPE_READ;
break;
default:
return ERR_PTR(-EINVAL);
@@ -2413,7 +2414,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
fsf_req->qdio_req.sbal_response = sbal_idx;
zfcp_fsf_req_complete(fsf_req);
- if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
+ if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
break;
}
}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 98e97d90835b..d9c40ea73eef 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -124,7 +124,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
/* set last entry flag in current SBALE of current SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+ sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
/* don't exceed last allowed SBAL */
if (q_req->sbal_last == q_req->sbal_limit)
@@ -132,7 +132,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
/* set chaining flag in first SBALE of current SBAL */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
+ sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
/* calculate index of next SBAL */
q_req->sbal_last++;
@@ -147,7 +147,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
/* set storage-block type for new SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->flags |= q_req->sbtype;
+ sbale->sflags |= q_req->sbtype;
return sbale;
}
@@ -177,7 +177,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->flags |= q_req->sbtype;
+ sbale->sflags |= q_req->sbtype;
for (; sg; sg = sg_next(sg)) {
sbale = zfcp_qdio_sbale_next(qdio, q_req);
@@ -384,7 +384,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
sbale = &(qdio->res_q[cc]->element[0]);
sbale->length = 0;
- sbale->flags = SBAL_FLAGS_LAST_ENTRY;
+ sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
+ sbale->sflags = 0;
sbale->addr = NULL;
}
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 2297d8d3e947..54e22ace012b 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -67,7 +67,7 @@ struct zfcp_qdio {
* @qdio_outb_usage: usage of outbound queue
*/
struct zfcp_qdio_req {
- u32 sbtype;
+ u8 sbtype;
u8 sbal_number;
u8 sbal_first;
u8 sbal_last;
@@ -116,7 +116,7 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
*/
static inline
void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long req_id, u32 sbtype, void *data, u32 len)
+ unsigned long req_id, u8 sbtype, void *data, u32 len)
{
struct qdio_buffer_element *sbale;
int count = min(atomic_read(&qdio->req_q_free),
@@ -131,7 +131,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->addr = (void *) req_id;
- sbale->flags = SBAL_FLAGS0_COMMAND | sbtype;
+ sbale->eflags = 0;
+ sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
if (unlikely(!data))
return;
@@ -173,7 +174,7 @@ void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
struct qdio_buffer_element *sbale;
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+ sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
}
/**
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 3b7e83d2dab4..d5ff142c93a2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
flash_error_table[i].reason);
}
-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
asd_show_update_bios, asd_store_update_bios);
static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index c1f72c49196f..6c7e0339dda4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -56,6 +56,8 @@ BFA_TRC_FILE(CNA, IOC);
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
@@ -647,7 +649,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
- if (bfa_ioc_sync_complete(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index ec9cf08b0e7f..c85182a704fb 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -263,6 +263,7 @@ struct bfa_ioc_hwif_s {
bfa_boolean_t msix);
void (*ioc_notify_fail) (struct bfa_ioc_s *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_sync_start) (struct bfa_ioc_s *ioc);
void (*ioc_sync_join) (struct bfa_ioc_s *ioc);
void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index e4a0713185b6..89ae4c8f95a2 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -32,6 +32,7 @@ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
@@ -53,6 +54,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+ hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
@@ -195,6 +197,15 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
}
/*
+ * Synchronized IOC failure processing routines
+ */
+static bfa_boolean_t
+bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
+{
+ return bfa_ioc_cb_sync_complete(ioc);
+}
+
+/*
* Cleanup hw semaphore and usecnt registers
*/
static void
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 008d129ddfcd..93612520f0d2 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
@@ -62,6 +63,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -351,6 +353,30 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
writel(1, ioc->ioc_regs.ioc_sem_reg);
}
+static bfa_boolean_t
+bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
+{
+ uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return BFA_TRUE;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+
/*
* Synchronized IOC failure processing routines
*/
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index cfd59023227b..6bdd25a93db9 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -66,11 +66,11 @@
#define BD_SPLIT_SIZE 32768
/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
-#define BNX2I_SQ_WQES_MIN 16
-#define BNX2I_570X_SQ_WQES_MAX 128
-#define BNX2I_5770X_SQ_WQES_MAX 512
-#define BNX2I_570X_SQ_WQES_DEFAULT 128
-#define BNX2I_5770X_SQ_WQES_DEFAULT 256
+#define BNX2I_SQ_WQES_MIN 16
+#define BNX2I_570X_SQ_WQES_MAX 128
+#define BNX2I_5770X_SQ_WQES_MAX 512
+#define BNX2I_570X_SQ_WQES_DEFAULT 128
+#define BNX2I_5770X_SQ_WQES_DEFAULT 128
#define BNX2I_570X_CQ_WQES_MAX 128
#define BNX2I_5770X_CQ_WQES_MAX 512
@@ -115,6 +115,7 @@
#define BNX2X_MAX_CQS 8
#define CNIC_ARM_CQE 1
+#define CNIC_ARM_CQE_FP 2
#define CNIC_DISARM_CQE 0
#define REG_RD(__hba, offset) \
@@ -666,7 +667,9 @@ enum {
* after HBA reset is completed by bnx2i/cnic/bnx2
* modules
* @state: tracks offload connection state machine
- * @teardown_mode: indicates if conn teardown is abortive or orderly
+ * @timestamp: tracks the start time when the ep begins to connect
+ * @num_active_cmds: tracks the number of outstanding commands for this ep
+ * @ec_shift: the amount of shift as part of the event coal calc
* @qp: QP information
* @ids: contains chip allocated *context id* & driver assigned
* *iscsi cid*
@@ -685,6 +688,7 @@ struct bnx2i_endpoint {
u32 state;
unsigned long timestamp;
int num_active_cmds;
+ u32 ec_shift;
struct qp_info qp;
struct ep_handles ids;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f0b89513faed..5c54a2d9b834 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -138,7 +138,6 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
u16 next_index;
u32 num_active_cmds;
-
/* Coalesce CQ entries only on 10G devices */
if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
return;
@@ -148,16 +147,19 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
* interrupts and other unwanted results
*/
cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
- if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
- return;
- if (action == CNIC_ARM_CQE) {
+ if (action != CNIC_ARM_CQE_FP)
+ if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+ return;
+
+ if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
num_active_cmds = ep->num_active_cmds;
if (num_active_cmds <= event_coal_min)
next_index = 1;
else
next_index = event_coal_min +
- (num_active_cmds - event_coal_min) / event_coal_div;
+ ((num_active_cmds - event_coal_min) >>
+ ep->ec_shift);
if (!next_index)
next_index = 1;
cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -1274,6 +1276,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
iscsi_init.dummy_buffer_addr_hi =
(u32) ((u64) hba->dummy_buf_dma >> 32);
+ hba->num_ccell = hba->max_sqes >> 1;
hba->ctx_ccell_tasks =
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
iscsi_init.num_ccells_per_conn = hba->num_ccell;
@@ -1934,7 +1937,6 @@ cqe_out:
qp->cq_cons_idx++;
}
}
- bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
}
/**
@@ -1948,22 +1950,23 @@ cqe_out:
static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
struct iscsi_kcqe *new_cqe_kcqe)
{
- struct bnx2i_conn *conn;
+ struct bnx2i_conn *bnx2i_conn;
u32 iscsi_cid;
iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
- conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+ bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
- if (!conn) {
+ if (!bnx2i_conn) {
printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
return;
}
- if (!conn->ep) {
+ if (!bnx2i_conn->ep) {
printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
return;
}
-
- bnx2i_process_new_cqes(conn);
+ bnx2i_process_new_cqes(bnx2i_conn);
+ bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
+ bnx2i_process_new_cqes(bnx2i_conn);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1d24a2819736..6adbdc34a9a5 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
wait_event_interruptible_timeout(hba->eh_wait,
(list_empty(&hba->ep_ofld_list) &&
list_empty(&hba->ep_destroy_list)),
- 10 * HZ);
+ 2 * HZ);
/* Wait for all endpoints to be torn down, Chip will be reset once
* control returns to network driver. So it is required to cleanup and
* release all connection resources before returning from this routine.
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1809f9ccc4ce..041928b23cb0 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -379,6 +379,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
struct iscsi_endpoint *ep;
struct bnx2i_endpoint *bnx2i_ep;
+ u32 ec_div;
ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
if (!ep) {
@@ -393,6 +394,11 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
bnx2i_ep->ep_iscsi_cid = (u16) -1;
bnx2i_ep->hba = hba;
bnx2i_ep->hba_age = hba->age;
+
+ ec_div = event_coal_div;
+ while (ec_div >>= 1)
+ bnx2i_ep->ec_shift += 1;
+
hba->ofld_conns_active++;
init_waitqueue_head(&bnx2i_ep->ofld_wait);
return ep;
@@ -858,7 +864,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
mutex_init(&hba->net_dev_lock);
init_waitqueue_head(&hba->eh_wait);
if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
- hba->hba_shutdown_tmo = 20 * HZ;
+ hba->hba_shutdown_tmo = 30 * HZ;
hba->conn_teardown_tmo = 20 * HZ;
hba->conn_ctx_destroy_tmo = 6 * HZ;
} else { /* 5706/5708/5709 */
@@ -1208,6 +1214,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
struct bnx2i_cmd *cmd = task->dd_data;
struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+ if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
+ return -ENOMEM;
+
/*
* If there is no scsi_cmnd this must be a mgmt task
*/
@@ -2156,7 +2165,7 @@ static struct scsi_host_template bnx2i_host_template = {
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = 1024,
.max_sectors = 127,
- .cmd_per_lun = 32,
+ .cmd_per_lun = 24,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
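The ec_shift initialisation in bnx2i_alloc_ep() above is an integer log2 of the event_coal_div module parameter; for power-of-two values it matches what the kernel's ilog2() helper would return, and for other values it truncates. A small standalone sketch of that loop:

#include <assert.h>

/*
 * Standalone sketch of the ec_shift loop in bnx2i_alloc_ep(): an
 * integer log2 of event_coal_div, equivalent to the kernel's ilog2()
 * for power-of-two inputs.
 */
static unsigned int log2_shift(unsigned int ec_div)
{
    unsigned int shift = 0;

    while (ec_div >>= 1)
        shift++;
    return shift;
}

int main(void)
{
    assert(log2_shift(1) == 0);    /* divide by 1: no shift */
    assert(log2_shift(4) == 2);    /* divide by 4 becomes >> 2 */
    assert(log2_shift(6) == 2);    /* non-power-of-two truncates */
    return 0;
}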
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc23bd9480b2..155d7b9bdeae 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -137,6 +137,7 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static int fcoe_validate_vport_create(struct fc_vport *);
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -2351,6 +2352,17 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
struct fc_lport *vn_port;
+ int rc;
+ char buf[32];
+
+ rc = fcoe_validate_vport_create(vport);
+ if (rc) {
+ wwn_to_str(vport->port_name, buf, sizeof(buf));
+ printk(KERN_ERR "fcoe: Failed to create vport, "
+ "WWPN (0x%s) already exists\n",
+ buf);
+ return rc;
+ }
mutex_lock(&fcoe_config_mutex);
vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
@@ -2497,3 +2509,49 @@ static void fcoe_set_port_id(struct fc_lport *lport,
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
}
+
+/**
+ * fcoe_validate_vport_create() - Validate a vport before creating it
+ * @vport: NPIV port to be created
+ *
+ * This routine is meant to add validation for a vport before creating it
+ * via fcoe_vport_create().
+ * Current validations are:
+ * - WWPN supplied is unique for given lport
+ *
+ *
+ */
+static int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port;
+ int rc = 0;
+ char buf[32];
+
+ mutex_lock(&n_port->lp_mutex);
+
+ wwn_to_str(vport->port_name, buf, sizeof(buf));
+ /* Check that the wwpn is not the same as that of the lport */
+ if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+ FCOE_DBG("vport WWPN 0x%s is same as that of the "
+ "base port WWPN\n", buf);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Check if there is any existing vport with same wwpn */
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+ FCOE_DBG("vport with given WWPN 0x%s already "
+ "exists\n", buf);
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ mutex_unlock(&n_port->lp_mutex);
+
+ return rc;
+}
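The validation added here boils down to a uniqueness rule on the 64-bit WWPN: it must differ from the base lport's WWPN and from every existing vport's. A standalone sketch of that rule, using a plain array where the driver walks n_port->vports under lp_mutex (types and values are illustrative only):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative-only check mirroring fcoe_validate_vport_create():
 * the candidate WWPN must differ from the base port's and from every
 * existing vport's. The array stands in for the n_port->vports list.
 */
static int wwpn_is_unique(uint64_t base_wwpn, const uint64_t *vport_wwpns,
                          int nr_vports, uint64_t candidate)
{
    int i;

    if (candidate == base_wwpn)        /* same as the physical port */
        return 0;
    for (i = 0; i < nr_vports; i++)
        if (vport_wwpns[i] == candidate)    /* already in use */
            return 0;
    return 1;
}

int main(void)
{
    uint64_t vports[] = { 0x2000001b3212aaaaULL };

    printf("%d\n", wwpn_is_unique(0x2000001b3212bbbbULL, vports, 1,
                                  0x2000001b3212ccccULL));    /* 1 = ok */
    return 0;
}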
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 408a6fd78fb4..c4a93993c0cf 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -99,4 +99,14 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
+static inline void wwn_to_str(u64 wwn, char *buf, int len)
+{
+ u8 wwpn[8];
+
+ u64_to_wwn(wwn, wwpn);
+ snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+ wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+ wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+}
+
#endif /* _FCOE_H_ */
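For illustration, a userspace equivalent of the new wwn_to_str() helper; a single %016llx prints the same 16 hex digits as the per-byte snprintf, assuming u64_to_wwn() lays the WWN out most-significant byte first:

#include <stdio.h>
#include <stdint.h>

/*
 * Userspace stand-in for wwn_to_str(); behaviourally equivalent under
 * the stated byte-order assumption, not the kernel helper itself.
 */
static void wwn_to_str(uint64_t wwn, char *buf, int len)
{
    snprintf(buf, len, "%016llx", (unsigned long long)wwn);
}

int main(void)
{
    char buf[32];

    wwn_to_str(0x50060b0000c26200ULL, buf, sizeof(buf));
    printf("WWPN 0x%s\n", buf);    /* WWPN 0x50060b0000c26200 */
    return 0;
}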
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 229e4af5508a..c74c4b8e71ef 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1173,7 +1173,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
struct fc_lport *lport = fip->lp;
struct fc_lport *vn_port = NULL;
u32 desc_mask;
- int is_vn_port = 0;
+ int num_vlink_desc;
+ int reset_phys_port = 0;
+ struct fip_vn_desc **vlink_desc_arr = NULL;
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
@@ -1183,70 +1185,73 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
/*
* mask of required descriptors. Validating each one clears its bit.
*/
- desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);
+ desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
desc = (struct fip_desc *)(fh + 1);
+
+ /*
+ * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
+ * before determining max Vx_Port descriptor but a buggy FCF could have
+ * omited either or both MAC Address and Name Identifier descriptors
+ */
+ num_vlink_desc = rlen / sizeof(*vp);
+ if (num_vlink_desc)
+ vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
+ GFP_ATOMIC);
+ if (!vlink_desc_arr)
+ return;
+ num_vlink_desc = 0;
+
while (rlen >= sizeof(*desc)) {
dlen = desc->fip_dlen * FIP_BPW;
if (dlen > rlen)
- return;
+ goto err;
/* Drop CVL if there are duplicate critical descriptors */
if ((desc->fip_dtype < 32) &&
+ (desc->fip_dtype != FIP_DT_VN_ID) &&
!(desc_mask & 1U << desc->fip_dtype)) {
LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
"Descriptors in FIP CVL\n");
- return;
+ goto err;
}
switch (desc->fip_dtype) {
case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc;
if (dlen < sizeof(*mp))
- return;
+ goto err;
if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
- return;
+ goto err;
desc_mask &= ~BIT(FIP_DT_MAC);
break;
case FIP_DT_NAME:
wp = (struct fip_wwn_desc *)desc;
if (dlen < sizeof(*wp))
- return;
+ goto err;
if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
- return;
+ goto err;
desc_mask &= ~BIT(FIP_DT_NAME);
break;
case FIP_DT_VN_ID:
vp = (struct fip_vn_desc *)desc;
if (dlen < sizeof(*vp))
- return;
- if (compare_ether_addr(vp->fd_mac,
- fip->get_src_addr(lport)) == 0 &&
- get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
- ntoh24(vp->fd_fc_id) == lport->port_id) {
- desc_mask &= ~BIT(FIP_DT_VN_ID);
- break;
+ goto err;
+ vlink_desc_arr[num_vlink_desc++] = vp;
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (vn_port && (vn_port == lport)) {
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
}
- /* check if clr_vlink is for NPIV port */
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vn_port, &lport->vports, list) {
- if (compare_ether_addr(vp->fd_mac,
- fip->get_src_addr(vn_port)) == 0 &&
- (get_unaligned_be64(&vp->fd_wwpn)
- == vn_port->wwpn) &&
- (ntoh24(vp->fd_fc_id) ==
- fc_host_port_id(vn_port->host))) {
- desc_mask &= ~BIT(FIP_DT_VN_ID);
- is_vn_port = 1;
- break;
- }
- }
- mutex_unlock(&lport->lp_mutex);
-
break;
default:
/* standard says ignore unknown descriptors >= 128 */
if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
- return;
+ goto err;
break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
@@ -1256,26 +1261,68 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
/*
* reset only if all required descriptors were present and valid.
*/
- if (desc_mask) {
+ if (desc_mask)
LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
desc_mask);
+ else if (!num_vlink_desc) {
+ LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
+ /*
+ * No Vx_Port description. Clear all NPIV ports,
+ * followed by physical port
+ */
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vn_port, &lport->vports, list)
+ fc_lport_reset(vn_port);
+ mutex_unlock(&lport->lp_mutex);
+
+ mutex_lock(&fip->ctlr_mutex);
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->VLinkFailureCount++;
+ put_cpu();
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
+
+ fc_lport_reset(fip->lp);
+ fcoe_ctlr_solicit(fip, NULL);
} else {
- LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+ int i;
- if (is_vn_port)
- fc_lport_reset(vn_port);
- else {
- mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->dev_stats,
- get_cpu())->VLinkFailureCount++;
- put_cpu();
- fcoe_ctlr_reset(fip);
- mutex_unlock(&fip->ctlr_mutex);
+ LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+ for (i = 0; i < num_vlink_desc; i++) {
+ vp = vlink_desc_arr[i];
+ vn_port = fc_vport_id_lookup(lport,
+ ntoh24(vp->fd_fc_id));
+ if (!vn_port)
+ continue;
+
+ /*
+ * 'port_id' is already validated, check MAC address and
+ * wwpn
+ */
+ if (compare_ether_addr(fip->get_src_addr(vn_port),
+ vp->fd_mac) != 0 ||
+ get_unaligned_be64(&vp->fd_wwpn) !=
+ vn_port->wwpn)
+ continue;
+
+ if (vn_port == lport)
+ /*
+ * Physical port, defer processing till all
+ * listed NPIV ports are cleared
+ */
+ reset_phys_port = 1;
+ else /* NPIV port */
+ fc_lport_reset(vn_port);
+ }
+ if (reset_phys_port) {
fc_lport_reset(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
}
}
+
+err:
+ kfree(vlink_desc_arr);
}
/**
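The reworked CVL handler is a two-pass scheme: pass one validates and collects every Vx_Port descriptor (resetting the FIP controller immediately if one names the physical lport), pass two resets the matching NPIV ports and defers the physical-port reset until the end. A toy sketch of that collect-then-act pattern, with made-up types in place of the FIP descriptors:

#include <stdlib.h>
#include <stdio.h>

/*
 * Toy model only: descriptors are gathered first, NPIV entries are
 * handled as they are found, and the physical-port action is deferred
 * to the end. Types and ids are illustrative, not FIP structures.
 */
struct desc { int port_id; };

int main(void)
{
    struct desc descs[] = { { 2 }, { 0 }, { 5 } };    /* 0 == physical */
    int n = sizeof(descs) / sizeof(descs[0]);
    struct desc **arr = malloc(sizeof(*arr) * n);
    int i, reset_phys = 0;

    if (!arr)
        return 1;
    for (i = 0; i < n; i++)        /* pass 1: collect */
        arr[i] = &descs[i];
    for (i = 0; i < n; i++) {      /* pass 2: act */
        if (arr[i]->port_id == 0)
            reset_phys = 1;        /* defer the physical port */
        else
            printf("reset NPIV port %d\n", arr[i]->port_id);
    }
    if (reset_phys)
        printf("reset physical port last\n");
    free(arr);
    return 0;
}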
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f81f77c8569e..41068e8748e7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -544,16 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
struct fcoe_transport *ft = NULL;
enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -618,16 +608,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -672,16 +652,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
@@ -720,16 +690,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
-#ifdef CONFIG_LIBFCOE_MODULE
- /*
- * Make sure the module has been initialized, and is not about to be
- * removed. Module parameter sysfs files are writable before the
- * module_init function is called and after module_exit.
- */
- if (THIS_MODULE->state != MODULE_STATE_LIVE)
- goto out_nodev;
-#endif
-
mutex_lock(&ft_mutex);
netdev = fcoe_if_to_netdev(buffer);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 12868ca46110..888086c4e709 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5149,21 +5149,21 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
+ num_hrrq = 0;
do {
writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
- if (int_reg & IPR_PCII_HRRQ_UPDATED) {
- ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_HANDLED;
- }
-
} else if (rc == IRQ_NONE && irq_none == 0) {
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
irq_none++;
+ } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
+ int_reg & IPR_PCII_HRRQ_UPDATED) {
+ ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return IRQ_HANDLED;
} else
break;
}
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 911b2736cafa..b9cb8140b398 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -205,6 +205,7 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
default:
FC_DISC_DBG(disc, "Received an unsupported request, "
"the opcode is (%x)\n", op);
+ fc_frame_free(fp);
break;
}
}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 77035a746f60..3b8a6451ea28 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1434,6 +1434,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
(f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
spin_lock_bh(&ep->ex_lock);
+ resp = ep->resp;
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
@@ -1978,6 +1979,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
spin_unlock_bh(&ep->ex_lock);
return sp;
err:
+ fc_fcp_ddp_done(fr_fsp(fp));
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2a3a4720a771..9cd2149519ac 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -312,7 +312,7 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
* DDP related resources for a fcp_pkt
* @fsp: The FCP packet that DDP had been used on
*/
-static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
struct fc_lport *lport;
@@ -681,8 +681,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
error = lport->tt.seq_send(lport, seq, fp);
if (error) {
WARN_ON(1); /* send error should be rare */
- fc_fcp_retry_cmd(fsp);
- return 0;
+ return error;
}
fp = NULL;
}
@@ -1673,7 +1672,8 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
FC_FCTL_REQ, 0);
rec_tov = get_fsp_rec_tov(fsp);
- seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
+ seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
fsp, jiffies_to_msecs(rec_tov));
if (!seq)
goto retry;
@@ -1720,7 +1720,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
return;
}
- fsp->recov_seq = NULL;
switch (fc_frame_payload_op(fp)) {
case ELS_LS_ACC:
fsp->recov_retry = 0;
@@ -1732,10 +1731,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
}
fc_fcp_unlock_pkt(fsp);
- fsp->lp->tt.exch_done(seq);
out:
+ fsp->lp->tt.exch_done(seq);
fc_frame_free(fp);
- fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
}
/**
@@ -1747,8 +1745,6 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
if (fc_fcp_lock_pkt(fsp))
goto out;
- fsp->lp->tt.exch_done(fsp->recov_seq);
- fsp->recov_seq = NULL;
switch (PTR_ERR(fp)) {
case -FC_EX_TIMEOUT:
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
@@ -1764,7 +1760,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
}
fc_fcp_unlock_pkt(fsp);
out:
- fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
+ fsp->lp->tt.exch_done(fsp->recov_seq);
}
/**
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index fedc819d70c0..c7d071289af5 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -108,6 +108,7 @@ extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
* Set up direct-data placement for this I/O request
*/
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
/*
* Module setup functions
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 31fc21f4d831..db9238f2ecb8 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -99,19 +99,29 @@ static void sas_ata_task_done(struct sas_task *task)
struct sas_ha_struct *sas_ha;
enum ata_completion_errors ac;
unsigned long flags;
+ struct ata_link *link;
if (!qc)
goto qc_already_gone;
dev = qc->ap->private_data;
sas_ha = dev->port->ha;
+ link = &dev->sata_dev.ap->link;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+
+ if (!link->sactive) {
+ qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ } else {
+ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ if (unlikely(link->eh_info.err_mask))
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
dev->sata_dev.sstatus = resp->sstatus;
dev->sata_dev.serror = resp->serror;
dev->sata_dev.scontrol = resp->scontrol;
@@ -121,7 +131,13 @@ static void sas_ata_task_done(struct sas_task *task)
SAS_DPRINTK("%s: SAS error %x\n", __func__,
stat->stat);
/* We saw a SAS error. Send a vague error. */
- qc->err_mask = ac;
+ if (!link->sactive) {
+ qc->err_mask = ac;
+ } else {
+ link->eh_info.err_mask |= AC_ERR_DEV;
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
dev->sata_dev.tf.feature = 0x04; /* status err */
dev->sata_dev.tf.command = ATA_ERR;
}
@@ -279,6 +295,44 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
return ret;
}
+static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i =
+ to_sas_internal(dev->port->ha->core.shost->transportt);
+ int res = TMF_RESP_FUNC_FAILED;
+ int ret = 0;
+
+ if (i->dft->lldd_ata_soft_reset)
+ res = i->dft->lldd_ata_soft_reset(dev);
+
+ if (res != TMF_RESP_FUNC_COMPLETE) {
+ SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
+ ret = -EAGAIN;
+ }
+
+ switch (dev->sata_dev.command_set) {
+ case ATA_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATA device.\n", __func__);
+ *class = ATA_DEV_ATA;
+ break;
+ case ATAPI_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
+ *class = ATA_DEV_ATAPI;
+ break;
+ default:
+ SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
+ __func__, dev->sata_dev.command_set);
+ *class = ATA_DEV_UNKNOWN;
+ break;
+ }
+
+ ap->cbl = ATA_CBL_SATA;
+ return ret;
+}
+
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
if (qc->flags & ATA_QCFLAG_FAILED)
@@ -309,7 +363,7 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
static struct ata_port_operations sas_sata_ops = {
.prereset = ata_std_prereset,
- .softreset = NULL,
+ .softreset = sas_ata_soft_reset,
.hardreset = sas_ata_hard_reset,
.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
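The change to sas_ata_task_done() routes device errors differently depending on whether NCQ commands are outstanding: with link->sactive set, the error goes into link->eh_info.err_mask and the qc is only flagged ATA_QCFLAG_FAILED, so libata EH can work out which tag actually failed. A standalone sketch of that routing decision (the structs below only mimic the libata fields):

#include <stdio.h>

/*
 * Sketch of the error-routing decision added to sas_ata_task_done();
 * the structs only mimic the relevant libata fields.
 */
struct fake_link { unsigned int sactive; unsigned int eh_err_mask; };
struct fake_qc   { unsigned int err_mask; int failed; };

static void route_error(struct fake_link *link, struct fake_qc *qc,
                        unsigned int err)
{
    if (!link->sactive) {
        qc->err_mask |= err;          /* non-NCQ: blame this qc */
    } else {
        link->eh_err_mask |= err;     /* NCQ: record it on the link */
        if (link->eh_err_mask)
            qc->failed = 1;           /* let EH work out which tag */
    }
}

int main(void)
{
    struct fake_link link = { 0x3, 0 };    /* two NCQ tags outstanding */
    struct fake_qc qc = { 0, 0 };

    route_error(&link, &qc, 0x4);
    printf("link err %x, qc failed %d\n", link.eh_err_mask, qc.failed);
    return 0;
}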
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 8b538bd1ff2b..14e21b5fb8ba 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -57,7 +57,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
-void sas_deform_port(struct asd_sas_phy *phy);
+void sas_deform_port(struct asd_sas_phy *phy, int gone);
void sas_porte_bytes_dmaed(struct work_struct *work);
void sas_porte_broadcast_rcvd(struct work_struct *work);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index b459c4b635b1..e0f5018e9071 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -39,7 +39,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
&phy->phy_events_pending);
phy->error = 0;
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
static void sas_phye_oob_done(struct work_struct *work)
@@ -66,7 +66,7 @@ static void sas_phye_oob_error(struct work_struct *work)
sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
&phy->phy_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
phy->error++;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 5257fdfe699a..42fd1f25b664 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -57,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (port) {
if (!phy_is_wideport_member(port, phy))
- sas_deform_port(phy);
+ sas_deform_port(phy, 0);
else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
__func__, phy->id, phy->port->id,
@@ -153,28 +153,31 @@ static void sas_form_port(struct asd_sas_phy *phy)
* This is called when the physical link to the other phy has been
* lost (on this phy), in Event thread context. We cannot delay here.
*/
-void sas_deform_port(struct asd_sas_phy *phy)
+void sas_deform_port(struct asd_sas_phy *phy, int gone)
{
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
+ struct domain_device *dev;
unsigned long flags;
if (!port)
return; /* done by a phy event */
- if (port->port_dev)
- port->port_dev->pathways--;
+ dev = port->port_dev;
+ if (dev)
+ dev->pathways--;
if (port->num_phys == 1) {
+ if (dev && gone)
+ dev->gone = 1;
sas_unregister_domain_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else
sas_port_delete_phy(port->port, phy->phy);
-
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
@@ -244,7 +247,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
void sas_porte_timer_event(struct work_struct *work)
@@ -256,7 +259,7 @@ void sas_porte_timer_event(struct work_struct *work)
sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
void sas_porte_hard_reset(struct work_struct *work)
@@ -268,7 +271,7 @@ void sas_porte_hard_reset(struct work_struct *work)
sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
&phy->port_events_pending);
- sas_deform_port(phy);
+ sas_deform_port(phy, 1);
}
/* ---------- SAS port registration ---------- */
@@ -306,6 +309,6 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
for (i = 0; i < sas_ha->num_phys; i++)
if (sas_ha->sas_phy[i]->port)
- sas_deform_port(sas_ha->sas_phy[i]);
+ sas_deform_port(sas_ha->sas_phy[i], 0);
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f6e189f40917..eeba76cdf774 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -207,6 +207,13 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
struct sas_ha_struct *sas_ha = dev->port->ha;
struct sas_task *task;
+ /* If the device fell off, no sense in issuing commands */
+ if (dev->gone) {
+ cmd->result = DID_BAD_TARGET << 16;
+ scsi_done(cmd);
+ goto out;
+ }
+
if (dev_is_sata(dev)) {
unsigned long flags;
@@ -216,13 +223,6 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
goto out;
}
- /* If the device fell off, no sense in issuing commands */
- if (dev->gone) {
- cmd->result = DID_BAD_TARGET << 16;
- scsi_done(cmd);
- goto out;
- }
-
res = -ENOMEM;
task = sas_create_task(cmd, dev, GFP_ATOMIC);
if (!task)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 02d53d89534f..8ec2c86a49d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
downloads using bsg */
#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
@@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
(1 << LPFC_USER_LINK_SPEED_AUTO))
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+enum nemb_type {
+ nemb_mse = 1,
+ nemb_hbd
+};
+
+enum mbox_type {
+ mbox_rd = 1,
+ mbox_wr
+};
+
+enum dma_type {
+ dma_mbox = 1,
+ dma_ebuf
+};
+
+enum sta_type {
+ sta_pre_addr = 1,
+ sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+ uint32_t state;
+#define LPFC_BSG_MBOX_IDLE 0
+#define LPFC_BSG_MBOX_HOST 1
+#define LPFC_BSG_MBOX_PORT 2
+#define LPFC_BSG_MBOX_DONE 3
+#define LPFC_BSG_MBOX_ABTS 4
+ enum nemb_type nembType;
+ enum mbox_type mboxType;
+ uint32_t numBuf;
+ uint32_t mbxTag;
+ uint32_t seqNum;
+ struct lpfc_dmabuf *mbx_dmabuf;
+ struct list_head ext_dmabuf_list;
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
@@ -589,6 +626,7 @@ struct lpfc_hba {
MAILBOX_t *mbox;
uint32_t *mbox_ext;
+ struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
uint32_t ha_copy;
struct _PCB *pcb;
struct _IOCB *IOCBs;
@@ -659,6 +697,7 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
+ uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
@@ -706,7 +745,6 @@ struct lpfc_hba {
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
int brd_no; /* FC board number */
-
char SerialNumber[32]; /* adapter Serial Number */
char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
char ModelDesc[256]; /* Model Description */
@@ -778,6 +816,9 @@ struct lpfc_hba {
uint16_t vpi_base;
uint16_t vfi_base;
unsigned long *vpi_bmask; /* vpi allocation table */
+ uint16_t *vpi_ids;
+ uint16_t vpi_count;
+ struct list_head lpfc_vpi_blk_list;
/* Data structure used by fabric iocb scheduler */
struct list_head fabric_iocb_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8dcbf8fff673..135a53baa735 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+ struct completion online_compl;
+ uint32_t reg_val;
+ int status = 0;
+ int rc;
+
+ if (!phba->cfg_enable_hba_reset)
+ return -EIO;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+ if (status != 0)
+ return status;
+
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
+
+ reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+
+ if (opcode == LPFC_FW_DUMP)
+ reg_val |= LPFC_FW_DUMP_REQUEST;
+ else if (opcode == LPFC_FW_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+ else if (opcode == LPFC_DV_RESET)
+ reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+ writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET);
+ /* flush */
+ readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* delay driver action following IF_TYPE_2 reset */
+ msleep(100);
+
+ init_completion(&online_compl);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
* lpfc_nport_evt_cnt_show - Return the number of nport events
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+ else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+ else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+ else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+ status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
else
return -EINVAL;
@@ -1322,6 +1395,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted maximum number of virtual functions.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support; 0 will be returned if called on a virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
+ struct lpfc_rsrc_desc_pcie *desc;
+ uint32_t max_nr_virtfn;
+ uint32_t desc_count;
+ int length, rc, i;
+
+ if ((phba->sli_rev < LPFC_SLI_REV4) ||
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2))
+ return -EPERM;
+
+ if (!pdev->is_physfn)
+ return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /* get the maximum number of virtfn support by physfn */
+ length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
+ phba->sli4_hba.iov.pf_number + 1);
+
+ get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
+ bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
+ LPFC_CFG_TYPE_CURRENT_ACTIVE);
+
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+ lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+
+ if (rc != MBX_TIMEOUT) {
+ /* check return status */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (shdr_status || shdr_add_status || rc)
+ goto error_out;
+
+ } else
+ goto error_out;
+
+ desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_pcie *)
+ &get_prof_cfg->u.response.prof_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_PCIE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
+ desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+ }
+
+error_out:
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+ lpfc_sriov_hw_max_virtfn_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing enable or disable aer flag.
* @count: unused variable.
*
* Description:
@@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
/**
* lpfc_aer_support_init - Set the initial adapters aer support flag
* @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @val: enable aer or disable aer flag.
*
* Description:
* If val is in a valid range [0,1], then set the adapter's initial
@@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
* lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing flag 1 for aer cleanup state.
* @count: unused variable.
*
* Description:
@@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
lpfc_aer_cleanup_state);
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this api is called through the user sysfs interface, the driver shall
+ * try to enable or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If no virtual functions have been enabled on the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, the maximum virtual function number will
+ * be used for invoking the api; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If non-zero virtual functions have already been enabled to the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * -EEXIST will be returned and the driver does nothing;
+ *
+ * If the nr_vfn provided is zero and non-zero virtual functions have
+ * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api shall be invoked to disable all the
+ * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
+ * zero. Otherwise, if no virtual functions have been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct pci_dev *pdev = phba->pcidev;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val < 0)
+ return -EINVAL;
+
+ /* Request disabling virtual functions */
+ if (val == 0) {
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ return strlen(buf);
+ }
+
+ /* Request enabling virtual functions */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3018 There are %d virtual functions "
+ "enabled on physical function.\n",
+ phba->cfg_sriov_nr_virtfn);
+ return -EEXIST;
+ }
+
+ if (val <= LPFC_MAX_VFN_PER_PFN)
+ phba->cfg_sriov_nr_virtfn = val;
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3019 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+ }
+
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ phba->cfg_sriov_nr_virtfn = 0;
+ rc = -EPERM;
+ } else
+ rc = strlen(buf);
+
+ return rc;
+}
+
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: number of sr-iov virtual functions to enable.
+ *
+ * Description:
+ * If val is in a valid range [0,255], then set the adapter's initial
+ * cfg_sriov_nr_virtfn field; values outside this range are rejected with
+ * -EINVAL. It will be up to the driver's probe_one
+ * routine to determine whether the device's SR-IOV is supported or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+ phba->cfg_sriov_nr_virtfn = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3017 Enabling %d virtual functions is not "
+ "allowed.\n", val);
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+ lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
@@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_prot_sg_seg_cnt,
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
+ &dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_suppress_link_up,
&dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
@@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
+ &dev_attr_lpfc_sriov_hw_max_virtfn,
NULL,
};
@@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
},
- .size = MAILBOX_CMD_SIZE,
+ .size = MAILBOX_SYSFS_MAX,
.read = sysfs_mbox_read,
.write = sysfs_mbox_write,
};
@@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
+ lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
phba->cfg_enable_dss = 1;
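The lpfc_sriov_nr_virtfn sysfs attribute added above follows a small decision table: zero disables whatever is enabled, a non-zero request is refused while VFs are already active, and anything above the per-PF maximum is rejected. A standalone sketch of that flow, where MAX_VFN stands in for LPFC_MAX_VFN_PER_PFN (assumed to be 255 from the documented [0,255] range) and the pci_enable_sriov() call is elided:

#include <stdio.h>
#include <errno.h>

/*
 * Standalone sketch of the lpfc_sriov_nr_virtfn_store() decision flow;
 * the harness and MAX_VFN value are assumptions, only the branching
 * mirrors the patch.
 */
#define MAX_VFN 255

static int store_nr_virtfn(int *cur_vfs, int req)
{
    if (req < 0)
        return -EINVAL;
    if (req == 0) {            /* disable whatever is enabled */
        *cur_vfs = 0;
        return 0;
    }
    if (*cur_vfs > 0)          /* VFs already active: refuse */
        return -EEXIST;
    if (req > MAX_VFN)         /* beyond the per-PF maximum */
        return -EINVAL;
    *cur_vfs = req;            /* pci_enable_sriov() would run here */
    return 0;
}

int main(void)
{
    int cur = 0;

    printf("%d\n", store_nr_virtfn(&cur, 4));    /* 0: enabled 4 VFs */
    printf("%d\n", store_nr_virtfn(&cur, 2));    /* -EEXIST */
    printf("%d\n", store_nr_virtfn(&cur, 0));    /* 0: disabled again */
    return 0;
}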
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e5042f39c..7fb0ba4cbfa7 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
- struct lpfc_dmabuf *rxbmp; /* for BIU diags */
- struct lpfc_dmabufext *dmp; /* for BIU diags */
+ struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
uint8_t *ext; /* extended mailbox data */
uint32_t mbOffset; /* from app */
uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
cmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
cmd->ulpOwner = OWN_CHIP;
cmdiocbq->vport = phba->pport;
cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
}
icmd->un.ulpWord[3] = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext =
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
} else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
}
/**
- * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
+ * lpfc_bsg_diag_mode_enter - prepare for entering device diag loopback mode
+ * @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
*
- * This function is responsible for placing a port into diagnostic loopback
- * mode in order to perform a diagnostic loopback test.
+ * This function is responsible for preparing the driver for diag loopback
+ * on the device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i = 0;
+
+ psli = &phba->sli;
+ if (!psli)
+ return -ENODEV;
+
+ pring = &psli->ring[LPFC_FCP_RING];
+ if (!pring)
+ return -ENODEV;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ return -EACCES;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_block_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_block_requests(shost);
+ }
+
+ while (pring->txcmplq_cnt) {
+ if (i++ > 500) /* wait up to 5 seconds */
+ break;
+ msleep(10);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for the driver's exit processing after
+ * setting up diag loopback mode on the device.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_unblock_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_unblock_requests(shost);
+ }
+ return;
+}
+
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
* All new scsi requests are blocked, a small delay is used to allow the
* scsi requests to complete then the link is brought down. If the link is
 * placed in loopback mode then scsi requests are again allowed.
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
* All of this is done in-line.
*/
static int
-lpfc_bsg_diag_mode(struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
- struct Scsi_Host *shost = job->shost;
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
struct diag_mode_set *loopback_mode;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
uint32_t link_flags;
uint32_t timeout;
- struct lpfc_vport **vports;
LPFC_MBOXQ_t *pmboxq;
int mbxstatus;
int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
- if (job->request_len <
- sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2738 Received DIAG MODE request below minimum "
- "size\n");
+ "2738 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
rc = -EINVAL;
goto job_error;
}
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
- if ((phba->link_state == LPFC_HBA_ERROR) ||
- (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
- rc = -EACCES;
- goto job_error;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
- goto job_error;
- }
-
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_block_requests(shost);
- }
-
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_block_requests(shost);
+ goto loopback_mode_exit;
}
-
- while (pring->txcmplq_cnt) {
- if (i++ > 500) /* wait up to 5 seconds */
- break;
-
- msleep(10);
- }
-
memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
rc = -ENODEV;
loopback_mode_exit:
- vports = lpfc_create_vport_work_array(phba);
- if (vports) {
- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- scsi_unblock_requests(shost);
+ lpfc_bsg_diag_mode_exit(phba);
+
+ /*
+ * Let SLI layer release mboxq if mbox command completed after timeout.
+ */
+ if (mbxstatus != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag to set the link to diag or normal operation state.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ struct lpfc_mbx_set_link_diag_state *link_diag_state;
+ uint32_t req_len, alloc_len;
+ int mbxstatus = MBX_SUCCESS, rc;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_state_set_out;
+ }
+ link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+ bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+ phba->sli4_hba.link_state.type);
+ if (diag)
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 1);
+ else
+ bf_set(lpfc_mbx_set_diag_state_diag,
+ &link_diag_state->u.req, 0);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+ rc = 0;
+ else
+ rc = -ENODEV;
+
+link_diag_state_set_out:
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct diag_mode_set *loopback_mode;
+ uint32_t link_flags, timeout, req_len, alloc_len;
+ struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ int mbxstatus, i, rc = 0;
+
+ /* no data to return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3011 Received DIAG MODE request size:%d "
+ "below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_mode_set)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ /* bring the link to diagnostic mode */
+ loopback_mode = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+ if (rc)
+ goto loopback_mode_exit;
+
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ goto loopback_mode_exit;
+ }
+ msleep(10);
+ }
+ /* set up loopback mode */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto loopback_mode_exit;
+ }
+ link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+ bf_set(lpfc_mbx_set_diag_state_link_num,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
+ if (link_flags == INTERNAL_LOOP_BACK)
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+ else
+ bf_set(lpfc_mbx_set_diag_lpbk_type,
+ &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+ rc = -ENODEV;
+ else {
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ /* wait for the link attention interrupt */
+ msleep(100);
+ i = 0;
+ while (phba->link_state != LPFC_HBA_READY) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+ msleep(10);
}
- lpfc_destroy_vport_work_array(phba, vports);
- } else {
- shost = lpfc_shost_from_vport(phba->pport);
- scsi_unblock_requests(shost);
}
+loopback_mode_exit:
+ lpfc_bsg_diag_mode_exit(phba);
+
/*
* Let SLI layer release mboxq if mbox command completed after timeout.
*/
- if (mbxstatus != MBX_TIMEOUT)
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
@@ -1622,6 +1846,234 @@ job_error:
}
/**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+ else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2)
+ rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+ else
+ rc = -ENODEV;
+
+ return rc;
+
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ int rc;
+
+ shost = job->shost;
+ if (!shost)
+ return -ENODEV;
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport)
+ return -ENODEV;
+ phba = vport->phba;
+ if (!phba)
+ return -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return -ENODEV;
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (!rc)
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function performs the SLI4 diag link test request from the user
+ * application.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmboxq;
+ struct sli4_link_diag *link_diag_test_cmd;
+ uint32_t req_len, alloc_len;
+ uint32_t timeout;
+ struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct diag_status *diag_status_reply;
+ int mbxstatus, rc = 0;
+
+ shost = job->shost;
+ if (!shost) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ vport = (struct lpfc_vport *)job->shost->hostdata;
+ if (!vport) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ phba = vport->phba;
+ if (!phba) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3013 Received LINK DIAG TEST request "
+ " size:%d below the minimum size:%d\n",
+ job->request_len,
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct sli4_link_diag)));
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_diag_mode_enter(phba, job);
+ if (rc)
+ goto job_error;
+
+ link_diag_test_cmd = (struct sli4_link_diag *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ timeout = link_diag_test_cmd->timeout * 100;
+
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+ if (rc)
+ goto job_error;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ rc = -ENOMEM;
+ goto link_diag_test_exit;
+ }
+ run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+ bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.number);
+ bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+ phba->sli4_hba.link_state.type);
+ bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_id);
+ bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+ link_diag_test_cmd->loops);
+ bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+ link_diag_test_cmd->test_version);
+ bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+ link_diag_test_cmd->error_action);
+
+ mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || mbxstatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3010 Run link diag test mailbox failed with "
+ "mbx_status x%x status x%x, add_status x%x\n",
+ mbxstatus, shdr_status, shdr_add_status);
+ }
+
+ diag_status_reply = (struct diag_status *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3012 Received Run link diag test reply "
+ "below minimum size (%d): reply_len:%d\n",
+ (int)(sizeof(struct fc_bsg_request) +
+ sizeof(struct diag_status)),
+ job->reply_len);
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ diag_status_reply->mbox_status = mbxstatus;
+ diag_status_reply->shdr_status = shdr_status;
+ diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+ rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
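For orientation, here is a hedged user-space sketch of the request this handler consumes. The field names come from struct sli4_link_diag and the LPFC_BSG_VENDOR_LINK_DIAG_TEST define added later in this patch (lpfc_bsg.h); the numeric values and the submit_fc_bsg_vendor() helper are placeholders, not part of the driver.

/* Hypothetical user-space sketch: build the LPFC_BSG_VENDOR_LINK_DIAG_TEST
 * vendor payload.  All numeric values below are placeholders, and
 * submit_fc_bsg_vendor() stands in for whatever FC BSG submission path the
 * management tool actually uses.
 */
struct sli4_link_diag req;

memset(&req, 0, sizeof(req));
req.command      = LPFC_BSG_VENDOR_LINK_DIAG_TEST;
req.timeout      = 60;   /* placeholder timeout value */
req.test_id      = 1;    /* placeholder: which diagnostic to run */
req.loops        = 1;    /* placeholder iteration count */
req.test_version = 0;
req.error_action = 0;

/* the driver replies with a struct diag_status carrying mbox_status,
 * shdr_status and shdr_add_status */
submit_fc_bsg_vendor(&req, sizeof(req));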
+
+/**
* lpfcdiag_loop_self_reg - obtains a remote port login id
* @phba: Pointer to HBA context object
* @rpi: Pointer to a remote port login id
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
}
/**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
+ * returns a pointer to the buffer.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+ struct lpfc_dmabuf *dmabuf;
+ struct pci_dev *pcidev = phba->pcidev;
+
+ /* allocate dma buffer struct */
+ dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ INIT_LIST_HEAD(&dmabuf->list);
+
+ /* now, allocate dma buffer */
+ dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
+
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ return NULL;
+ }
+ memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
+
+ return dmabuf;
+}
+
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine frees a dma buffer and its associated buffer
+ * descriptor referred by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+ struct pci_dev *pcidev = phba->pcidev;
+
+ if (!dmabuf)
+ return;
+
+ if (dmabuf->virt)
+ dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine frees all dma buffers and their associated buffer
+ * descriptors referred by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+ struct list_head *dmabuf_list)
+{
+ struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+ if (list_empty(dmabuf_list))
+ return;
+
+ list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+ list_del_init(&dmabuf->list);
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ }
+ return;
+}
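As a usage sketch (not part of the patch), the three helpers above are meant to be paired roughly as follows; phba is assumed to be a valid HBA context already in scope, and bsg_buf_list is a local name used only for illustration.

/* Illustrative only: allocate a few BSG_MBOX_SIZE dma pages onto a local
 * list, then release them all with the list helper.  A failed allocation
 * simply stops the loop; whatever was queued is still freed below.
 */
struct list_head bsg_buf_list;
struct lpfc_dmabuf *dmabuf;
int i;

INIT_LIST_HEAD(&bsg_buf_list);
for (i = 0; i < 3; i++) {
	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf)
		break;
	list_add_tail(&dmabuf->list, &bsg_buf_list);
}

/* ... fill or consume the buffers via dmabuf->virt / dmabuf->phys ... */

lpfc_bsg_dma_page_list_free(phba, &bsg_buf_list);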
+
+/**
* diag_cmd_data_alloc - fills in a bde struct with dma buffers
* @phba: Pointer to HBA context object
* @bpl: Pointer to 64 bit bde structure
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
}
/**
- * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a CT cmd to itself
* @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
*
* This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_test(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
}
/**
- * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
@@ -2422,15 +2954,13 @@ job_error:
* of the mailbox.
**/
void
-lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
- struct lpfc_mbx_nembed_cmd *nembed_sge;
uint32_t size;
unsigned long flags;
- uint8_t *to;
- uint8_t *from;
+ uint8_t *pmb, *pmb_buf;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
- /* build the outgoing buffer to do an sg copy
- * the format is the response mailbox followed by any extended
- * mailbox data
+ /*
+ * The outgoing buffer is already referenced by the dma buffer;
+ * only the mailbox header part needs to be copied from the mailboxq structure.
*/
- from = (uint8_t *)&pmboxq->u.mb;
- to = (uint8_t *)dd_data->context_un.mbox.mb;
- memcpy(to, from, sizeof(MAILBOX_t));
- if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
- /* copy the extended data if any, count is in words */
- if (dd_data->context_un.mbox.outExtWLen) {
- from = (uint8_t *)dd_data->context_un.mbox.ext;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.outExtWLen *
- sizeof(uint32_t);
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
- from = (uint8_t *)dd_data->context_un.mbox.
- dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = pmboxq->u.mb.un.varWords[5];
- memcpy(to, from, size);
- } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmboxq->u.mb.un.varWords[0];
-
- from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
- virt;
- to += sizeof(MAILBOX_t);
- size = nembed_sge->sge[0].length;
- memcpy(to, from, size);
- } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
- from = (uint8_t *)dd_data->context_un.
- mbox.dmp->dma.virt;
- to += sizeof(MAILBOX_t);
- size = dd_data->context_un.mbox.dmp->size;
- memcpy(to, from, size);
- }
- }
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
- from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
if (job) {
size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- from, size);
- job->reply->result = 0;
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
/* need to hold the lock until we set job->dd_data to NULL
* to hold off the timeout handler returning to the mid-layer
* while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
job->dd_data = NULL;
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
} else {
dd_data->context_un.mbox.set_job = NULL;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
- kfree(dd_data->context_un.mbox.mb);
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
- kfree(dd_data->context_un.mbox.ext);
- if (dd_data->context_un.mbox.dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dd_data->context_un.mbox.dmp->size,
- dd_data->context_un.mbox.dmp->dma.virt,
- dd_data->context_un.mbox.dmp->dma.phys);
- kfree(dd_data->context_un.mbox.dmp);
- }
- if (dd_data->context_un.mbox.rxbmp) {
- lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
- dd_data->context_un.mbox.rxbmp->phys);
- kfree(dd_data->context_un.mbox.rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
kfree(dd_data);
+
+ if (job) {
+ job->reply->result = 0;
+ job->job_done(job);
+ }
return;
}
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
}
/**
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine cleans up and resets the BSG handling of a multi-buffer mbox
+ * command session.
+ **/
+static void
+lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
+ return;
+
+ /* free all memory, including dma buffers */
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
+ /* multi-buffer write mailbox command pass-through complete */
+ memset((char *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This routine handles BSG job completion for mailbox commands with
+ * multiple external buffers.
+ **/
+static struct fc_bsg_job *
+lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ uint8_t *pmb, *pmb_buf;
+ unsigned long flags;
+ uint32_t size;
+ int rc = 0;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = pmboxq->context1;
+ /* has the job already timed out? */
+ if (!dd_data) {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job = NULL;
+ goto job_done_out;
+ }
+
+ /*
+ * The outgoing buffer is already referenced by the dma buffer;
+ * only the mailbox header part needs to be copied from the mailboxq structure.
+ */
+ pmb = (uint8_t *)&pmboxq->u.mb;
+ pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+
+ job = dd_data->context_un.mbox.set_job;
+ if (job) {
+ size = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, size);
+ /* result for successful completion */
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ /* need to hold the lock until we set job->dd_data to NULL
+ * to hold off the timeout handler in the midlayer from
+ * taking any action.
+ */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2937 SLI_CONFIG ext-buffer maibox command "
+ "(x%x/x%x) complete bsg job done, bsize:%d\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, size);
+ } else
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+job_done_out:
+ if (!job)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2938 SLI_CONFIG ext-buffer maibox "
+ "command (x%x/x%x) failure, rc:x%x\n",
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType, rc);
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
+ kfree(dd_data);
+
+ return job;
+}
+
+/**
+ * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox read commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2939 SLI_CONFIG ext-buffer rd maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* free base driver mailbox structure memory */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
+/**
+ * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox write commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct fc_bsg_job *job;
+
+ /* handle the BSG job with the mailbox command */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2940 SLI_CONFIG ext-buffer wr maibox command "
+ "complete, ctxState:x%x, mbxStatus:x%x\n",
+ phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+ /* free all memory, including dma buffers */
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ /* complete the bsg job if we have it */
+ if (job)
+ job->job_done(job);
+
+ return;
+}
+
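+/**
+ * lpfc_bsg_sli_cfg_dma_desc_setup - set up one sli_config external buffer desc
+ * @phba: Pointer to HBA context object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @index: Index of the external buffer descriptor to set up.
+ * @mbx_dmabuf: Pointer to the bsg mailbox page sized dma buffer descriptor.
+ * @ext_dmabuf: Pointer to an external page sized dma buffer descriptor.
+ *
+ * This routine fills in the physical address of one external buffer (MSE or
+ * HBD) descriptor inside the SLI_CONFIG mailbox held in @mbx_dmabuf:
+ * descriptor 0 points just past the mailbox header within @mbx_dmabuf, while
+ * later descriptors point at @ext_dmabuf.
+ **/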
+static void
+lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+ uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
+ struct lpfc_dmabuf *ext_dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2943 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ } else {
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2944 SLI_CONFIG(mse)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].buf_len,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[index].pa_lo);
+ }
+ } else {
+ if (index == 0) {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(mbx_dmabuf->phys +
+ sizeof(MAILBOX_t));
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3007 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+
+ } else {
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi =
+ putPaddrHigh(ext_dmabuf->phys);
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo =
+ putPaddrLow(ext_dmabuf->phys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3008 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+ index,
+ bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.
+ sli_config_emb1_subsys.hbd[index]),
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_hi,
+ sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[index].pa_lo);
+ }
+ }
+ return;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
+ * non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ uint32_t ext_buf_cnt, ext_buf_index;
+ struct lpfc_dmabuf *ext_dmabuf = NULL;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *pmbx;
+ int rc, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2945 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2941 Handled SLI_CONFIG(mse) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ rc = -ENODEV;
+ goto job_error;
+ }
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2946 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ rc = -ERANGE;
+ goto job_error;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2942 Handled SLI_CONFIG(hbd) rd, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ /* reject non-embedded mailbox command with no external buffer */
+ if (ext_buf_cnt == 0) {
+ rc = -EPERM;
+ goto job_error;
+ } else if (ext_buf_cnt > 1) {
+ /* additional external read buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ }
+ }
+
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* for the rest of external buffer descriptors if any */
+ if (ext_buf_cnt > 1) {
+ ext_buf_index = 1;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
+ ext_buf_index, dmabuf,
+ curr_dmabuf);
+ ext_buf_index++;
+ }
+ }
+
+ /* construct base driver mbox command */
+ pmb = &pmboxq->u.mb;
+ pmbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, pmbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ /* callback for multi-buffer read mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2947 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2948 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_bsg_dma_page_list_free(phba,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ kfree(dd_data);
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
+ * non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ enum nemb_type nemb_tp,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t ext_buf_cnt;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ uint8_t *mbx;
+ int rc = 0, i;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* pointer to the start of mailbox command */
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (nemb_tp == nemb_mse) {
+ ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2953 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_MSE);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2949 Handled SLI_CONFIG(mse) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ } else {
+ /* sanity check on interface type for support */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -ENODEV;
+ /* nemb_tp == nemb_hbd */
+ ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+ if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2954 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt(%d) out of range(%d)\n",
+ ext_buf_cnt,
+ LPFC_MBX_SLI_CONFIG_MAX_HBD);
+ return -ERANGE;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2950 Handled SLI_CONFIG(hbd) wr, "
+ "ext_buf_cnt:%d\n", ext_buf_cnt);
+ }
+
+ if (ext_buf_cnt == 0)
+ return -EPERM;
+
+ /* for the first external buffer */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+ /* log the lengths of the remaining external buffers */
+ for (i = 1; i < ext_buf_cnt; i++) {
+ if (nemb_tp == nemb_mse)
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
+ i, sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[i].buf_len);
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
+ i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.
+ hbd[i]));
+ }
+
+ /* multi-buffer handling context */
+ phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+ phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
+ phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+ phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+ phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+ phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+ if (ext_buf_cnt == 1) {
+ /* bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb = &pmboxq->u.mb;
+ mbx = (uint8_t *)dmabuf->virt;
+ memcpy(pmb, mbx, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2955 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2956 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ }
+
+job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
+ * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
+ * with embedded subsystem 0x1 and opcodes with external HBDs.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint32_t subsys;
+ uint32_t opcode;
+ int rc = SLI_CONFIG_NOT_HANDLED;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
+ switch (opcode) {
+ case FCOE_OPCODE_READ_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2957 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ case FCOE_OPCODE_ADD_FCF:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2958 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2959 Not handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2977 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ } else {
+ subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys);
+ if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ case COMN_OPCODE_READ_OBJECT_LIST:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2960 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ case COMN_OPCODE_WRITE_OBJECT:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2961 Handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+ nemb_hbd, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2962 Not handled SLI_CONFIG "
+ "subsys_comn, opcode:x%x\n",
+ opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ break;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2978 Handled SLI_CONFIG "
+ "subsys:x%d, opcode:x%x\n",
+ subsys, opcode);
+ rc = SLI_CONFIG_NOT_HANDLED;
+ }
+ }
+ return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine requests to abort a pass-through mailbox command with multiple
+ * external buffers due to an error condition.
+ **/
+static void
+lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
+{
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+ else
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ return;
+}
+
+/**
+ * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ *
+ * This routine extracts the next mailbox read external buffer back to
+ * user space through BSG.
+ **/
+static int
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_dmabuf *dmabuf;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2963 SLI_CONFIG (mse) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ } else {
+ size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+ &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2964 SLI_CONFIG (hbd) ext-buffer rd get "
+ "buffer[%d], size:%d\n", index, size);
+ }
+ if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
+ return -EPIPE;
+ dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ struct lpfc_dmabuf, list);
+ list_del_init(&dmabuf->list);
+ pbuf = (uint8_t *)dmabuf->virt;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pbuf, size);
+
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
+ "command session done\n");
+ lpfc_bsg_mbox_ext_session_reset(phba);
+ }
+
+ job->reply->result = 0;
+ job->job_done(job);
+
+ return SLI_CONFIG_HANDLED;
+}
+
+/**
+ * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine sets up the next mailbox write external buffer obtained
+ * from user space through BSG.
+ **/
+static int
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct bsg_job_data *dd_data = NULL;
+ LPFC_MBOXQ_t *pmboxq = NULL;
+ MAILBOX_t *pmb;
+ enum nemb_type nemb_tp;
+ uint8_t *pbuf;
+ uint32_t size;
+ uint32_t index;
+ int rc;
+
+ index = phba->mbox_ext_buf_ctx.seqNum;
+ phba->mbox_ext_buf_ctx.seqNum++;
+ nemb_tp = phba->mbox_ext_buf_ctx.nembType;
+
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+ phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ pbuf = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ pbuf, size);
+
+ if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2966 SLI_CONFIG (mse) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2967 SLI_CONFIG (hbd) ext-buffer wr set "
+ "buffer[%d], size:%d\n",
+ phba->mbox_ext_buf_ctx.seqNum, size);
+
+ }
+
+ /* set up external buffer descriptor and add to external buffer list */
+ lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
+ phba->mbox_ext_buf_ctx.mbx_dmabuf,
+ dmabuf);
+ list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2968 SLI_CONFIG ext-buffer wr all %d "
+ "ebuffers received\n",
+ phba->mbox_ext_buf_ctx.numBuf);
+ /* mailbox command structure for base driver */
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+ pmb = &pmboxq->u.mb;
+ memcpy(pmb, pbuf, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->vport = phba->pport;
+
+ /* callback for multi-buffer write mailbox command */
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+ /* context fields to callback function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+
+ /* state change */
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2969 Issued SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ return 1;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2970 Failed to issue SLI_CONFIG ext-buffer "
+ "maibox command, rc:x%x\n", rc);
+ rc = -EPIPE;
+ goto job_error;
+ }
+
+ /* wait for additional external buffers */
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
+job_error:
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ kfree(dd_data);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
+ * command with multiple non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ int rc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2971 SLI_CONFIG buffer (type:x%x)\n",
+ phba->mbox_ext_buf_ctx.mboxType);
+
+ if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2972 SLI_CONFIG rd buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_read_ebuf_get(phba, job);
+ if (rc == SLI_CONFIG_HANDLED)
+ lpfc_bsg_dma_page_free(phba, dmabuf);
+ } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
+ if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2973 SLI_CONFIG wr buffer state "
+ "mismatch:x%x\n",
+ phba->mbox_ext_buf_ctx.state);
+ lpfc_bsg_mbox_ext_abort(phba);
+ return -EPIPE;
+ }
+ rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
+ * (0x9B) mailbox commands and external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_dmabuf *dmabuf)
+{
+ struct dfc_mbox_req *mbox_req;
+ int rc;
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+ /* mbox command with/without single external buffer */
+ if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
+ return SLI_CONFIG_NOT_HANDLED;
+
+ /* mbox command and first external buffer */
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
+ if (mbox_req->extSeqNum == 1) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2974 SLI_CONFIG mailbox: tag:%d, "
+ "seq:%d\n", mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
+ return rc;
+ } else
+ goto sli_cfg_ext_error;
+ }
+
+ /*
+ * handle additional external buffers
+ */
+
+ /* check broken pipe conditions */
+ if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
+ goto sli_cfg_ext_error;
+ if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
+ goto sli_cfg_ext_error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2975 SLI_CONFIG mailbox external buffer: "
+ "extSta:x%x, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
+ mbox_req->extSeqNum);
+ rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
+ return rc;
+
+sli_cfg_ext_error:
+ /* all other cases, broken pipe */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2976 SLI_CONFIG mailbox broken pipe: "
+ "ctxSta:x%x, ctxNumBuf:%d "
+ "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
+ phba->mbox_ext_buf_ctx.state,
+ phba->mbox_ext_buf_ctx.numBuf,
+ phba->mbox_ext_buf_ctx.mbxTag,
+ phba->mbox_ext_buf_ctx.seqNum,
+ mbox_req->extMboxTag, mbox_req->extSeqNum);
+
+ lpfc_bsg_mbox_ext_session_reset(phba);
+
+ return -EPIPE;
+}
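To make the session checks above concrete, here is a hedged sketch of the sequencing a management application would follow for a multi-buffer pass-through; the field names come from struct dfc_mbox_req as extended by this patch, while the tag value, buffer count, and submit_mbox_bsg() helper are placeholders only.

/* Hypothetical user-space sequencing of a multi-buffer SLI_CONFIG
 * pass-through.  extMboxTag stays constant for the whole session and
 * extSeqNum counts up from 1; request 1 carries the SLI_CONFIG mailbox
 * itself, later requests deliver (write) or retrieve (read) the additional
 * external buffers.  An out-of-order sequence number or a changed tag is
 * treated as a broken pipe (-EPIPE) by lpfc_bsg_handle_sli_cfg_ext().
 */
struct dfc_mbox_req req;
uint32_t tag = 0x1234;          /* placeholder session tag */
uint32_t seq, nbuf = 3;         /* placeholder: mailbox plus two buffers */

for (seq = 1; seq <= nbuf; seq++) {
	memset(&req, 0, sizeof(req));
	req.command    = LPFC_BSG_VENDOR_MBOX;
	req.extMboxTag = tag;
	req.extSeqNum  = seq;
	submit_mbox_bsg(&req /* , payload buffer for this sequence */);
}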
+
+/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
* @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
- MAILBOX_t *mb = NULL;
+ uint8_t *pmbx = NULL;
struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
- uint32_t size;
- struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
- struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
- struct ulp_bde64 *rxbpl = NULL;
- struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ struct lpfc_dmabuf *dmabuf = NULL;
+ struct dfc_mbox_req *mbox_req;
struct READ_EVENT_LOG_VAR *rdEventLog;
uint32_t transmit_length, receive_length, mode;
+ struct lpfc_mbx_sli4_config *sli4_config;
struct lpfc_mbx_nembed_cmd *nembed_sge;
struct mbox_header *header;
struct ulp_bde64 *bde;
uint8_t *ext = NULL;
int rc = 0;
uint8_t *from;
+ uint32_t size;
+
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ /*
+ * Don't allow mailbox commands to be sent when blocked or when in
+ * the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_done;
+ }
+
+ mbox_req =
+ (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
(mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
+ dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!dmabuf || !dmabuf->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* Get the mailbox command or external buffer from BSG */
+ pmbx = (uint8_t *)dmabuf->virt;
+ size = job->request_payload.payload_len;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, pmbx, size);
+
+ /* Handle possible SLI_CONFIG with non-embedded payloads */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
+ if (rc == SLI_CONFIG_HANDLED)
+ goto job_cont;
+ if (rc)
+ goto job_done;
+ /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
+ }
+
+ rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
+ if (rc != 0)
+ goto job_done; /* must be negative */
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
goto job_done;
}
- mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
- if (!mb) {
- rc = -ENOMEM;
- goto job_done;
- }
-
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
- size = job->request_payload.payload_len;
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- mb, size);
-
- rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
- if (rc != 0)
- goto job_done; /* must be negative */
-
pmb = &pmboxq->u.mb;
- memcpy(pmb, mb, sizeof(*pmb));
+ memcpy(pmb, pmbx, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
"0x%x while in stopped state.\n",
pmb->mbxCommand);
- /* Don't allow mailbox commands to be sent when blocked
- * or when in the middle of discovery
- */
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_done;
- }
-
/* extended mailbox commands will need an extended buffer */
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
- ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
- if (!ext) {
- rc = -ENOMEM;
- goto job_done;
- }
-
/* any data for the device? */
if (mbox_req->inExtWLen) {
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)ext, from,
- mbox_req->inExtWLen * sizeof(uint32_t));
+ from = pmbx;
+ ext = from + sizeof(MAILBOX_t);
}
-
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
- putPaddrLow(dmp->dma.phys);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
- putPaddrHigh(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
+ putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
- putPaddrLow(dmp->dma.phys +
- pmb->un.varBIUdiag.un.s2.
- xmit_bde64.tus.f.bdeSize);
-
- /* copy the transmit data found in the mailbox extension area */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
+ putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+ + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
rdEventLog = &pmb->un.varRdEventLog;
receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* mode zero uses a bde like biu diags command */
if (mode == 0) {
-
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- if (rxbpl) {
- INIT_LIST_HEAD(&rxbmp->list);
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- receive_length, 0);
- }
-
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
} else if (phba->sli_rev == LPFC_SLI_REV4) {
if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* receive length cannot be greater than mailbox
* extension size
*/
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
+ if (receive_length == 0) {
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
- pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
pmb->un.varUpdateCfg.co) {
bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_done;
}
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl,
- bde->tus.f.bdeSize, 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- INIT_LIST_HEAD(&dmp->dma.list);
- bde->addrHigh = putPaddrHigh(dmp->dma.phys);
- bde->addrLow = putPaddrLow(dmp->dma.phys);
-
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- bde->tus.f.bdeSize);
+ bde->addrHigh = putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ bde->addrLow = putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
- /* rebuild the command for sli4 using our own buffers
- * like we do for biu diags
- */
- header = (struct mbox_header *)&pmb->un.varWords[0];
- nembed_sge = (struct lpfc_mbx_nembed_cmd *)
- &pmb->un.varWords[0];
- receive_length = nembed_sge->sge[0].length;
-
- /* receive length cannot be greater than mailbox
- * extension size
- */
- if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
- rc = -ERANGE;
- goto job_done;
- }
-
- rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!rxbmp) {
- rc = -ENOMEM;
- goto job_done;
- }
-
- rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- if (!rxbmp->virt) {
- rc = -ENOMEM;
- goto job_done;
- }
+ /* Handling non-embedded SLI_CONFIG mailbox command */
+ sli4_config = &pmboxq->u.mqe.un.sli4_config;
+ if (!bf_get(lpfc_mbox_hdr_emb,
+ &sli4_config->header.cfg_mhdr)) {
+ /* rebuild the command for sli4 using our
+ * own buffers like we do for biu diags
+ */
+ header = (struct mbox_header *)
+ &pmb->un.varWords[0];
+ nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+ &pmb->un.varWords[0];
+ receive_length = nembed_sge->sge[0].length;
+
+ /* receive length cannot be greater than
+ * mailbox extension size
+ */
+ if ((receive_length == 0) ||
+ (receive_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
- 0);
- if (!dmp) {
- rc = -ENOMEM;
- goto job_done;
+ nembed_sge->sge[0].pa_hi =
+ putPaddrHigh(dmabuf->phys
+ + sizeof(MAILBOX_t));
+ nembed_sge->sge[0].pa_lo =
+ putPaddrLow(dmabuf->phys
+ + sizeof(MAILBOX_t));
}
-
- INIT_LIST_HEAD(&dmp->dma.list);
- nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
- nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
- /* copy the transmit data found in the mailbox
- * extension area
- */
- from = (uint8_t *)mb;
- from += sizeof(MAILBOX_t);
- memcpy((uint8_t *)dmp->dma.virt, from,
- header->cfg_mhdr.payload_length);
}
}
- dd_data->context_un.mbox.rxbmp = rxbmp;
- dd_data->context_un.mbox.dmp = dmp;
+ dd_data->context_un.mbox.dmabuffers = dmabuf;
/* setup wake call as IOCB callback */
- pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+ pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->context_un.mbox.pmboxq = pmboxq;
- dd_data->context_un.mbox.mb = mb;
+ dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
dd_data->context_un.mbox.set_job = job;
dd_data->context_un.mbox.ext = ext;
dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* job finished, copy the data */
- memcpy(mb, pmb, sizeof(*pmb));
+ memcpy(pmbx, pmb, sizeof(*pmb));
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- mb, size);
+ job->reply_payload.sg_cnt,
+ pmbx, size);
/* not waiting mbox already done */
rc = 0;
goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
job_done:
/* common exit for error or job completed inline */
- kfree(mb);
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
- kfree(ext);
- if (dmp) {
- dma_free_coherent(&phba->pcidev->dev,
- dmp->size, dmp->dma.virt,
- dmp->dma.phys);
- kfree(dmp);
- }
- if (rxbmp) {
- lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
- kfree(rxbmp);
- }
+ lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
+job_cont:
return rc;
}
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct dfc_mbox_req *mbox_req;
int rc = 0;
- /* in case no data is transferred */
+ /* mix-and-match backward compatibility */
job->reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2737 Received MBOX_REQ request below "
- "minimum size\n");
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
- rc = -EINVAL;
- goto job_error;
- }
-
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- rc = -EAGAIN;
- goto job_error;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2737 Mix-and-match backward compability "
+ "between MBOX_REQ old size:%d and "
+ "new request size:%d\n",
+ (int)(job->request_len -
+ sizeof(struct fc_bsg_request)),
+ (int)sizeof(struct dfc_mbox_req));
+ mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ mbox_req->extMboxTag = 0;
+ mbox_req->extSeqNum = 0;
}
rc = lpfc_bsg_issue_mbox(phba, job, vport);
-job_error:
if (rc == 0) {
/* job done */
job->reply->result = 0;
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
rc = lpfc_bsg_send_mgmt_rsp(job);
break;
case LPFC_BSG_VENDOR_DIAG_MODE:
- rc = lpfc_bsg_diag_mode(job);
+ rc = lpfc_bsg_diag_loopback_mode(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_MODE_END:
+ rc = lpfc_sli4_bsg_diag_mode_end(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
+ rc = lpfc_bsg_diag_loopback_run(job);
break;
- case LPFC_BSG_VENDOR_DIAG_TEST:
- rc = lpfc_bsg_diag_test(job);
+ case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
+ rc = lpfc_sli4_bsg_link_diag_test(job);
break;
case LPFC_BSG_VENDOR_GET_MGMT_REV:
rc = lpfc_bsg_get_dfc_rev(job);
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
/* the mbox completion handler can now be run */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
+ if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
break;
case TYPE_MENLO:
menlo = &dd_data->context_un.menlo;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index b542aca6f5ae..c8c2b47ea886 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -24,15 +24,17 @@
* These are the vendor unique structures passed in using the bsg
* FC_BSG_HST_VENDOR message code type.
*/
-#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
-#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
-#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
-#define LPFC_BSG_VENDOR_DIAG_MODE 4
-#define LPFC_BSG_VENDOR_DIAG_TEST 5
-#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
-#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
+#define LPFC_BSG_VENDOR_DIAG_MODE 4
+#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
+#define LPFC_BSG_VENDOR_MBOX 7
+#define LPFC_BSG_VENDOR_MENLO_CMD 8
+#define LPFC_BSG_VENDOR_MENLO_DATA 9
+#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
+#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
struct set_ct_event {
uint32_t command;
@@ -67,10 +69,25 @@ struct diag_mode_set {
uint32_t timeout;
};
+struct sli4_link_diag {
+ uint32_t command;
+ uint32_t timeout;
+ uint32_t test_id;
+ uint32_t loops;
+ uint32_t test_version;
+ uint32_t error_action;
+};
+
struct diag_mode_test {
uint32_t command;
};
+struct diag_status {
+ uint32_t mbox_status;
+ uint32_t shdr_status;
+ uint32_t shdr_add_status;
+};
+
#define LPFC_WWNN_TYPE 0
#define LPFC_WWPN_TYPE 1
@@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
};
#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
+
+/* BSG mailbox request header */
struct dfc_mbox_req {
uint32_t command;
uint32_t mbOffset;
uint32_t inExtWLen;
uint32_t outExtWLen;
+ uint32_t extMboxTag;
+ uint32_t extSeqNum;
};
/* Used for menlo command or menlo data. The xri is only used for menlo data */
@@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
#define lpfc_mbox_sli_config_mse_len_WORD buf_len
};
-struct lpfc_sli_config_subcmd_hbd {
+struct lpfc_sli_config_hbd {
uint32_t buf_len;
#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
@@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
uint32_t reserved5;
};
-struct lpfc_sli_config_generic {
+struct lpfc_sli_config_emb0_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
+ uint32_t padding;
+ uint32_t word64;
+#define lpfc_emb0_subcmnd_opcode_SHIFT 0
+#define lpfc_emb0_subcmnd_opcode_MASK 0xff
+#define lpfc_emb0_subcmnd_opcode_WORD word64
+#define lpfc_emb0_subcmnd_subsys_SHIFT 8
+#define lpfc_emb0_subcmnd_subsys_MASK 0xff
+#define lpfc_emb0_subcmnd_subsys_WORD word64
+/* Subsystem FCOE (0x0C) OpCodes */
+#define SLI_CONFIG_SUBSYS_FCOE 0x0C
+#define FCOE_OPCODE_READ_FCF 0x08
+#define FCOE_OPCODE_ADD_FCF 0x09
};
-struct lpfc_sli_config_subcmnd {
+struct lpfc_sli_config_emb1_subsys {
struct lpfc_sli_config_hdr sli_config_hdr;
uint32_t word6;
-#define lpfc_subcmnd_opcode_SHIFT 0
-#define lpfc_subcmnd_opcode_MASK 0xff
-#define lpfc_subcmnd_opcode_WORD word6
-#define lpfc_subcmnd_subsys_SHIFT 8
-#define lpfc_subcmnd_subsys_MASK 0xff
-#define lpfc_subcmnd_subsys_WORD word6
+#define lpfc_emb1_subcmnd_opcode_SHIFT 0
+#define lpfc_emb1_subcmnd_opcode_MASK 0xff
+#define lpfc_emb1_subcmnd_opcode_WORD word6
+#define lpfc_emb1_subcmnd_subsys_SHIFT 8
+#define lpfc_emb1_subcmnd_subsys_MASK 0xff
+#define lpfc_emb1_subcmnd_subsys_WORD word6
+/* Subsystem COMN (0x01) OpCodes */
+#define SLI_CONFIG_SUBSYS_COMN 0x01
+#define COMN_OPCODE_READ_OBJECT 0xAB
+#define COMN_OPCODE_WRITE_OBJECT 0xAC
+#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
+#define COMN_OPCODE_DELETE_OBJECT 0xAE
uint32_t timeout;
uint32_t request_length;
uint32_t word9;
@@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
uint32_t rd_offset;
uint32_t obj_name[26];
uint32_t hbd_count;
-#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10
- struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
+#define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
+ struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
};
struct lpfc_sli_config_mbox {
@@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
#define lpfc_mqe_command_MASK 0x000000FF
#define lpfc_mqe_command_WORD word0
union {
- struct lpfc_sli_config_generic sli_config_generic;
- struct lpfc_sli_config_subcmnd sli_config_subcmnd;
+ struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
+ struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
} un;
};
+
+/* driver only */
+#define SLI_CONFIG_NOT_HANDLED 0
+#define SLI_CONFIG_HANDLED 1
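
The new DIAG_MODE_END, DIAG_RUN_LOOPBACK and LINK_DIAG_TEST vendor commands dispatched above are driven by the sli4_link_diag request added to this header. As a rough illustration only, the following user-space sketch fills that request before it is handed to the FC BSG layer as the vendor payload of an FC_BSG_HST_VENDOR message (per the comment at the top of this header); the helper name and the literal timeout/test values are hypothetical and not part of the patch.

#include <string.h>
/* struct sli4_link_diag and LPFC_BSG_VENDOR_LINK_DIAG_TEST come from lpfc_bsg.h above */

static void fill_link_diag_request(struct sli4_link_diag *req)
{
	memset(req, 0, sizeof(*req));
	req->command = LPFC_BSG_VENDOR_LINK_DIAG_TEST;
	req->timeout = 60;	/* hypothetical timeout, in seconds */
	req->test_id = 1;	/* hypothetical test selector */
	req->loops = 1;		/* run the selected test once */
	req->test_version = 0;
	req->error_action = 0;	/* no special action on error */
}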
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f0b332f4eedb..fc20c247f36b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
void lpfc_pc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+ uint16_t, uint16_t, bool);
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
int lpfc_config_port_post(struct lpfc_hba *);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
@@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
uint32_t, uint32_t);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+
/* externs BlockGuard */
extern char *_dump_buf_data;
extern unsigned long _dump_buf_data_order;
@@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d9edfd90d7ff..779b88e1469d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
icmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
/* For GEN_REQUEST64_CR, use the RPI */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c93fca058603..ffe82d169b40 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
/* Get fast-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP CQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ fcp_qidx = 0;
+ do {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
@@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
- }
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get mailbox queue information */
@@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP complete queue */
- for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+ qidx = 0;
+ do {
if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
phba->sli4_hba.fcp_cq[qidx];
goto pass_check;
}
- }
+ } while (++qidx < phba->cfg_fcp_eq_count);
goto error_out;
break;
case LPFC_IDIAG_MQ:
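
The for-loops over the fast-path FCP CQs in this file are rewritten as do/while loops, presumably so that index 0 is still visited when cfg_fcp_eq_count is 0 (the single-EQ case handled further down in lpfc_init.c, where the FCP CQs are bound to the slow-path EQ). A reduced, stand-alone sketch of that control-flow difference, with an illustrative count value:

#include <stdio.h>

int main(void)
{
	unsigned int count = 0;	/* stands in for cfg_fcp_eq_count == 0 */
	unsigned int i;

	for (i = 0; i < count; i++)		/* never enters the body */
		printf("for: index %u\n", i);

	i = 0;
	do					/* visits index 0 exactly once */
		printf("do/while: index %u\n", i);
	while (++i < count);

	return 0;
}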
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e2c452467c8b..32a084534f3e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.myID = vport->fc_myDID;
/* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = vport->vpi + phba->vpi_base;
+ icmd->ulpContext = phba->vpi_ids[vport->vpi];
icmd->ulpCt_h = 0;
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
rc = -ENOMEM;
goto fail_free_dmabuf;
}
+
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
struct lpfc_vport *vport;
unsigned long flags;
+ int i;
+
+	/* The physical ports are always vpi 0 - translation is unnecessary. */
+ if (vpi > 0) {
+ /*
+ * Translate the physical vpi to the logical vpi. The
+ * vport stores the logical vpi.
+ */
+ for (i = 0; i < phba->max_vpi; i++) {
+ if (vpi == phba->vpi_ids[i])
+ break;
+ }
+
+ if (i >= phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
+ return NULL;
+ }
+ }
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
vport = phba->pport;
else
vport = lpfc_find_vport_by_vpid(phba,
- icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
+ icmd->unsli3.rcvsli3.vpi);
}
+
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
/* Set the ulpContext to the vpi */
- elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
+ elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
} else {
/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
icmd->ulpCt_h = 1;
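
The ELS changes above replace the old vport->vpi + phba->vpi_base arithmetic with a table lookup, phba->vpi_ids[vport->vpi]: the vport keeps a logical VPI and the table supplies the port's physical VPI, while lpfc_find_vport_by_vpid() now performs the reverse scan. A hypothetical helper capturing that reverse mapping (illustrative only; the uint16_t table element type is an assumption mirroring the usage above):

#include <stdint.h>

/* Illustrative only: map a physical VPI reported by the port back to the
 * driver's logical VPI, mirroring the scan added to lpfc_find_vport_by_vpid().
 * Returns -1 when no vport is mapped to the physical VPI. */
static int lpfc_phys_to_logical_vpi(const uint16_t *vpi_ids, int max_vpi,
				    uint16_t phys_vpi)
{
	int i;

	if (phys_vpi == 0)	/* the physical port is always vpi 0 */
		return 0;

	for (i = 0; i < max_vpi; i++)
		if (vpi_ids[i] == phys_vpi)
			return i;	/* logical vpi */

	return -1;
}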
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7a35df5e2038..18d0dbfda2bc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
- lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+ lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
mb->vport = vport;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(vport->fc_nodename));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(vport->fc_portname));
+ lpfc_update_vport_wwn(vport);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3504,7 +3496,8 @@ out:
return;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
-
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
@@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
+ uint16_t rpi;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+ /* SLI4 ports require the physical rpi value. */
+ rpi = ndlp->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+ lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+ lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+ mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
- /*
- * For SLI3, cmpl_reg_vpi will set port_state to READY, and
- * continue discovery.
- */
+ /* Register the VPI for SLI3, NON-NPIV only. */
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4943,7 +4939,7 @@ restart_disc:
if (phba->sli_rev < LPFC_SLI_REV4) {
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_reg_vpi(phba, vport);
- else { /* NPIV Not enabled */
+ else {
lpfc_issue_clear_la(phba, vport);
vport->port_state = LPFC_VPORT_READY;
}
@@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
- ndlp->nlp_rpi = mb->un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
+		/*
+		 * If the CVL_RCVD bit is not set then we have sent the
+		 * flogi. If dev_loss fires while we are waiting, we do
+		 * not want to unreg the fcf.
+		 */
+ if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+ spin_unlock_irq(shost->host_lock);
+ ret = 1;
+ goto out;
+ }
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
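
The same logical/physical split shows up for RPIs in this file: SLI4 mailbox traffic (lpfc_unreg_rpi() above, lpfc_gen_req() in lpfc_ct.c) must carry the physical RPI from phba->sli4_hba.rpi_ids[], while earlier SLI revisions keep using the value firmware returned in mb->un.varWords[0]. A hypothetical inline wrapper for that pattern, assuming the usual lpfc driver headers; the wrapper itself is not part of the patch:

/* Illustrative only: pick the RPI value firmware expects for this node. */
static inline uint16_t lpfc_hw_rpi(struct lpfc_hba *phba,
				   struct lpfc_nodelist *ndlp)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];	/* physical rpi */
	return ndlp->nlp_rpi;					/* pre-SLI4: as registered */
}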
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 86b6f7e6686a..9059524cf225 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,8 @@
#define SLI3_IOCB_CMD_SIZE 128
#define SLI3_IOCB_RSP_SIZE 64
+#define LPFC_UNREG_ALL_RPIS_VPORT 0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS 0xffffffff
/* vendor ID used in SCSI netlink calls */
#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
@@ -903,6 +905,8 @@ struct RRQ { /* Structure is in Big Endian format */
#define rrq_rxid_WORD rrq_exchg
};
+#define LPFC_MAX_VFN_PER_PFN 255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN	0	/* Default VFs due to platform limitation */
struct RTV_RSP { /* Structure is in Big Endian format */
uint32_t ratov;
@@ -1199,7 +1203,9 @@ typedef struct {
#define PCI_DEVICE_ID_BALIUS 0xe131
#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
#define PCI_DEVICE_ID_LANCER_FC 0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF 0xe208
#define PCI_DEVICE_ID_LANCER_FCOE 0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@@ -3021,7 +3027,7 @@ typedef struct {
#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
#define MAILBOX_HBA_EXT_OFFSET 0x100
/* max mbox xmit size is a page size for sysfs IO operations */
-#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
+#define MAILBOX_SYSFS_MAX 4096
typedef union {
uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4dff668ebdad..11e26a26b5d1 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,6 +170,25 @@ struct lpfc_sli_intf {
#define LPFC_PCI_FUNC3 3
#define LPFC_PCI_FUNC4 4
+/* SLI4 interface type-2 control register offsets */
+#define LPFC_CTL_PORT_SEM_OFFSET 0x400
+#define LPFC_CTL_PORT_STA_OFFSET 0x404
+#define LPFC_CTL_PORT_CTL_OFFSET 0x408
+#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
+
+/* Some SLI4 interface type-2 PDEV_CTL register bits */
+#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
+#define LPFC_CTL_PDEV_CTL_DD 0x00000004
+#define LPFC_CTL_PDEV_CTL_LC 0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
+
+#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
/* Active interrupt test count */
#define LPFC_ACT_INTR_CNT 4
@@ -210,9 +229,26 @@ struct ulp_bde64 {
struct lpfc_sli4_flags {
uint32_t word0;
-#define lpfc_fip_flag_SHIFT 0
-#define lpfc_fip_flag_MASK 0x00000001
-#define lpfc_fip_flag_WORD word0
+#define lpfc_idx_rsrc_rdy_SHIFT 0
+#define lpfc_idx_rsrc_rdy_MASK 0x00000001
+#define lpfc_idx_rsrc_rdy_WORD word0
+#define LPFC_IDX_RSRC_RDY 1
+#define lpfc_xri_rsrc_rdy_SHIFT 1
+#define lpfc_xri_rsrc_rdy_MASK 0x00000001
+#define lpfc_xri_rsrc_rdy_WORD word0
+#define LPFC_XRI_RSRC_RDY 1
+#define lpfc_rpi_rsrc_rdy_SHIFT 2
+#define lpfc_rpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD word0
+#define LPFC_RPI_RSRC_RDY 1
+#define lpfc_vpi_rsrc_rdy_SHIFT 3
+#define lpfc_vpi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD word0
+#define LPFC_VPI_RSRC_RDY 1
+#define lpfc_vfi_rsrc_rdy_SHIFT 4
+#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD word0
+#define LPFC_VFI_RSRC_RDY 1
};
struct sli4_bls_rsp {
@@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
#define lpfc_mbox_hdr_version_SHIFT 0
#define lpfc_mbox_hdr_version_MASK 0x000000FF
#define lpfc_mbox_hdr_version_WORD word9
+#define lpfc_mbox_hdr_pf_num_SHIFT 16
+#define lpfc_mbox_hdr_pf_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD word9
+#define lpfc_mbox_hdr_vh_num_SHIFT 24
+#define lpfc_mbox_hdr_vh_num_MASK 0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD word9
#define LPFC_Q_CREATE_VERSION_2 2
#define LPFC_Q_CREATE_VERSION_1 1
#define LPFC_Q_CREATE_VERSION_0 0
@@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
} response;
};
-/* Mailbox structures */
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
struct mbox_header {
struct lpfc_sli4_cfg_mhdr cfg_mhdr;
union lpfc_sli4_cfg_shdr cfg_shdr;
};
+#define LPFC_EXTENT_LOCAL 0
+#define LPFC_TIMEOUT_DEFAULT 0
+#define LPFC_EXTENT_VERSION_DEFAULT 0
+
/* Subsystem Definitions */
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
@@ -794,6 +846,13 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
@@ -808,6 +867,8 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
/* Mailbox command structures */
struct eq_context {
@@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
} u;
};
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI 0x20
+#define LPFC_RSC_TYPE_FCOE_VPI 0x21
+#define LPFC_RSC_TYPE_FCOE_RPI 0x22
+#define LPFC_RSC_TYPE_FCOE_XRI 0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT 0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT 16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK 0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD word4
+ } rsp;
+ } u;
+};
+
+struct lpfc_id_range {
+ uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
+#define lpfc_mbx_rsrc_id_word4_0_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT 16
+#define lpfc_mbx_rsrc_id_word4_1_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD word5
+};
+
+struct lpfc_mbx_set_link_diag_state {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT 0
+#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD word0
+#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_state_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_WORD word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK 0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD word0
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT 16
+#define lpfc_mbx_run_diag_test_link_num_MASK 0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT 22
+#define lpfc_mbx_run_diag_test_link_type_MASK 0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD word0
+ uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_id_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT 16
+#define lpfc_mbx_run_diag_test_loops_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT 0
+#define lpfc_mbx_run_diag_test_test_ver_MASK 0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT 16
+#define lpfc_mbx_run_diag_test_err_act_MASK 0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD word2
+ } req;
+ struct {
+ uint32_t word0;
+ } rsp;
+ } u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mailbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * for extents payload.
+ *
+ * 212/2 (bytes per extent ID) = 106 extent IDs.
+ * 106/2 (extent IDs per word) = 53 words.
+ * lpfc_id_range id is statically sized to 53 words.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges. For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT 16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD word4
+ } req;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT 0
+#define lpfc_mbx_rsrc_cnt_MASK 0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD word4
+ struct lpfc_id_range id[53];
+ } rsp;
+ } u;
+};
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in this
+ * structure shares the same word4 SHIFT/MASK/WORD defines provided by the
+ * mbx_alloc_rsrc_extents and mbx_get_alloc_rsrc_extents structures defined
+ * above. This non-embedded structure provides for the
+ * maximum number of extents supported by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ uint32_t word4;
+ struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+ struct mbox_header header;
+ struct {
+ uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT 0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK 0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD word4
+ } req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
struct lpfc_mbx_post_hdr_tmpl {
struct mbox_header header;
uint32_t word10;
@@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
uint32_t word2;
#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
-#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
+#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF
#define lpfc_sli4_sge_offset_WORD word2
#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
this flag !! */
@@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
struct lpfc_mbx_read_config {
uint32_t word1;
-#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
-#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_bbc_WORD word1
-#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
-#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_init_bbc_WORD word1
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
uint32_t word2;
-#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
-#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
-#define lpfc_mbx_rd_conf_nport_did_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
- uint32_t word3;
-#define lpfc_mbx_rd_conf_ao_SHIFT 0
-#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
-#define lpfc_mbx_rd_conf_ao_WORD word3
-#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
-#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_bb_scn_WORD word3
-#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
-#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
-#define lpfc_mbx_rd_conf_mc_SHIFT 29
-#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
-#define lpfc_mbx_rd_conf_mc_WORD word3
+ uint32_t rsvd_3;
uint32_t word4;
#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
- uint32_t word5;
-#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_lp_tov_WORD word5
+ uint32_t rsvd_5;
uint32_t word6;
#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
- uint32_t word7;
-#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
- uint32_t word8;
-#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
-#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
-#define lpfc_mbx_rd_conf_al_tov_WORD word8
+ uint32_t rsvd_7;
+ uint32_t rsvd_8;
uint32_t word9;
#define lpfc_mbx_rd_conf_lmt_SHIFT 0
#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_lmt_WORD word9
- uint32_t word10;
-#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
-#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
-#define lpfc_mbx_rd_conf_max_alpa_WORD word10
- uint32_t word11_rsvd;
+ uint32_t rsvd_10;
+ uint32_t rsvd_11;
uint32_t word12;
#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
@@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_vfi_count_WORD word15
uint32_t word16;
-#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
-#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
-#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
@@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
#define cfg_fcoe_SHIFT 0
#define cfg_fcoe_MASK 0x00000001
#define cfg_fcoe_WORD word12
+#define cfg_ext_SHIFT 1
+#define cfg_ext_MASK 0x00000001
+#define cfg_ext_WORD word12
+#define cfg_hdrr_SHIFT 2
+#define cfg_hdrr_MASK 0x00000001
+#define cfg_hdrr_WORD word12
#define cfg_phwq_SHIFT 15
#define cfg_phwq_MASK 0x00000001
#define cfg_phwq_WORD word12
@@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
struct lpfc_sli4_parameters sli4_parameters;
};
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE 18
+ uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+ uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT 0
+#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
+ uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD word1
+ uint32_t reserved;
+ uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT 0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT 8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT 16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT 0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK 0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+ uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
+ uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK 0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD word1
+ uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD word2
+ uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD word3
+ uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD word4
+ uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD word5
+ uint32_t word6;
+ uint32_t word7;
+ uint32_t word8;
+ uint32_t word9;
+ uint32_t word10;
+ uint32_t word11;
+ uint32_t word12;
+ uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT 0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK 0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT 6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK 0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT 8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT 9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK 0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM 2
+ uint32_t rsrc_desc_count;
+ struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+ struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE 0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT 0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE 0x2
+ union {
+ struct {
+ uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT 0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK 0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT 8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK 0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD word10
+ } request;
+ struct {
+ struct lpfc_prof_cfg prof_cfg;
+ } response;
+ } u;
+};
+
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
+#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
+struct lpfc_mbx_wr_object {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT 31
+#define lpfc_wr_object_eof_MASK 0x00000001
+#define lpfc_wr_object_eof_WORD word4
+#define lpfc_wr_object_write_length_SHIFT 0
+#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD word4
+ uint32_t write_offset;
+ uint32_t object_name[26];
+ uint32_t bde_count;
+ struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+ } request;
+ struct {
+ uint32_t actual_write_length;
+ } response;
+ } u;
+};
+
/* mailbox queue entry structure */
struct lpfc_mqe {
uint32_t word0;
@@ -2241,6 +2618,9 @@ struct lpfc_mqe {
struct lpfc_mbx_cq_destroy cq_destroy;
struct lpfc_mbx_wq_destroy wq_destroy;
struct lpfc_mbx_rq_destroy rq_destroy;
+ struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+ struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+ struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
struct lpfc_mbx_post_sgl_pages post_sgl_pages;
struct lpfc_mbx_nembed_cmd nembed_cmd;
struct lpfc_mbx_read_rev read_rev;
@@ -2252,7 +2632,13 @@ struct lpfc_mqe {
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ struct lpfc_mbx_set_link_diag_state link_diag_state;
+ struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+ struct lpfc_mbx_run_link_diag_test link_diag_test;
+ struct lpfc_mbx_get_func_cfg get_func_cfg;
+ struct lpfc_mbx_get_prof_cfg get_prof_cfg;
struct lpfc_mbx_nop nop;
+ struct lpfc_mbx_wr_object wr_object;
} un;
};
@@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
#define SGL_ALIGN_SZ 64
#define SGL_PAGE_SIZE 4096
/* align SGL addr on a size boundary - adjust address up */
-#define NO_XRI ((uint16_t)-1)
+#define NO_XRI 0xffff
struct wqe_common {
uint32_t word6;
@@ -2798,9 +3184,28 @@ union lpfc_wqe {
struct gen_req64_wqe gen_req;
};
+#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
+#define LPFC_FILE_TYPE_GROUP 0xf7
+#define LPFC_FILE_ID_GROUP 0xa2
+struct lpfc_grp_hdr {
+ uint32_t size;
+ uint32_t magic_number;
+ uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT 24
+#define lpfc_grp_hdr_file_type_MASK 0x000000FF
+#define lpfc_grp_hdr_file_type_WORD word2
+#define lpfc_grp_hdr_id_SHIFT 16
+#define lpfc_grp_hdr_id_MASK 0x000000FF
+#define lpfc_grp_hdr_id_WORD word2
+ uint8_t rev_name[128];
+};
+
#define FCP_COMMAND 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
#define OTHER_COMMAND 0x8
+#define LPFC_FW_DUMP 1
+#define LPFC_FW_RESET 2
+#define LPFC_DV_RESET 3
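
A small stand-alone check of the sizing arithmetic in the lpfc_mbx_alloc_rsrc_extents comment above: the 256-byte mailbox, the 11 header words and the 2-byte extent IDs are taken from that comment, and the result matches the id[53] array in the structure. The variable names here are purely illustrative.

#include <stdio.h>

int main(void)
{
	const unsigned int mbox_bytes = 256;		/* generic mailbox size */
	const unsigned int hdr_words  = 6 + 4 + 1;	/* mbox + shared + opcode hdr */
	const unsigned int hdr_bytes  = hdr_words * 4;	/* 44 bytes */
	const unsigned int payload    = mbox_bytes - hdr_bytes;	/* 212 bytes */
	const unsigned int extent_ids = payload / 2;	/* 2-byte IDs -> 106 */
	const unsigned int id_words   = extent_ids / 2;	/* two IDs per word -> 53 */

	printf("header %u bytes, payload %u bytes, %u extent ids, %u id words\n",
	       hdr_bytes, payload, extent_ids, id_words);
	return 0;
}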
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dda036a1af3..148b98ddbb1d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
+#include <linux/firmware.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
if (!lpfc_vpd_data)
goto out_free_mbox;
-
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
}
/**
+ * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
+ * cfg_soft_wwnn, cfg_soft_wwpn
+ * @vport: pointer to lpfc vport data structure.
+ *
+ *
+ * Return codes
+ * None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+ /* If the soft name exists then update it using the service params */
+ if (vport->phba->cfg_soft_wwnn)
+ u64_to_wwn(vport->phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
+ if (vport->phba->cfg_soft_wwpn)
+ u64_to_wwn(vport->phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+
+ /*
+ * If the name is empty or there exists a soft name
+ * then copy the service params name, otherwise use the fc name
+ */
+ if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+ sizeof(struct lpfc_name));
+
+ if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ else
+ memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name));
+}
+
+/**
* lpfc_config_port_post - Perform lpfc initialization after config port
* @phba: pointer to lpfc hba data structure.
*
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->context1 = NULL;
-
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof (struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof (struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Clear all pending interrupts */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
-
phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
- if (phba->lmt & LMT_10Gb)
+ if (phba->lmt & LMT_16Gb)
+ max_speed = 16;
+ else if (phba->lmt & LMT_10Gb)
max_speed = 10;
else if (phba->lmt & LMT_8Gb)
max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
"Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FC:
- oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
+ case PCI_DEVICE_ID_LANCER_FC_VF:
+ m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
break;
case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
oneConnect = 1;
- m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
+ m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
break;
default:
m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
if (mdp && mdp[0] == '\0')
snprintf(mdp, 79,"%s", m.name);
- /* oneConnect hba requires special processing, they are all initiators
+ /*
+ * oneConnect hba requires special processing, they are all initiators
* and we put the port number on the end
*/
if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
kfree(io);
phba->total_iocbq_bufs--;
}
+
spin_unlock_irq(&phba->hbalock);
return 0;
}
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
vport = lpfc_find_vport_by_vpid(phba,
acqe_fip->index - phba->vpi_base);
ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
pci_try_set_mwi(pdev);
pci_save_state(pdev);
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+ pdev->needs_freset = 1;
+
return 0;
out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables PCI SR-IOV virtual functions for a physical
+ * function. It invokes the PCI SR-IOV API with the @nr_vfn provided to
+ * enable that number of virtual functions on the physical function. As
+ * not all devices support SR-IOV, the return code from the pci_enable_sriov()
+ * API call is not considered an error condition for most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ int rc;
+
+ rc = pci_enable_sriov(pdev, nr_vfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2806 Failed to enable sriov on this device "
+ "with vfn number nr_vf:%d, rc:%d\n",
+ nr_vfn, rc);
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"2807 Successfully enabled sriov on this device "
+ "with vfn number nr_vf:%d\n", nr_vfn);
+ return rc;
+}
+
+/**
* lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
* @phba: pointer to lpfc hba data structure.
*
@@ -4011,6 +4079,7 @@ static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
+ int rc;
/*
* Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
return -ENOMEM;
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2808 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return 0;
}
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->fcf.redisc_wait.data = (unsigned long)phba;
/*
+ * Control structure for handling external multi-buffer mailbox
+ * command pass-through.
+ */
+ memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+ sizeof(struct lpfc_mbox_ext_buf_ctx));
+ INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+ /*
* We need to do a READ_CONFIG mailbox command here before
* calling lpfc_get_cfgparam. For VFs this will report the
* MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
/*
- * Initialize dirver internal slow-path work queues
+ * Initialize driver internal slow-path work queues
*/
/* Driver internel slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Receive queue CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+ /* Initialize extent block lists. */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+ INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
/* Initialize the driver internal SLI layer lists. */
lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
/*
* Get sli4 parameters that override parameters from Port capabilities.
- * If this call fails it is not a critical error so continue loading.
+ * If this call fails, it isn't critical unless the SLI4 parameters come
+ * back in conflict.
*/
- lpfc_get_sli4_parameters(phba, mboxq);
+ rc = lpfc_get_sli4_parameters(phba, mboxq);
+ if (rc) {
+ if (phba->sli4_hba.extents_in_use &&
+ phba->sli4_hba.rpi_hdrs_in_use) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2999 Unsupported SLI4 Parameters "
+ "Extents and RPI headers enabled.\n");
+ goto out_free_bsmbx;
+ }
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
"1430 Failed to initialize sgl list.\n");
goto out_free_sgl_list;
}
-
rc = lpfc_sli4_init_rpi_hdrs(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2759 Failed allocate memory for FCF round "
"robin failover bmask\n");
+ rc = -ENOMEM;
goto out_remove_rpi_hdrs;
}
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for fast-path "
"per-EQ handle array\n");
+ rc = -ENOMEM;
goto out_free_fcf_rr_bmask;
}
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2573 Failed allocate memory for msi-x "
"interrupt vector entries\n");
+ rc = -ENOMEM;
goto out_free_fcp_eq_hdl;
}
+ /*
+ * Enable sr-iov virtual functions if supported and configured
+ * through the module parameter.
+ */
+ if (phba->cfg_sriov_nr_virtfn > 0) {
+ rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+ phba->cfg_sriov_nr_virtfn);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3020 Requested number of SR-IOV "
+ "virtual functions (%d) is not "
+ "supported\n",
+ phba->cfg_sriov_nr_virtfn);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+ }
+
return rc;
out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
lpfc_sli4_cq_event_release_all(phba);
lpfc_sli4_cq_event_pool_destroy(phba);
+ /* Release resource identifiers. */
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+
/* Free the bsmbx region. */
lpfc_destroy_bootstrap_mbox(phba);
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
"Unloading driver.\n", __func__);
goto out_free_iocbq;
}
+ iocbq_entry->sli4_lxritag = NO_XRI;
iocbq_entry->sli4_xritag = NO_XRI;
spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2400 lpfc_init_sgl_list els %d.\n",
+ "2400 ELS XRI count %d.\n",
els_xri_cnt);
/* Initialize and populate the sglq list per host/VF. */
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
phba->sli4_hba.scsi_xri_max =
phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
phba->sli4_hba.scsi_xri_cnt = 0;
-
phba->sli4_hba.lpfc_scsi_psb_array =
kzalloc((sizeof(struct lpfc_scsi_buf *) *
phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
goto out_free_mem;
}
- sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (sglq_entry->sli4_xritag == NO_XRI) {
- kfree(sglq_entry);
- printk(KERN_ERR "%s: failed to allocate XRI.\n"
- "Unloading driver.\n", __func__);
- goto out_free_mem;
- }
sglq_entry->buff_type = GEN_BUFF_TYPE;
sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
int rc = 0;
- int longs;
- uint16_t rpi_count;
struct lpfc_rpi_hdr *rpi_hdr;
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
-
/*
- * Provision an rpi bitmask range for discovery. The total count
- * is the difference between max and base + 1.
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
*/
- rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
-
- longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
- GFP_KERNEL);
- if (!phba->sli4_hba.rpi_bmask)
- return -ENOMEM;
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ return rc;
+ }
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
uint32_t rpi_count;
+ /*
+ * If the SLI4 port supports extents, posting the rpi header isn't
+ * required. Set the expected maximum count and let the actual value
+ * get set when extents are fully allocated.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return NULL;
+ if (phba->sli4_hba.extents_in_use)
+ return NULL;
+
+ /* The limit on the logical index is just the max_rpi count. */
rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
- phba->sli4_hba.max_cfg_param.max_rpi - 1;
+ phba->sli4_hba.max_cfg_param.max_rpi - 1;
spin_lock_irq(&phba->hbalock);
- curr_rpi_range = phba->sli4_hba.next_rpi;
+ /*
+ * Establish the starting RPI in this header block. The starting
+ * rpi is normalized to a zero base because the physical rpi is
+ * port based.
+ */
+ curr_rpi_range = phba->sli4_hba.next_rpi -
+ phba->sli4_hba.max_cfg_param.rpi_base;
spin_unlock_irq(&phba->hbalock);
/*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
else
rpi_count = LPFC_RPI_HDR_COUNT;
+ if (!rpi_count)
+ return NULL;
/*
* First allocate the protocol header region for the port. The
* port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
rpi_hdr->page_count = 1;
spin_lock_irq(&phba->hbalock);
- rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+
+ /* The rpi_hdr stores the logical index only. */
+ rpi_hdr->start_rpi = curr_rpi_range;
list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
/*
- * The next_rpi stores the next module-64 rpi value to post
- * in any subsequent rpi memory region postings.
+ * The next_rpi stores the next logical module-64 rpi value used
+ * to post physical rpis in subsequent rpi postings.
*/
phba->sli4_hba.next_rpi += rpi_count;
spin_unlock_irq(&phba->hbalock);
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to remove all memory resources allocated
- * to support rpis. This routine presumes the caller has released all
- * rpis consumed by fabric or port logins and is prepared to have
- * the header pages removed.
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
**/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+
list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
&phba->sli4_hba.lpfc_rpi_hdr_list, list) {
list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
kfree(rpi_hdr->dmabuf);
kfree(rpi_hdr);
}
-
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
- memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+ exit:
+ /* There are no rpis available to the port now. */
+ phba->sli4_hba.next_rpi = 0;
}
/**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
/* Final checks. The port status should be clean. */
if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&reg_data.word0) ||
- bf_get(lpfc_sliport_status_err, &reg_data)) {
+ (bf_get(lpfc_sliport_status_err, &reg_data) &&
+ !bf_get(lpfc_sliport_status_rn, &reg_data))) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.
ERR1regaddr);
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb;
struct lpfc_mbx_read_config *rd_config;
- uint32_t rc = 0;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct lpfc_mbx_get_func_cfg *get_func_cfg;
+ struct lpfc_rsrc_desc_fcfcoe *desc;
+ uint32_t desc_count;
+ int length, i, rc = 0;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
+ phba->sli4_hba.extents_in_use =
+ bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_fcfi =
bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
- phba->sli4_hba.max_cfg_param.fcfi_base =
- bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_eq =
bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2003 cfg params XRI(B:%d M:%d), "
+ "2003 cfg params Extents? %d "
+ "XRI(B:%d M:%d), "
"VPI(B:%d M:%d) "
"VFI(B:%d M:%d) "
"RPI(B:%d M:%d) "
- "FCFI(B:%d M:%d)\n",
+ "FCFI(Count:%d)\n",
+ phba->sli4_hba.extents_in_use,
phba->sli4_hba.max_cfg_param.xri_base,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_vfi,
phba->sli4_hba.max_cfg_param.rpi_base,
phba->sli4_hba.max_cfg_param.max_rpi,
- phba->sli4_hba.max_cfg_param.fcfi_base,
phba->sli4_hba.max_cfg_param.max_fcfi);
}
- mempool_free(pmb, phba->mbox_mem_pool);
+
+ if (rc)
+ goto read_cfg_out;
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->cfg_hba_queue_depth =
phba->sli4_hba.max_cfg_param.max_xri -
lpfc_sli4_get_els_iocb_cnt(phba);
+
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ goto read_cfg_out;
+
+ /* get the pf# and vf# for SLI4 if_type 2 port */
+ length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc || shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3026 Mailbox failed , mbxCmd x%x "
+ "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ rc = -EIO;
+ goto read_cfg_out;
+ }
+
+ /* search for fc_fcoe resource descriptor */
+ get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+ desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
+
+ for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+ desc = (struct lpfc_rsrc_desc_fcfcoe *)
+ &get_func_cfg->func_cfg.desc[i];
+ if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+ bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+ phba->sli4_hba.iov.pf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+ phba->sli4_hba.iov.vf_number =
+ bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+ break;
+ }
+ }
+
+ if (i < LPFC_RSRC_DESC_MAX_NUM)
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+ "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+ phba->sli4_hba.iov.vf_number);
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3028 GET_FUNCTION_CONFIG: failed to find "
+ "Resrouce Descriptor:x%x\n",
+ LPFC_RSRC_DESC_TYPE_FCFCOE);
+ rc = -EIO;
+ }
+
+read_cfg_out:
+ mempool_free(pmb, phba->mbox_mem_pool);
return rc;
}
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = NULL;
/* Release FCP response complete queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+ fcp_qidx = 0;
+ do
lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+ while (++fcp_qidx < phba->cfg_fcp_eq_count);
kfree(phba->sli4_hba.fcp_cq);
phba->sli4_hba.fcp_cq = NULL;
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path FCP Response Complete Queue */
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+ fcp_cqidx = 0;
+ do {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0526 Fast-path FCP CQ (%d) not "
"allocated\n", fcp_cqidx);
goto out_destroy_fcp_cq;
}
- rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.fp_eq[fcp_cqidx],
- LPFC_WCQ, LPFC_FCP);
+ if (phba->cfg_fcp_eq_count)
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.fp_eq[fcp_cqidx],
+ LPFC_WCQ, LPFC_FCP);
+ else
+ rc = lpfc_cq_create(phba,
+ phba->sli4_hba.fcp_cq[fcp_cqidx],
+ phba->sli4_hba.sp_eq,
+ LPFC_WCQ, LPFC_FCP);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2588 FCP CQ setup: cq[%d]-id=%d, "
- "parent eq[%d]-id=%d\n",
+ "parent %seq[%d]-id=%d\n",
fcp_cqidx,
phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+ (phba->cfg_fcp_eq_count) ? "" : "sp_",
fcp_cqidx,
- phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
- }
+ (phba->cfg_fcp_eq_count) ?
+ phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
+ phba->sli4_hba.sp_eq->queue_id);
+ } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
/*
* Set up all the Work Queues (WQs)
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
fcp_cq_index,
phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
/* Round robin FCP Work Queue's Completion Queue assignment */
- fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+ if (phba->cfg_fcp_eq_count)
+ fcp_cq_index = ((fcp_cq_index + 1) %
+ phba->cfg_fcp_eq_count);
}
/*
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
if (rdy_chk < 1000)
break;
}
+ /* delay driver action following IF_TYPE_2 function reset */
+ msleep(100);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
/*
* Assign MSI-X vectors to interrupt handlers
*/
-
- /* The first vector must associated to slow-path handler for MQ */
- rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
- &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
- LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ if (vectors > 1)
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+ LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ else
+ /* All Interrupts need to be handled by one EQ */
+ rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+ &lpfc_sli4_intr_handler, IRQF_SHARED,
+ LPFC_DRIVER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
int wait_cnt = 0;
LPFC_MBOXQ_t *mboxq;
+ struct pci_dev *pdev = phba->pcidev;
lpfc_stop_hba_timers(phba);
phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return rc;
}
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int length;
struct lpfc_sli4_parameters *mbx_sli4_parameters;
+ /*
+ * By default, the driver assumes the SLI4 port requires RPI
+ * header postings. The SLI4_PARAM response will correct this
+ * assumption.
+ */
+ phba->sli4_hba.rpi_hdrs_in_use = 1;
+
/* Read the port's SLI4 Config Parameters */
length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mbx_sli4_parameters);
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
mbx_sli4_parameters);
+ phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+ phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+
+ /* Make sure that sge_supp_len can be handled by the driver */
+ if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+ sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
return 0;
}
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
lpfc_debugfs_terminate(vport);
+ /* Disable SR-IOV if enabled */
+ if (phba->cfg_sriov_nr_virtfn)
+ pci_disable_sriov(pdev);
+
/* Disable interrupt */
lpfc_sli_disable_intr(phba);
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
}
/**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @fw: pointer to firmware image returned from request_firmware.
+ *
+ * returns the number of bytes written if write is successful.
+ * returns a negative error value if there were errors.
+ * returns 0 if firmware matches currently active firmware on port.
+ **/
+int
+lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
+{
+ char fwrev[32];
+ struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
+ struct list_head dma_buffer_list;
+ int i, rc = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+ uint32_t offset = 0, temp_offset = 0;
+
+ INIT_LIST_HEAD(&dma_buffer_list);
+ if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+ (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
+ (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+ (image->size != fw->size)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 Invalid FW image found. "
+ "Magic:%d Type:%x ID:%x\n",
+ image->magic_number,
+ bf_get(lpfc_grp_hdr_file_type, image),
+ bf_get(lpfc_grp_hdr_id, image));
+ return -EINVAL;
+ }
+ lpfc_decode_firmware_rev(phba, fwrev, 1);
+ if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3023 Updating Firmware. Current Version:%s "
+ "New Version:%s\n",
+ fwrev, image->rev_name);
+ for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ rc = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&dmabuf->list, &dma_buffer_list);
+ }
+ while (offset < fw->size) {
+ temp_offset = offset;
+ list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+ if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
+ memcpy(dmabuf->virt,
+ fw->data + temp_offset,
+ fw->size - temp_offset);
+ temp_offset = fw->size;
+ break;
+ }
+ memcpy(dmabuf->virt, fw->data + temp_offset,
+ SLI4_PAGE_SIZE);
+ temp_offset += SLI4_PAGE_SIZE;
+ }
+ rc = lpfc_wr_object(phba, &dma_buffer_list,
+ (fw->size - offset), &offset);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update failed. "
+ "%d\n", rc);
+ goto out;
+ }
+ }
+ rc = offset;
+ }
+out:
+ list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+ list_del(&dmabuf->list);
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ return rc;
+}
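A condensed probe-path sketch of how this routine is meant to be driven; it mirrors the request_firmware() hookup added to lpfc_pci_probe_one_s4() later in this patch, and the "<ModelName>.grp" file name comes from that hunk. The helper name and the dev_err() message are illustrative only, not part of the patch.

static void lpfc_try_fw_update_sketch(struct lpfc_hba *phba)
{
	const struct firmware *fw;
	char file_name[16];

	/* The firmware group file is named after the HBA model. */
	snprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
	if (request_firmware(&fw, file_name, &phba->pcidev->dev))
		return;	/* no image available; keep the running firmware */
	if (lpfc_write_firmware(phba, fw) < 0)
		dev_err(&phba->pcidev->dev, "lpfc: firmware update failed\n");
	release_firmware(fw);
}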
+
+/**
* lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
int error;
uint32_t cfg_mode, intr_mode;
int mcnt;
+ int adjusted_fcp_eq_count;
+ int fcp_qidx;
+ const struct firmware *fw;
+ uint8_t file_name[16];
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENODEV;
goto out_free_sysfs_attr;
}
- /* Default to single FCP EQ for non-MSI-X */
+ /* Default to single EQ for non-MSI-X */
if (phba->intr_type != MSIX)
- phba->cfg_fcp_eq_count = 1;
- else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
- phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ adjusted_fcp_eq_count = 0;
+ else if (phba->sli4_hba.msix_vec_nr <
+ phba->cfg_fcp_eq_count + 1)
+ adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+ else
+ adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
+ /* Free unused EQs */
+ for (fcp_qidx = adjusted_fcp_eq_count;
+ fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++) {
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+ /* do not delete the first fcp_cq */
+ if (fcp_qidx)
+ lpfc_sli4_queue_free(
+ phba->sli4_hba.fcp_cq[fcp_qidx]);
+ }
+ phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
+ /* check for firmware upgrade or downgrade */
+ snprintf(file_name, 16, "%s.grp", phba->ModelName);
+ error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!error) {
+ lpfc_write_firmware(phba, fw);
+ release_firmware(fw);
+ }
+
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6ce9033f85e..556767028353 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
- mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
/* save address for completion */
pmb->context1 = mp;
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
- if (vpi != 0xffff)
- vpi += phba->vpi_base;
mb->un.varUnregDID.vpi = vpi;
+ if ((vpi != 0xffff) &&
+ (phba->sli_rev == LPFC_SLI_REV4))
+ mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_D_ID;
mb->mbxOwner = OWN_HOST;
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- mb->un.varRegLogin.rpi = rpi;
- if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
- return 1;
- }
- mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
mb->un.varRegLogin.did = did;
mb->mbxOwner = OWN_HOST;
/* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
"rpi x%x\n", vpi, did, rpi);
- return (1);
+ return 1;
}
INIT_LIST_HEAD(&mp->list);
sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
- return (0);
+ return 0;
}
/**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
*
* This routine prepares the mailbox command for unregistering remote port
* login.
+ *
+ * For SLI4 ports, the rpi passed to this function must be the physical
+ * rpi value, not the logical index.
**/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->un.varUnregLogin.rpi = (uint16_t) rpi;
+ mb->un.varUnregLogin.rpi = rpi;
mb->un.varUnregLogin.rsvd1 = 0;
- mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev >= LPFC_SLI_REV3)
+ mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
- lpfc_unreg_login(phba, vport->vpi,
- vport->vpi + phba->vpi_base, mbox);
- mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ;
+ /*
+ * For SLI4 functions, the rpi field is overloaded for
+ * the vport context unreg all. This routine passes
+ * 0 for the rpi field in lpfc_unreg_login for compatibility
+ * with SLI3 and then overrides the rpi field with the
+ * expected value for SLI4.
+ */
+ lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
+ mbox);
+ mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
mb->un.varRegVpi.upd = 1;
- mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
+
+ mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
mb->un.varRegVpi.sid = vport->fc_myDID;
- mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
+ else
+ mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
sizeof(struct lpfc_name));
mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
@@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- if (phba->sli_rev < LPFC_SLI_REV4)
- mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
- else
- mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
+ else if (phba->sli_rev >= LPFC_SLI_REV4)
+ mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
mb->mbxCommand = MBX_UNREG_VPI;
mb->mbxOwner = OWN_HOST;
@@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
return length;
}
- /* Setup for the none-embedded mbox command */
+ /* Setup for the non-embedded mbox command */
pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
- mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+ mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
GFP_KERNEL);
if (!mbox->sge_array) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
@@ -1790,12 +1805,87 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
/* The sub-header is in DMA memory, which needs endian conversion */
if (cfg_shdr)
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
- sizeof(union lpfc_sli4_cfg_shdr));
-
+ sizeof(union lpfc_sli4_cfg_shdr));
return alloc_len;
}
/**
+ * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to an allocated lpfc mbox resource.
+ * @exts_count: the number of extents, if required, to allocate.
+ * @rsrc_type: the resource extent type.
+ * @emb: LPFC_SLI4_MBX_EMBED for an embedded command, LPFC_SLI4_MBX_NEMBED otherwise.
+ *
+ * This routine completes the subcommand header for SLI4 resource extent
+ * mailbox commands. It is called after lpfc_sli4_config. The caller must
+ * pass an allocated mailbox and the attributes required to initialize the
+ * mailbox correctly.
+ *
+ * Return: 0 on success, 1 on failure.
+ **/
+int
+lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+ uint16_t exts_count, uint16_t rsrc_type, bool emb)
+{
+ uint8_t opcode = 0;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
+ void *virtaddr = NULL;
+
+ /* Set up SLI4 ioctl command header fields */
+ if (emb == LPFC_SLI4_MBX_NEMBED) {
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ virtaddr = mbox->sge_array->addr[0];
+ if (virtaddr == NULL)
+ return 1;
+ n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ }
+
+ /*
+ * The resource type is common to all extent Opcodes and resides in the
+ * same position.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ rsrc_type);
+ else {
+ /* This is DMA data. Byteswap is required. */
+ bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+ n_rsrc_extnt, rsrc_type);
+ lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
+ &n_rsrc_extnt->word4,
+ sizeof(uint32_t));
+ }
+
+ /* Complete the initialization for the particular Opcode. */
+ opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
+ switch (opcode) {
+ case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
+ if (emb == LPFC_SLI4_MBX_EMBED)
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+ exts_count);
+ else
+ bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+ n_rsrc_extnt, exts_count);
+ break;
+ case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
+ case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
+ case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
+ /* Initialization is complete.*/
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "2929 Resource Extent Opcode x%x is "
+ "unsupported\n", opcode);
+ return 1;
+ }
+
+ return 0;
+}
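A condensed usage sketch, assuming the mailbox has already been allocated from phba->mbox_mem_pool; it follows lpfc_sli4_get_avail_extnt_rsrc() added later in this patch, with the RPI type and the sketch's function name chosen purely for illustration.

static int lpfc_rsrc_extent_query_sketch(struct lpfc_hba *phba,
					 LPFC_MBOXQ_t *mbox)
{
	uint32_t length = sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
			  sizeof(struct lpfc_sli4_cfg_mhdr);

	/* Size and initialize the SLI4_CONFIG header first ... */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);
	/* ... then let this helper fill in the extent type (count unused). */
	if (lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, LPFC_RSC_TYPE_FCOE_RPI,
				       LPFC_SLI4_MBX_EMBED))
		return -EIO;
	return lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
}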
+
+/**
* lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
@@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
bf_set(lpfc_init_vfi_vr, init_vfi, 1);
bf_set(lpfc_init_vfi_vt, init_vfi, 1);
bf_set(lpfc_init_vfi_vp, init_vfi, 1);
- bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
- bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
- bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
+ bf_set(lpfc_init_vfi_vfi, init_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+ bf_set(lpfc_init_vpi_vpi, init_vfi,
+ vport->phba->vpi_ids[vport->vpi]);
+ bf_set(lpfc_init_vfi_fcfi, init_vfi,
+ vport->phba->fcf.fcfi);
}
/**
@@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi = &mbox->u.mqe.un.reg_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
- bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
+ bf_set(lpfc_reg_vfi_vfi, reg_vfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
- bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+ bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
@@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
- vpi + phba->vpi_base);
+ phba->vpi_ids[vpi]);
bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
- phba->pport->vfi + phba->vfi_base);
+ phba->sli4_hba.vfi_ids[phba->pport->vfi]);
}
/**
@@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
- vport->vfi + vport->phba->vfi_base);
+ vport->phba->sli4_hba.vfi_ids[vport->vfi]);
}
/**
@@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
void
lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = ndlp->phba;
struct lpfc_mbx_resume_rpi *resume_rpi;
memset(mbox, 0, sizeof(*mbox));
resume_rpi = &mbox->u.mqe.un.resume_rpi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
- bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
+ bf_set(lpfc_resume_rpi_index, resume_rpi,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cbb48ee8b0bb..10d5b5e41499 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -62,7 +62,6 @@ int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- int longs;
int i;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
phba->lpfc_hrb_pool = NULL;
phba->lpfc_drb_pool = NULL;
}
- /* vpi zero is reserved for the physical port so add 1 to max */
- longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
- phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
- if (!phba->vpi_bmask)
- goto fail_free_dbq_pool;
return 0;
-
- fail_free_dbq_pool:
- pci_pool_destroy(phba->lpfc_drb_pool);
- phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
@@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
int i;
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
- /* Free VPI bitmask memory */
- kfree(phba->vpi_bmask);
-
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
if (phba->lpfc_drb_pool)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0d92d4205ea6..2ddd02f7c603 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,11 +350,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /*
- * Need to unreg_login if we are already in one of these states and
- * change to NPR state. This will block the port until after the ACC
- * completes and the reg_login is issued and completed.
- */
+ /* no need to reg_login if we are already in one of these states */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -363,9 +359,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_unreg_rpi(vport, ndlp);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+ return 1;
}
if ((vport->fc_flag & FC_PT2PT) &&
@@ -657,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
+
/**
* lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
* @phba : Pointer to lpfc_hba structure.
@@ -1399,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
if (mb->mbxStatus) {
/* RegLogin failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0246 RegLogin failed Data: x%x x%x x%x\n",
- did, mb->mbxStatus, vport->port_state);
+ "0246 RegLogin failed Data: x%x x%x x%x x%x "
+ "x%x\n",
+ did, mb->mbxStatus, vport->port_state,
+ mb->un.varRegLogin.vpi,
+ mb->un.varRegLogin.rpi);
/*
* If RegLogin failed due to lack of HBA resources do not
* retry discovery.
@@ -1424,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
return ndlp->nlp_state;
}
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
/* Only if we are not a fabric nport do we issue PRLI */
@@ -2025,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
MAILBOX_t *mb = &pmb->u.mb;
if (!mb->mbxStatus) {
- ndlp->nlp_rpi = mb->un.varWords[0];
+ /* SLI4 ports have preallocated logical rpis. */
+ if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
} else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 84e4481b2406..3ccc97496ebf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
if (bcnt == 0)
continue;
/* Now, post the SCSI buffer list sgls as a block */
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
/* Reset SCSI buffer count for next round of posting */
bcnt = 0;
while (!list_empty(&sblist)) {
@@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
- uint16_t iotag, last_xritag = NO_XRI;
+ uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
int status = 0, index;
int bcnt;
int non_sequential_xri = 0;
@@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
break;
}
- psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
- if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}
+ psb->cur_iocbq.sli4_lxritag = lxri;
+ psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
if (last_xritag != NO_XRI
&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
non_sequential_xri = 1;
@@ -861,6 +870,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
@@ -869,6 +879,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
/* Setup the physical region for the FCP RSP */
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
@@ -914,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
}
}
if (bcnt) {
- status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+ if (!phba->sli4_hba.extents_in_use)
+ status = lpfc_sli4_post_scsi_sgl_block(phba,
+ &sblist,
+ bcnt);
+ else
+ status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+ &sblist,
+ bcnt);
+
+ if (status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "3021 SCSI SGL post error %d\n",
+ status);
+ bcnt = 0;
+ }
/* Reset SCSI buffer count for next round of posting */
while (!list_empty(&sblist)) {
list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
@@ -2081,6 +2106,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dma_len = sg_dma_len(sgel);
sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((num_bde + 1) == nseg)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -2794,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ piocbq->iocb.ulpContext =
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
else
@@ -2807,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
- * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
@@ -2851,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ piocb->ulpContext =
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ }
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
piocb->ulpFCP2Rcvy = 1;
}
@@ -3405,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %d "
- "rpi x%x nlp_flag x%x\n",
+ "rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
- pnode->nlp_rpi, pnode->nlp_flag);
+ pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
+ iocbq->iocb_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -3419,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
ret = FAILED;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
+ "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
+ "iocb_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4]);
+ iocbqrsp->iocb.un.ulpWord[4],
+ iocbq->iocb_flag);
} else if (status == IOCB_BUSY)
ret = FAILED;
else
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fd5835e1c039..98999bbd8cbf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,9 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
+static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_cqe *);
+
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
@@ -456,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
struct lpfc_iocbq * iocbq = NULL;
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
-
if (iocbq)
phba->iocb_cnt++;
if (phba->iocb_cnt > phba->iocb_max)
@@ -479,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+ phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
return sglq;
}
@@ -504,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
- uint16_t adj_xri;
struct lpfc_sglq *sglq;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
- return NULL;
- sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+
+ sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
return sglq;
}
@@ -532,7 +528,6 @@ static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
- uint16_t adj_xri;
struct lpfc_node_rrq *rrq;
int empty;
uint32_t did = 0;
@@ -553,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/*
* set the active bit even if there is no mem available.
*/
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
-
if (NLP_CHK_FREE_REQ(ndlp))
goto out;
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
- if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
goto out;
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
- rrq->xritag = xritag;
+ rrq->xritag = phba->sli4_hba.xri_ids[xritag];
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
@@ -603,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
uint16_t xritag,
struct lpfc_node_rrq *rrq)
{
- uint16_t adj_xri;
struct lpfc_nodelist *ndlp = NULL;
if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -619,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
if (!ndlp)
goto out;
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
+ if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
@@ -796,12 +787,9 @@ int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
- uint16_t adj_xri;
-
- adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (!ndlp)
return 0;
- if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+ if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
return 1;
else
return 0;
@@ -841,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* @piocb: Pointer to the iocbq.
*
* This function is called with hbalock held. This function
- * Gets a new driver sglq object from the sglq list. If the
+ * gets a new driver sglq object from the sglq list. If the
* list is not empty then it is successful, it returns pointer to the newly
* allocated sglq object else it returns NULL.
**/
@@ -851,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
struct lpfc_sglq *sglq = NULL;
struct lpfc_sglq *start_sglq = NULL;
- uint16_t adj_xri;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
int found = 0;
@@ -870,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
while (!found) {
if (!sglq)
return NULL;
- adj_xri = sglq->sli4_xritag -
- phba->sli4_hba.max_cfg_param.xri_base;
if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
@@ -888,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
}
sglq->ndlp = ndlp;
found = 1;
- phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+ phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
sglq->state = SGL_ALLOCATED;
}
return sglq;
@@ -944,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
- sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+ sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
if (sglq) {
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@@ -971,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+ iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -2113,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
- vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+ vpi = pmb->u.mb.un.varRegLogin.vpi;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -3881,8 +3868,10 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
list_del_init(&phba->sli4_hba.els_cq->list);
for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
- for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
+ qindx = 0;
+ do
list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
+ while (++qindx < phba->cfg_fcp_eq_count);
spin_unlock_irq(&phba->hbalock);
/* Now physically reset the device */
@@ -4318,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
continue;
} else if (rc)
break;
+
phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4421,7 +4411,8 @@ int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
uint32_t rc;
- int mode = 3;
+ int mode = 3, i;
+ int longs;
switch (lpfc_sli_mode) {
case 2:
@@ -4491,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
if (rc)
goto lpfc_sli_hba_setup_error;
+ /* Initialize VPIs. */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ /*
+ * The VPI bitmask and physical ID array are allocated
+ * and initialized once only - at driver load. A port
+ * reset doesn't need to reinitialize this memory.
+ */
+ if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
+ longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->vpi_bmask) {
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+
+ phba->vpi_ids = kzalloc(
+ (phba->max_vpi+1) * sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!phba->vpi_ids) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto lpfc_sli_hba_setup_error;
+ }
+ for (i = 0; i < phba->max_vpi; i++)
+ phba->vpi_ids[i] = i;
+ }
+ }
+
/* Init HBQs */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
rc = lpfc_sli_hbq_setup(phba);
@@ -4677,9 +4697,11 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+ fcp_eqidx = 0;
+ do
lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
LPFC_QUEUE_REARM);
+ while (++fcp_eqidx < phba->cfg_fcp_eq_count);
lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -4687,6 +4709,803 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold the available extent count returned by the port.
+ * @extnt_size: buffer to hold the number of resource ids in each extent.
+ *
+ * This function queries the port for the number of available extents and the
+ * size of each extent for the requested resource type.
+ **/
+static int
+lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
+ uint16_t *extnt_count, uint16_t *extnt_size)
+{
+ int rc = 0;
+ uint32_t length;
+ uint32_t mbox_tmo;
+ struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
+ LPFC_MBOXQ_t *mbox;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Find out how many extents are available for this resource type */
+ length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the GET doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2930 Failed to get resource extents "
+ "Status 0x%x Add'l Status 0x%x\n",
+ bf_get(lpfc_mbox_hdr_status,
+ &rsrc_info->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &rsrc_info->header.cfg_shdr.response));
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
+ &rsrc_info->u.rsp);
+ *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
+ &rsrc_info->u.rsp);
+ err_exit:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
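A small sketch of how the two outputs combine, with the XRI type picked for illustration; the count-times-size product mirrors the rsrc_id_cnt computation in lpfc_sli4_alloc_extent() below.

	uint16_t ext_cnt, ext_size;
	uint32_t total_xri = 0;

	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
					    &ext_cnt, &ext_size))
		/* ext_cnt extents, each holding ext_size resource ids */
		total_xri = ext_cnt * ext_size;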
+
+/**
+ * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The extent type to check.
+ *
+ * This function reads the current available extents from the port and checks
+ * if the extent count or extent size has changed since the last access.
+ * Callers use this routine after a port reset to determine whether an
+ * extent reprovisioning is required.
+ *
+ * Returns:
+ * -Error: the extent information could not be read from the port.
+ * 1: Extent count or size has changed.
+ * 0: No changes.
+ **/
+static int
+lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
+{
+ uint16_t curr_ext_cnt, rsrc_ext_cnt;
+ uint16_t size_diff, rsrc_ext_size;
+ int rc = 0;
+ struct lpfc_rsrc_blks *rsrc_entry;
+ struct list_head *rsrc_blk_list = NULL;
+
+ size_diff = 0;
+ curr_ext_cnt = 0;
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ rsrc_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ break;
+ }
+
+ list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
+ curr_ext_cnt++;
+ if (rsrc_entry->rsrc_size != rsrc_ext_size)
+ size_diff++;
+ }
+
+ if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
+ rc = 1;
+
+ return rc;
+}
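A sketch of the post-reset decision this return convention supports, condensed from lpfc_sli4_alloc_resource_identifiers() further down in this patch; only two of the four resource types are shown here.

	int error = 0;

	/* Any non-zero return (change detected or read failure) forces the
	 * extent pools to be torn down and reprovisioned.
	 */
	if (lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI) != 0)
		error++;
	if (lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI) != 0)
		error++;
	if (error) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
	}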
+
+/**
+ * lpfc_sli4_cfg_post_extnts - Issue the allocate resource extents mailbox cmd.
+ * @phba: Pointer to HBA context object.
+ * @extnt_cnt: pointer to the number of extents to allocate.
+ * @type: the extent type (rpi, xri, vfi, vpi).
+ * @emb: buffer returning whether the command was sent MBX_EMBED or MBX_NEMBED.
+ * @mbox: pointer to the caller's allocated mailbox structure.
+ *
+ * This function builds and issues the extents allocation request. It also
+ * decides whether the request fits in an embedded mailbox or requires a
+ * non-embedded (SGE-based) command. It is the caller's responsibility to
+ * evaluate the response.
+ *
+ * Returns:
+ * -Error: Error value describes the condition found.
+ * 0: if successful
+ **/
+static int
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+ uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
+{
+ int rc = 0;
+ uint32_t req_len;
+ uint32_t emb_len;
+ uint32_t alloc_len, mbox_tmo;
+
+ /* Calculate the total requested length of the dma memory */
+ req_len = *extnt_cnt * sizeof(uint16_t);
+
+ /*
+ * Calculate the size of an embedded mailbox. The uint32_t
+ * accounts for the extent-specific word.
+ */
+ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ sizeof(uint32_t);
+
+ /*
+ * Presume the allocation and response will fit into an embedded
+ * mailbox. If not true, reconfigure to a non-embedded mailbox.
+ */
+ *emb = LPFC_SLI4_MBX_EMBED;
+ if (req_len > emb_len) {
+ req_len = *extnt_cnt * sizeof(uint16_t) +
+ sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+ *emb = LPFC_SLI4_MBX_NEMBED;
+ }
+
+ alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
+ req_len, *emb);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "9000 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ return -ENOMEM;
+ }
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
+ if (unlikely(rc))
+ return -EIO;
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+
+ if (unlikely(rc))
+ rc = -EIO;
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type to allocate.
+ *
+ * This function allocates all available extents of the specified resource
+ * type from the port and sets up the driver's id bitmask and id array.
+ **/
+static int
+lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ bool emb = false;
+ uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
+ uint16_t rsrc_id, rsrc_start, j, k;
+ uint16_t *ids;
+ int i, rc;
+ unsigned long longs;
+ unsigned long *bmask;
+ struct lpfc_rsrc_blks *rsrc_blks;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t length;
+ struct lpfc_id_range *id_array = NULL;
+ void *virtaddr = NULL;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+ struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+ struct list_head *ext_blk_list;
+
+ rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+ &rsrc_cnt,
+ &rsrc_size);
+ if (unlikely(rc))
+ return -EIO;
+
+ if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "3009 No available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+ return -ENOMEM;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
+ "2903 Available Resource Extents "
+ "for resource type 0x%x: Count: 0x%x, "
+ "Size 0x%x\n", type, rsrc_cnt,
+ rsrc_size);
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Figure out where the response is located. Then get local pointers
+ * to the response data. The port does not guarantee to respond to
+ * all extents counts request so update the local variable with the
+ * allocated count from the port.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED) {
+ rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+ id_array = &rsrc_ext->u.rsp.id[0];
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ } else {
+ virtaddr = mbox->sge_array->addr[0];
+ n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+ id_array = &n_rsrc->id;
+ }
+
+ longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ rsrc_id_cnt = rsrc_cnt * rsrc_size;
+
+ /*
+ * Based on the resource size and count, correct the base and max
+ * resource values.
+ */
+ length = sizeof(struct lpfc_rsrc_blks);
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ kfree(phba->sli4_hba.rpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /*
+ * The next_rpi was initialized with the maximum available
+ * count but the port may allocate a smaller number. Catch
+ * that case and update the next_rpi.
+ */
+ phba->sli4_hba.next_rpi = rsrc_id_cnt;
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.rpi_bmask;
+ ids = phba->sli4_hba.rpi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->vpi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ kfree(phba->vpi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->vpi_bmask;
+ ids = phba->vpi_ids;
+ ext_blk_list = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ kfree(phba->sli4_hba.xri_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.xri_bmask;
+ ids = phba->sli4_hba.xri_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ kfree(phba->sli4_hba.vfi_bmask);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ /* Initialize local ptrs for common extent processing later. */
+ bmask = phba->sli4_hba.vfi_bmask;
+ ids = phba->sli4_hba.vfi_ids;
+ ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ default:
+ /* Unsupported resource type. Fail the call. */
+ id_array = NULL;
+ bmask = NULL;
+ ids = NULL;
+ ext_blk_list = NULL;
+ goto err_exit;
+ }
+
+ /*
+ * Complete initializing the extent configuration with the
+ * allocated ids assigned to this function. The bitmask serves
+ * as an index into the array and manages the available ids. The
+ * array just stores the ids communicated to the port via the wqes.
+ */
+ for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
+ if ((i % 2) == 0)
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
+ &id_array[k]);
+ else
+ rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
+ &id_array[k]);
+
+ rsrc_blks = kzalloc(length, GFP_KERNEL);
+ if (unlikely(!rsrc_blks)) {
+ rc = -ENOMEM;
+ kfree(bmask);
+ kfree(ids);
+ goto err_exit;
+ }
+ rsrc_blks->rsrc_start = rsrc_id;
+ rsrc_blks->rsrc_size = rsrc_size;
+ list_add_tail(&rsrc_blks->list, ext_blk_list);
+ rsrc_start = rsrc_id;
+ if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
+ phba->sli4_hba.scsi_xri_start = rsrc_start +
+ lpfc_sli4_get_els_iocb_cnt(phba);
+
+ while (rsrc_id < (rsrc_start + rsrc_size)) {
+ ids[j] = rsrc_id;
+ rsrc_id++;
+ j++;
+ }
+ /* Entire word processed. Get next word. */
+ if ((i % 2) == 1)
+ k++;
+ }
+ err_exit:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return rc;
+}
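A consumption sketch: the ids[] array built above is what turns a driver-side logical index into the port-assigned physical id, and that translation only happens when a mailbox or IOCB is built. The three assignments below are taken from the lpfc_mbox.c and lpfc_scsi.c hunks of this same patch.

	/* logical index -> physical id at mailbox/IOCB build time */
	mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
	mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
	piocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[pnode->nlp_rpi];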
+
+/**
+ * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: the extent's type.
+ *
+ * This function deallocates all extents of a particular resource type.
+ * SLI4 does not allow for deallocating a particular extent range. It
+ * is the caller's responsibility to release all kernel memory resources.
+ **/
+static int
+lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+ int rc;
+ uint32_t length, mbox_tmo = 0;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
+ struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /*
+ * This function sends an embedded mailbox because it only sends the
+ * resource type. All extents of this type are released by the
+ * port.
+ */
+ length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ /* Send an extents count of 0 - the dealloc doesn't use it. */
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+ LPFC_SLI4_MBX_EMBED);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
+ if (bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2919 Failed to release resource extents "
+ "for type %d - Status 0x%x Add'l Status 0x%x. "
+ "Resource memory not released.\n",
+ type,
+ bf_get(lpfc_mbox_hdr_status,
+ &dealloc_rsrc->header.cfg_shdr.response),
+ bf_get(lpfc_mbox_hdr_add_status,
+ &dealloc_rsrc->header.cfg_shdr.response));
+ rc = -EIO;
+ goto out_free_mbox;
+ }
+
+ /* Release kernel memory resources for the specific type. */
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->lpfc_vpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_xri_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_vfi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ /* RPI bitmask and physical id array are cleaned up earlier. */
+ list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+ &phba->sli4_hba.lpfc_rpi_blk_list, list) {
+ list_del_init(&rsrc_blk->list);
+ kfree(rsrc_blk);
+ }
+ break;
+ default:
+ break;
+ }
+
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+
+ out_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates all SLI4 resource identifiers.
+ **/
+int
+lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ int i, rc, error = 0;
+ uint16_t count, base;
+ unsigned long longs;
+
+ if (phba->sli4_hba.extents_in_use) {
+ /*
+ * The port supports resource extents. The XRI, VPI, VFI, RPI
+ * resource extent count must be read and allocated before
+ * provisioning the resource id arrays.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY) {
+ /*
+ * Extent-based resources are set - the driver could
+ * be in a port reset. Figure out if any corrective
+ * actions need to be taken.
+ */
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ if (rc != 0)
+ error++;
+ rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ if (rc != 0)
+ error++;
+
+ /*
+ * It's possible that the number of resources
+ * provided to this port instance changed between
+ * resets. Detect this condition and reallocate
+ * resources. Otherwise, there is no action.
+ */
+ if (error) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_MBOX | LOG_INIT,
+ "2931 Detected extent resource "
+ "change. Reallocating all "
+ "extents.\n");
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VFI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_VPI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_XRI);
+ rc = lpfc_sli4_dealloc_extent(phba,
+ LPFC_RSC_TYPE_FCOE_RPI);
+ } else
+ return 0;
+ }
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ if (unlikely(rc))
+ goto err_exit;
+
+ rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ if (unlikely(rc))
+ goto err_exit;
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return rc;
+ } else {
+ /*
+ * The port does not support resource extents. The XRI, VPI,
+ * VFI, RPI resource ids were determined from READ_CONFIG.
+ * Just allocate the bitmasks and provision the resource id
+ * arrays. If a port reset is active, the resources don't
+ * need any action - just exit.
+ */
+ if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+ LPFC_IDX_RSRC_RDY)
+ return 0;
+
+ /* RPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_rpi;
+ base = phba->sli4_hba.max_cfg_param.rpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.rpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ phba->sli4_hba.rpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.rpi_ids)) {
+ rc = -ENOMEM;
+ goto free_rpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.rpi_ids[i] = base + i;
+
+ /* VPIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vpi;
+ base = phba->sli4_hba.max_cfg_param.vpi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_bmask)) {
+ rc = -ENOMEM;
+ goto free_rpi_ids;
+ }
+ phba->vpi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->vpi_ids)) {
+ rc = -ENOMEM;
+ goto free_vpi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->vpi_ids[i] = base + i;
+
+ /* XRIs. */
+ count = phba->sli4_hba.max_cfg_param.max_xri;
+ base = phba->sli4_hba.max_cfg_param.xri_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.xri_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_bmask)) {
+ rc = -ENOMEM;
+ goto free_vpi_ids;
+ }
+ phba->sli4_hba.xri_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.xri_ids)) {
+ rc = -ENOMEM;
+ goto free_xri_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.xri_ids[i] = base + i;
+
+ /* VFIs. */
+ count = phba->sli4_hba.max_cfg_param.max_vfi;
+ base = phba->sli4_hba.max_cfg_param.vfi_base;
+ longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->sli4_hba.vfi_bmask = kzalloc(longs *
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+ rc = -ENOMEM;
+ goto free_xri_ids;
+ }
+ phba->sli4_hba.vfi_ids = kzalloc(count *
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (unlikely(!phba->sli4_hba.vfi_ids)) {
+ rc = -ENOMEM;
+ goto free_vfi_bmask;
+ }
+
+ for (i = 0; i < count; i++)
+ phba->sli4_hba.vfi_ids[i] = base + i;
+
+ /*
+ * Mark all resources ready. A subsequent HBA reset does not
+ * need to repeat this initialization.
+ */
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_IDX_RSRC_RDY);
+ return 0;
+ }
+
+ free_vfi_bmask:
+ kfree(phba->sli4_hba.vfi_bmask);
+ free_xri_ids:
+ kfree(phba->sli4_hba.xri_ids);
+ free_xri_bmask:
+ kfree(phba->sli4_hba.xri_bmask);
+ free_vpi_ids:
+ kfree(phba->vpi_ids);
+ free_vpi_bmask:
+ kfree(phba->vpi_bmask);
+ free_rpi_ids:
+ kfree(phba->sli4_hba.rpi_ids);
+ free_rpi_bmask:
+ kfree(phba->sli4_hba.rpi_bmask);
+ err_exit:
+ return rc;
+}
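The id arrays provisioned above give a one-to-one logical-to-physical mapping (ids[i] = base + i when extents are not in use), which is why later hunks replace base-plus-index arithmetic with a plain array lookup. A minimal illustrative sketch, not part of the patch; the helper name is hypothetical:

	/* Hypothetical helper for illustration; the patch open-codes this lookup. */
	static inline uint16_t
	lpfc_rpi_logical_to_phys(struct lpfc_hba *phba, uint16_t lrpi)
	{
		/* rpi_ids[i] was provisioned as rpi_base + i above. */
		return phba->sli4_hba.rpi_ids[lrpi];
	}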
+
+/**
+ * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function releases all SLI4 resource identifiers. When the port uses
+ * resource extents, the extents of each type are deallocated as well.
+ **/
+int
+lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
+{
+ if (phba->sli4_hba.extents_in_use) {
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ } else {
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+ bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.xri_bmask);
+ kfree(phba->sli4_hba.xri_ids);
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ kfree(phba->sli4_hba.vfi_bmask);
+ kfree(phba->sli4_hba.vfi_ids);
+ bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+ }
+
+ return 0;
+}
+
+/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -4708,10 +5527,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
- /*
- * TODO: Why does this routine execute these task in a different
- * order from probe?
- */
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc))
@@ -4740,7 +5555,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
* to read FCoE param config regions
*/
if (lpfc_sli4_read_fcoe_params(phba, mboxq))
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
@@ -4873,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /*
+ * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
+ * calls depend on these resources to complete port setup.
+ */
+ rc = lpfc_sli4_alloc_resource_identifiers(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2920 Failed to alloc Resource IDs "
+ "rc = x%x\n", rc);
+ goto out_free_mbox;
+ }
+
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
if (rc) {
@@ -4906,35 +5733,37 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
- if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
- memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
+ lpfc_update_vport_wwn(vport);
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
/* Register SGL pool to the device using non-embedded mailbox command */
- rc = lpfc_sli4_post_sgl_list(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0582 Error %d during sgl post operation\n",
- rc);
- rc = -ENODEV;
- goto out_free_mbox;
+ if (!phba->sli4_hba.extents_in_use) {
+ rc = lpfc_sli4_post_els_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0582 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+ } else {
+ rc = lpfc_sli4_post_els_sgl_list_ext(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "2560 Error %d during els sgl post "
+ "operation\n", rc);
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
}
/* Register SCSI SGL pool to the device */
rc = lpfc_sli4_repost_scsi_sgl_list(phba);
if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0383 Error %d during scsi sgl post "
"operation\n", rc);
/* Some Scsi buffers were moved to the abort scsi list */
@@ -5747,10 +6576,15 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
sizeof(struct lpfc_mcqe));
mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
-
- /* Prefix the mailbox status with range x4000 to note SLI4 status. */
+ /*
+ * When the CQE status indicates a failure and the mailbox status
+ * indicates success, then copy the CQE status into the mailbox status
+ * (and prefix it with x4000).
+ */
if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
- bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
+ if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
+ bf_set(lpfc_mqe_status, mb,
+ (LPFC_MBX_ERROR_RANGE | mcqe_status));
rc = MBXERR_ERROR;
} else
lpfc_sli4_swap_str(phba, mboxq);
@@ -5819,7 +6653,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
else
rc = -EIO;
if (rc != MBX_SUCCESS)
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2541 Mailbox command x%x "
"(x%x) cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
@@ -6307,6 +7141,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
sgl->addr_hi = bpl->addrHigh;
sgl->addr_lo = bpl->addrLow;
+ sgl->word2 = le32_to_cpu(sgl->word2);
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
@@ -6343,6 +7178,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+ sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len =
@@ -6474,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
}
- bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi);
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6623,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpContext);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- iocbq->vport->vpi + phba->vpi_base);
+ phba->vpi_ids[iocbq->vport->vpi]);
bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
LPFC_WQE_LENLOC_WORD3);
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
- bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi);
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -6729,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
return IOCB_ERROR;
break;
}
+
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6776,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_BUSY;
}
} else {
- sglq = __lpfc_sli_get_sglq(phba, piocb);
+ sglq = __lpfc_sli_get_sglq(phba, piocb);
if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
@@ -6789,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
}
} else if (piocb->iocb_flag & LPFC_IO_FCP) {
- sglq = NULL; /* These IO's already have an XRI and
- * a mapped sgl.
- */
+ /* These IO's already have an XRI and a mapped sgl. */
+ sglq = NULL;
} else {
- /* This is a continuation of a commandi,(CX) so this
+ /*
+ * This is a continuation of a command (CX), so this
* sglq is on the active list
*/
sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6802,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
if (sglq) {
+ piocb->sli4_lxritag = sglq->sli4_lxritag;
piocb->sli4_xritag = sglq->sli4_xritag;
-
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
return IOCB_ERROR;
}
@@ -9799,7 +10638,12 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
+ if (cq->subtype == LPFC_FCP)
+ workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
+ cqe);
+ else
+ workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
+ cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
@@ -11446,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
LPFC_MBOXQ_t *mbox;
int rc;
uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
union lpfc_sli4_cfg_shdr *shdr;
if (xritag == NO_XRI) {
@@ -11479,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11498,6 +12345,76 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_alloc_xri - Allocate an available xri from the driver's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the next unused logical xri from the
+ * driver's xri bitmask. The index returned is logical; callers translate it
+ * to the port's physical xri through the xri_ids array.
+ *
+ * Return codes
+ *	the allocated logical xri - successful
+ *	NO_XRI - no xris are available.
+ */
+uint16_t
+lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
+{
+ unsigned long xri;
+
+ /*
+ * Fetch the next logical xri. Because this index is logical,
+ * the driver starts at 0 each time.
+ */
+ spin_lock_irq(&phba->hbalock);
+ xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri, 0);
+ if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
+ spin_unlock_irq(&phba->hbalock);
+ return NO_XRI;
+ } else {
+ set_bit(xri, phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.max_cfg_param.xri_used++;
+ phba->sli4_hba.xri_count++;
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ return xri;
+}
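A minimal usage sketch for the allocator above and the free routines that follow; the caller is hypothetical. The logical index is what is returned to the pool, while the id programmed into hardware comes from xri_ids:

	uint16_t lxri = lpfc_sli4_alloc_xri(phba);	/* logical index or NO_XRI */
	if (lxri == NO_XRI)
		return -ENOMEM;
	/* use phba->sli4_hba.xri_ids[lxri] as the physical xri in the WQE */
	lpfc_sli4_free_xri(phba, lxri);			/* return logical index */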
+
+/**
+ * __lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver. The caller is expected
+ * to hold the hbalock.
+ **/
+void
+__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
+ phba->sli4_hba.xri_count--;
+ phba->sli4_hba.max_cfg_param.xri_used--;
+ }
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver. It takes the hbalock and
+ * calls __lpfc_sli4_free_xri.
+ **/
+void
+lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+ spin_lock_irq(&phba->hbalock);
+ __lpfc_sli4_free_xri(phba, xri);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
* lpfc_sli4_next_xritag - Get an xritag for the io
* @phba: Pointer to HBA context object.
*
@@ -11510,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
- uint16_t xritag;
+ uint16_t xri_index;
- spin_lock_irq(&phba->hbalock);
- xritag = phba->sli4_hba.next_xri;
- if ((xritag != (uint16_t) -1) && xritag <
- (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base)) {
- phba->sli4_hba.next_xri++;
- phba->sli4_hba.max_cfg_param.xri_used++;
- spin_unlock_irq(&phba->hbalock);
- return xritag;
- }
- spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ xri_index = lpfc_sli4_alloc_xri(phba);
+ if (xri_index != NO_XRI)
+ return xri_index;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2004 Failed to allocate XRI.last XRITAG is %d"
" Max XRI is %d, Used XRI is %d\n",
- phba->sli4_hba.next_xri,
+ xri_index,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.xri_used);
- return -1;
+ return NO_XRI;
}
/**
- * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post a block of driver's sgl pages to the
@@ -11542,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
* stopped.
**/
int
-lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
{
struct lpfc_sglq *sglq_entry;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11551,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, pg_pairs;
uint32_t mbox_tmo;
- uint16_t xritag_start = 0;
+ uint16_t xritag_start = 0, lxri = 0;
int els_xri_cnt, rc = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
@@ -11568,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
return -ENOMEM;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2560 Failed to allocate mbox cmd memory\n");
+ if (!mbox)
return -ENOMEM;
- }
/* Allocate DMA memory and set up the non-embedded mailbox command */
alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11587,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
- /* Get the first SGE entry from the non-embedded DMA memory */
- viraddr = mbox->sge_array->addr[0];
-
/* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
sgl_pg_pairs = &sgl->sgl_pg_pairs;
for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+
+ /*
+ * Assign the sglq a physical xri only if the driver has not
+ * initialized those resources. A port reset only needs
+ * the sglq's posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11605,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
cpu_to_le32(putPaddrLow(0));
sgl_pg_pairs->sgl_pg1_addr_hi =
cpu_to_le32(putPaddrHigh(0));
+
/* Keep the first xritag on the list */
if (pg_pairs == 0)
xritag_start = sglq_entry->sli4_xritag;
sgl_pg_pairs++;
}
+
+ /* Complete initialization and perform endian conversion. */
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
- /* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);
-
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
@@ -11633,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
+
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of the driver's ELS sgl pages to
+ * the port using non-embedded mailbox commands. No lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, index;
+ uint32_t mbox_tmo;
+ uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+ uint16_t xritag_start = 0, lxri = 0;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ int cnt, ttl_cnt, rc = 0;
+ int loop_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* The number of sgls to be posted */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+ reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2989 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ cnt = 0;
+ ttl_cnt = 0;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ rsrc_start = rsrc_blk->rsrc_start;
+ rsrc_size = rsrc_blk->rsrc_size;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3014 Working ELS Extent start %d, cnt %d\n",
+ rsrc_start, rsrc_size);
+
+ loop_cnt = min(els_xri_cnt, rsrc_size);
+ if (ttl_cnt + loop_cnt >= els_xri_cnt) {
+ loop_cnt = els_xri_cnt - ttl_cnt;
+ ttl_cnt = els_xri_cnt;
+ }
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command.
+ */
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen, LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2987 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ viraddr = mbox->sge_array->addr[0];
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /*
+ * The starting resource may not begin at zero. Control
+ * the loop variants via the block resource parameters,
+ * but handle the sge pointers with a zero-based index
+ * that doesn't get reset per loop pass.
+ */
+ for (index = rsrc_start;
+ index < rsrc_start + loop_cnt;
+ index++) {
+ sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
+
+ /*
+ * Assign the sglq a physical xri only if the driver
+ * has not initialized those resources. A port reset
+ * only needs the sglq's posted.
+ */
+ if (bf_get(lpfc_xri_rsrc_rdy,
+ &phba->sli4_hba.sli4_flags) !=
+ LPFC_XRI_RSRC_RDY) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag =
+ phba->sli4_hba.xri_ids[lxri];
+ }
+
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(0));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(0));
+
+ /* Track the starting physical XRI for the mailbox. */
+ if (index == rsrc_start)
+ xritag_start = sglq_entry->sli4_xritag;
+ sgl_pg_pairs++;
+ cnt++;
+ }
+
+ /* Complete initialization and perform endian conversion. */
+ rsrc_blk->rsrc_used += loop_cnt;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
+ sgl->word0 = cpu_to_le32(sgl->word0);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3015 Post ELS Extent SGL, start %d, "
+ "cnt %d, used %d\n",
+ xritag_start, loop_cnt, rsrc_blk->rsrc_used);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2988 POST_SGL_BLOCK mailbox "
+ "command failed status x%x "
+ "add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ goto err_exit;
+ }
+ if (ttl_cnt >= els_xri_cnt)
+ break;
+ }
+
+ err_exit:
+ if (rc == 0)
+ bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_XRI_RSRC_RDY);
return rc;
}
@@ -11693,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
+
/* Get the first SGE entry from the non-embedded DMA memory */
viraddr = mbox->sge_array->addr[0];
@@ -11748,6 +12847,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
}
/**
+ * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
+ int cnt)
+{
+ struct lpfc_scsi_buf *psb = NULL;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xri_start = 0, scsi_xri_start;
+ uint16_t rsrc_range;
+ int rc = 0, avail_cnt;
+ uint32_t shdr_status, shdr_add_status;
+ dma_addr_t pdma_phys_bpl1;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ uint32_t xri_cnt = 0;
+
+ /* Calculate the total requested length of the dma memory */
+ reqlen = cnt * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "2932 Block sgl registration required DMA "
+ "size (%d) great than a page\n", reqlen);
+ return -ENOMEM;
+ }
+
+ /*
+ * The use of extents requires the driver to post the sgl headers
+ * in multiple postings to meet the contiguous resource assignment.
+ */
+ psb = list_prepare_entry(psb, sblist, list);
+ scsi_xri_start = phba->sli4_hba.scsi_xri_start;
+ list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
+ if (rsrc_range < scsi_xri_start)
+ continue;
+ else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
+ continue;
+ else
+ avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
+
+ reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ /*
+ * Allocate DMA memory and set up the non-embedded mailbox
+ * command. The mbox is used to post an SGL page per loop
+ * but the DMA memory has a use-once semantic so the mailbox
+ * is used and freed per loop pass.
+ */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2933 Failed to allocate mbox cmd "
+ "memory\n");
+ return -ENOMEM;
+ }
+ alloclen = lpfc_sli4_config(phba, mbox,
+ LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+ reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2934 Allocated DMA memory size (%d) "
+ "is less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ viraddr = mbox->sge_array->addr[0];
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ /* pg_pairs tracks posted SGEs per loop iteration. */
+ pg_pairs = 0;
+ list_for_each_entry_continue(psb, sblist, list) {
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = psb->dma_phys_bpl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+ /* Keep the first xri for this extent. */
+ if (pg_pairs == 0)
+ xri_start = psb->cur_iocbq.sli4_xritag;
+ sgl_pg_pairs++;
+ pg_pairs++;
+ xri_cnt++;
+
+ /*
+ * Track two exit conditions - the loop has constructed
+ * all of the caller's SGE pairs or all available
+ * resource IDs in this extent are consumed.
+ */
+ if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
+ break;
+ }
+ rsrc_blk->rsrc_used += pg_pairs;
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3016 Post SCSI Extent SGL, start %d, cnt %d "
+ "blk use %d\n",
+ xri_start, pg_pairs, rsrc_blk->rsrc_used);
+ /* Perform endian conversion if necessary */
+ sgl->word0 = cpu_to_le32(sgl->word0);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2935 POST_SGL_BLOCK mailbox command "
+ "failed status x%x add_status x%x "
+ "mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ return -ENXIO;
+ }
+
+ /* Post only what is requested. */
+ if (xri_cnt >= cnt)
+ break;
+ }
+ return rc;
+}
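A worked pass through the two exit conditions above, with purely illustrative numbers:

	/*
	 * Illustrative numbers only: cnt = 40 buffers, first extent has
	 * avail_cnt = 32 unused xris. Pass 1 posts 32 sgl page pairs
	 * (pg_pairs reaches avail_cnt), issues the mailbox and moves to the
	 * next extent; pass 2 continues from the same sblist position and
	 * posts the remaining 8 pairs (xri_cnt reaches cnt), after which
	 * the outer loop breaks.
	 */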
+
+/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -12137,6 +13399,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
+ * @phba: Pointer to HBA context object.
+ * @xri: xri id in transaction.
+ *
+ * This function validates that the xri maps to the known range of XRIs
+ * allocated and used by the driver, returning the corresponding logical index.
+ **/
+static uint16_t
+lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
+ uint16_t xri)
+{
+ int i;
+
+ for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
+ if (xri == phba->sli4_hba.xri_ids[i])
+ return i;
+ }
+ return NO_XRI;
+}
+
+
+/**
* lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
* @phba: Pointer to HBA context object.
* @fc_hdr: pointer to a FC frame header.
@@ -12169,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
"SID:x%x\n", oxid, sid);
return;
}
- if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
- && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
- + phba->sli4_hba.max_cfg_param.xri_base))
+ if (lpfc_sli4_xri_inrange(phba, rxid))
lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
/* Allocate buffer for rsp iocb */
@@ -12194,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
icmd->ulpBdeCount = 0;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
- icmd->ulpContext = ndlp->nlp_rpi;
+ icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = ndlp;
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
/* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12380,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
- first_iocbq->iocb.unsli3.rcvsli3.vpi =
- vport->vpi + vport->phba->vpi_base;
+ /* iocbq is prepped for internal consumption. Logical vpi. */
+ first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
/* put the first buffer into the first IOCBq */
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
@@ -12461,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
&phba->sli.ring[LPFC_ELS_RING],
iocbq, fc_hdr->fh_r_ctl,
fc_hdr->fh_type))
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
@@ -12558,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_page;
uint32_t rc = 0;
+ uint16_t lrpi = 0;
+
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ goto exit;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
- /* Post all rpi memory regions to the port. */
list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+ /*
+ * Assign the rpi headers a physical rpi only if the driver
+ * has not initialized those resources. A port reset only
+ * needs the headers posted.
+ */
+ if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+ LPFC_RPI_RSRC_RDY)
+ rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+
rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12571,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
}
}
+ exit:
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+ LPFC_RPI_RSRC_RDY);
return rc;
}
@@ -12594,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
LPFC_MBOXQ_t *mboxq;
struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
uint32_t rc = 0;
- uint32_t mbox_tmo;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* SLI4 ports that support extents do not require RPI headers. */
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ return rc;
+ if (phba->sli4_hba.extents_in_use)
+ return -EIO;
+
/* The port is notified of the header region via a mailbox command. */
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
@@ -12609,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
/* Post all rpi memory regions to the port. */
hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
sizeof(struct lpfc_sli4_cfg_mhdr),
LPFC_SLI4_MBX_EMBED);
- bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
- hdr_tmpl, rpi_page->page_count);
+
+
+ /* Post the physical rpi to the port for this rpi header. */
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
rpi_page->start_rpi);
+ bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+ hdr_tmpl, rpi_page->page_count);
+
hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12653,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
- int rpi;
- uint16_t max_rpi, rpi_base, rpi_limit;
- uint16_t rpi_remaining;
+ unsigned long rpi;
+ uint16_t max_rpi, rpi_limit;
+ uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
- rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
rpi_limit = phba->sli4_hba.next_rpi;
/*
- * The valid rpi range is not guaranteed to be zero-based. Start
- * the search at the rpi_base as reported by the port.
+ * Fetch the next logical rpi. Because this index is logical,
+ * the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
- if (rpi >= rpi_limit || rpi < rpi_base)
+ rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12678,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
/*
* Don't try to allocate more rpi header regions if the device limit
- * on available rpis max has been exhausted.
+ * has been exhausted.
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12687,13 +13995,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
}
/*
+ * RPI header postings are not required for SLI4 ports capable of
+ * extents.
+ */
+ if (!phba->sli4_hba.rpi_hdrs_in_use) {
+ spin_unlock_irq(&phba->hbalock);
+ return rpi;
+ }
+
+ /*
* If the driver is running low on rpi resources, allocate another
* page now. Note that the next_rpi value is used because
* it represents how many are actually in use whereas max_rpi notes
* how many are supported max by the device.
*/
- rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
- phba->sli4_hba.rpi_count;
+ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
spin_unlock_irq(&phba->hbalock);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12702,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
"2002 Error Could not grow rpi "
"count\n");
} else {
+ lrpi = rpi_hdr->start_rpi;
+ rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
}
}
@@ -12751,6 +14069,8 @@ void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
kfree(phba->sli4_hba.rpi_bmask);
+ kfree(phba->sli4_hba.rpi_ids);
+ bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
/**
@@ -13490,6 +14810,96 @@ out:
}
/**
+ * lpfc_wr_object - write an object to the firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @dmabuf_list: list of dmabufs to write to the port.
+ * @size: the total byte value of the objects to write to the port.
+ * @offset: the current offset to be used to start the transfer.
+ *
+ * This routine will create a wr_object mailbox command to send to the port.
+ * The mailbox command will be constructed using the dma buffers described in
+ * @dmabuf_list to create a list of BDEs. This routine will fill in as many
+ * BDEs as the embedded mailbox can support. The @offset variable will be
+ * used to indicate the starting offset of the transfer and will also return
+ * the offset after the write object mailbox has completed. @size is used to
+ * determine the end of the object and whether the eof bit should be set.
+ *
+ * Return 0 if successful and @offset will contain the new offset to use
+ * for the next write.
+ * Return negative value for error cases.
+ **/
+int
+lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ uint32_t size, uint32_t *offset)
+{
+ struct lpfc_mbx_wr_object *wr_object;
+ LPFC_MBOXQ_t *mbox;
+ int rc = 0, i = 0;
+ uint32_t shdr_status, shdr_add_status;
+ uint32_t mbox_tmo;
+ union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_dmabuf *dmabuf;
+ uint32_t written = 0;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_WRITE_OBJECT,
+ sizeof(struct lpfc_mbx_wr_object) -
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+ wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
+ wr_object->u.request.write_offset = *offset;
+ sprintf((uint8_t *)wr_object->u.request.object_name, "/");
+ wr_object->u.request.object_name[0] =
+ cpu_to_le32(wr_object->u.request.object_name[0]);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
+ list_for_each_entry(dmabuf, dmabuf_list, list) {
+ if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
+ break;
+ wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
+ wr_object->u.request.bde[i].addrHigh =
+ putPaddrHigh(dmabuf->phys);
+ if (written + SLI4_PAGE_SIZE >= size) {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ (size - written);
+ written += (size - written);
+ bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+ } else {
+ wr_object->u.request.bde[i].tus.f.bdeSize =
+ SLI4_PAGE_SIZE;
+ written += SLI4_PAGE_SIZE;
+ }
+ i++;
+ }
+ wr_object->u.request.bde_count = i;
+ bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3025 Write Object mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ } else
+ *offset += wr_object->u.response.actual_write_length;
+ return rc;
+}
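A minimal caller sketch for the contract described above; names and the chunking scheme are hypothetical, not from this patch. Each pass refills the dmabuf list with the piece of the image at the current offset and passes the remaining byte count so the eof bit is set on the final mailbox:

	uint32_t offset = 0;

	while (offset < image_size) {
		/* refill dmabuf_list with the image bytes starting at 'offset' */
		if (lpfc_wr_object(phba, &dmabuf_list, image_size - offset, &offset))
			return -EIO;
	}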
+
+/**
* lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
* @vport: pointer to vport data structure.
*
@@ -13644,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
* never happen
*/
sglq = __lpfc_clear_active_sglq(phba,
- sglq->sli4_xritag);
+ sglq->sli4_lxritag);
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2823 txq empty and txq_cnt is %d\n ",
@@ -13656,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
/* The xri and iocb resources secured,
* attempt to issue request
*/
+ piocbq->sli4_lxritag = sglq->sli4_lxritag;
piocbq->sli4_xritag = sglq->sli4_xritag;
if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
fail_msg = "to convert bpl to sgl";
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 453577c21c14..a0075b0af142 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -52,6 +52,7 @@ struct lpfc_iocbq {
struct list_head clist;
struct list_head dlist;
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct lpfc_cq_event cq_event;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1a3cbf88f2ce..4b1703554a26 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
uint16_t vfi_base;
uint16_t vfi_used;
uint16_t max_fcfi;
- uint16_t fcfi_base;
uint16_t fcfi_used;
uint16_t max_eq;
uint16_t max_rq;
@@ -365,6 +364,11 @@ struct lpfc_pc_sli4_params {
uint8_t rqv;
};
+struct lpfc_iov {
+ uint32_t pf_number;
+ uint32_t vf_number;
+};
+
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -444,10 +448,13 @@ struct lpfc_sli4_hba {
uint32_t intr_enable;
struct lpfc_bmbx bmbx;
struct lpfc_max_cfg_param max_cfg_param;
+ uint16_t extents_in_use; /* must allocate resource extents. */
+ uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
+ uint16_t scsi_xri_start;
struct list_head lpfc_free_sgl_list;
struct list_head lpfc_sgl_list;
struct lpfc_sglq **lpfc_els_sgl_array;
@@ -458,7 +465,17 @@ struct lpfc_sli4_hba {
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
+ uint16_t *rpi_ids;
uint16_t rpi_count;
+ struct list_head lpfc_rpi_blk_list;
+ unsigned long *xri_bmask;
+ uint16_t *xri_ids;
+ uint16_t xri_count;
+ struct list_head lpfc_xri_blk_list;
+ unsigned long *vfi_bmask;
+ uint16_t *vfi_ids;
+ uint16_t vfi_count;
+ struct list_head lpfc_vfi_blk_list;
struct lpfc_sli4_flags sli4_flags;
struct list_head sp_queue_event;
struct list_head sp_cqe_event_pool;
@@ -467,6 +484,7 @@ struct lpfc_sli4_hba {
struct list_head sp_els_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
+ struct lpfc_iov iov;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
};
@@ -490,6 +508,7 @@ struct lpfc_sglq {
enum lpfc_sgl_state state;
struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
uint16_t iotag; /* pre-assigned IO tag */
+ uint16_t sli4_lxritag; /* logical pre-assigned xri. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct sli4_sge *sgl; /* pre-assigned SGL */
void *virt; /* virtual address. */
@@ -504,6 +523,13 @@ struct lpfc_rpi_hdr {
uint32_t start_rpi;
};
+struct lpfc_rsrc_blks {
+ struct list_head list;
+ uint16_t rsrc_start;
+ uint16_t rsrc_size;
+ uint16_t rsrc_used;
+};
+
/*
* SLI4 specific function prototypes
*/
@@ -543,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
-int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
+ int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 30ba5440c67a..1feb551a57bc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
- int vpi;
+ unsigned long vpi;
spin_lock_irq(&phba->hbalock);
/* Start at bit 1 because vpi zero is reserved for the physical port */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 046dcc672ec1..7370c084b178 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.34-rc1"
-#define MEGASAS_RELDATE "Feb. 24, 2011"
-#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.05.38-rc1"
+#define MEGASAS_RELDATE "May. 11, 2011"
+#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
/*
* Device IDs
@@ -76,8 +76,8 @@
#define MFI_STATE_READY 0xB0000000
#define MFI_STATE_OPERATIONAL 0xC0000000
#define MFI_STATE_FAULT 0xF0000000
-#define MFI_RESET_REQUIRED 0x00000001
-
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 89c623ebadbc..2d8cdce7b2f5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.34-rc1
+ * Version : v00.00.05.38-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -437,15 +437,18 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
static int
megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
{
- u32 status;
+ u32 status, mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
status = readl(&regs->outbound_intr_status);
- if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
- return 0;
- }
+ if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
/*
* Clear the interrupt by writing back the same value
@@ -455,8 +458,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
/* Dummy readl to force pci flush */
readl(&regs->outbound_doorbell_clear);
- return 1;
+ return mfiStatus;
}
+
/**
* megasas_fire_cmd_ppc - Sends command to the FW
* @frame_phys_addr : Physical address of cmd
@@ -477,17 +481,6 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
}
/**
- * megasas_adp_reset_ppc - For controller reset
- * @regs: MFI register set
- */
-static int
-megasas_adp_reset_ppc(struct megasas_instance *instance,
- struct megasas_register_set __iomem *regs)
-{
- return 0;
-}
-
-/**
* megasas_check_reset_ppc - For controller reset check
* @regs: MFI register set
*/
@@ -495,8 +488,12 @@ static int
megasas_check_reset_ppc(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
return 0;
}
+
static struct megasas_instance_template megasas_instance_template_ppc = {
.fire_cmd = megasas_fire_cmd_ppc,
@@ -504,7 +501,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
.disable_intr = megasas_disable_intr_ppc,
.clear_intr = megasas_clear_intr_ppc,
.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
- .adp_reset = megasas_adp_reset_ppc,
+ .adp_reset = megasas_adp_reset_xscale,
.check_reset = megasas_check_reset_ppc,
.service_isr = megasas_isr,
.tasklet = megasas_complete_cmd_dpc,
@@ -620,6 +617,9 @@ static int
megasas_check_reset_skinny(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
return 0;
}
@@ -3454,7 +3454,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
{
u32 max_sectors_1;
u32 max_sectors_2;
- u32 tmp_sectors;
+ u32 tmp_sectors, msix_enable;
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
@@ -3507,6 +3507,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (megasas_transition_to_ready(instance))
goto fail_ready_state;
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x4000000) >> 0x1a;
+ if (msix_enable && !msix_disable &&
+ !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
+ instance->msi_flag = 1;
+
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
@@ -4076,14 +4083,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- /* Try to enable MSI-X */
- if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
- !msix_disable && !pci_enable_msix(instance->pdev,
- &instance->msixentry, 1))
- instance->msi_flag = 1;
-
/*
* Initialize MFI Firmware
*/
@@ -4116,6 +4115,14 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
megasas_mgmt_info.max_index++;
/*
+ * Register with SCSI mid-layer
+ */
+ if (megasas_io_attach(instance))
+ goto fail_io_attach;
+
+ instance->unload = 0;
+
+ /*
* Initiate AEN (Asynchronous Event Notification)
*/
if (megasas_start_aen(instance)) {
@@ -4123,13 +4130,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail_start_aen;
}
- /*
- * Register with SCSI mid-layer
- */
- if (megasas_io_attach(instance))
- goto fail_io_attach;
-
- instance->unload = 0;
return 0;
fail_start_aen:
@@ -4332,10 +4332,6 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_set_dma_mask(pdev))
goto fail_set_dma_mask;
- /* Now re-enable MSI-X */
- if (instance->msi_flag)
- pci_enable_msix(instance->pdev, &instance->msixentry, 1);
-
/*
* Initialize MFI Firmware
*/
@@ -4348,6 +4344,10 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_transition_to_ready(instance))
goto fail_ready_state;
+ /* Now re-enable MSI-X */
+ if (instance->msi_flag)
+ pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
{
@@ -4384,12 +4384,6 @@ megasas_resume(struct pci_dev *pdev)
instance->instancet->enable_intr(instance->reg_set);
- /*
- * Initiate AEN (Asynchronous Event Notification)
- */
- if (megasas_start_aen(instance))
- printk(KERN_ERR "megasas: Start AEN failed\n");
-
/* Initialize the cmd completion timer */
if (poll_mode_io)
megasas_start_timer(instance, &instance->io_completion_timer,
@@ -4397,6 +4391,12 @@ megasas_resume(struct pci_dev *pdev)
MEGASAS_COMPLETION_TIMER_INTERVAL);
instance->unload = 0;
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance))
+ printk(KERN_ERR "megasas: Start AEN failed\n");
+
return 0;
fail_irq:
@@ -4527,6 +4527,11 @@ static void megasas_shutdown(struct pci_dev *pdev)
instance->unload = 1;
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+ instance->instancet->disable_intr(instance->reg_set);
+ free_irq(instance->msi_flag ? instance->msixentry.vector :
+ instance->pdev->irq, instance);
+ if (instance->msi_flag)
+ pci_disable_msix(instance->pdev);
}
/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 145a8cffb1fa..f13e7abd345a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -696,22 +696,6 @@ fail_get_cmd:
}
/*
- * megasas_return_cmd_for_smid - Returns a cmd_fusion for a SMID
- * @instance: Adapter soft state
- *
- */
-void
-megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
-{
- struct fusion_context *fusion;
- struct megasas_cmd_fusion *cmd;
-
- fusion = instance->ctrl_context;
- cmd = fusion->cmd_list[smid - 1];
- megasas_return_cmd_fusion(instance, cmd);
-}
-
-/*
* megasas_get_ld_map_info - Returns FW's ld_map structure
* @instance: Adapter soft state
* @pend: Pend the command or not
@@ -1153,7 +1137,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
u64 start_blk = io_info->pdBlock;
u8 *cdb = io_request->CDB.CDB32;
u32 num_blocks = io_info->numBlocks;
- u8 opcode, flagvals, groupnum, control;
+ u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
/* Check if T10 PI (DIF) is enabled for this LD */
ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
@@ -1235,7 +1219,46 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[8] = (u8)(num_blocks & 0xff);
cdb[7] = (u8)((num_blocks >> 8) & 0xff);
+ io_request->IoFlags = 10; /* Specify 10-byte cdb */
cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+ /* Convert to 16 byte CDB for large LBA's */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode =
+ cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode =
+ cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u8)(num_blocks & 0xff);
+ cdb[12] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u8)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = 16; /* Specify 16-byte cdb */
+ cdb_len = 16;
}
/* Normal case, just load LBA here */
@@ -2026,17 +2049,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
- u32 host_diag, abs_state;
+ u32 host_diag, abs_state, status_reg, reset_adapter;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
- mutex_lock(&instance->reset_mutex);
- set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
- instance->instancet->disable_intr(instance->reg_set);
- msleep(1000);
-
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
"returning FAILED.\n");
@@ -2044,6 +2061,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
goto out;
}
+ mutex_lock(&instance->reset_mutex);
+ set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->instancet->disable_intr(instance->reg_set);
+ msleep(1000);
+
/* First try waiting for commands to complete */
if (megasas_wait_for_outstanding_fusion(instance)) {
printk(KERN_WARNING "megaraid_sas: resetting fusion "
@@ -2060,7 +2083,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
}
}
- if (instance->disableOnlineCtrlReset == 1) {
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (instance->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
/* Reset not supported, kill adapter */
printk(KERN_WARNING "megaraid_sas: Reset not supported"
", killing adapter.\n");
@@ -2089,6 +2117,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
/* Check that the diag write enable (DRWE) bit is on */
host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
msleep(100);
host_diag =
@@ -2126,7 +2155,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
abs_state =
instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ instance->reg_set) & MFI_STATE_MASK;
retry = 0;
while ((abs_state <= MFI_STATE_FW_INIT) &&
@@ -2134,7 +2163,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
msleep(100);
abs_state =
instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ instance->reg_set) & MFI_STATE_MASK;
}
if (abs_state <= MFI_STATE_FW_INIT) {
printk(KERN_WARNING "megaraid_sas: firmware "
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 2a3c05f6db8b..dcc289c25459 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "08.100.00.01"
+#define MPT2SAS_DRIVER_VERSION "08.100.00.02"
#define MPT2SAS_MAJOR_VERSION 08
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
-#define MPT2SAS_RELEASE_VERSION 01
+#define MPT2SAS_RELEASE_VERSION 02
/*
* Set MPT2SAS_SG_DEPTH value based on user input.
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index f12e02358d6d..a7dbc6825f5f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -113,6 +113,7 @@ struct sense_info {
};
+#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
/**
@@ -121,6 +122,7 @@ struct sense_info {
* @work: work object (ioc->fault_reset_work_q)
* @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
+ * @device_handle: device handle
* @VF_ID: virtual function id
* @VP_ID: virtual port id
* @ignore: flag meaning this event has been marked to ignore
@@ -134,6 +136,7 @@ struct fw_event_work {
u8 cancel_pending_work;
struct delayed_work delayed_work;
struct MPT2SAS_ADAPTER *ioc;
+ u16 device_handle;
u8 VF_ID;
u8 VP_ID;
u8 ignore;
@@ -3499,6 +3502,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
switch (prot_type) {
case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
/*
* enable ref/guard checking
@@ -3511,13 +3515,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
cpu_to_be32(scsi_get_lba(scmd));
break;
- case SCSI_PROT_DIF_TYPE2:
-
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- break;
-
case SCSI_PROT_DIF_TYPE3:
/*
@@ -4047,17 +4044,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
#endif
/**
- * _scsih_smart_predicted_fault - illuminate Fault LED
+ * _scsih_turn_on_fault_led - illuminate Fault LED
* @ioc: per adapter object
* @handle: device handle
+ * Context: process
*
* Return nothing.
*/
static void
-_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SepReply_t mpi_reply;
Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus =
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ mpi_request.DevHandle = cpu_to_le16(handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
+ "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
+ fw_event->device_handle = handle;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
struct scsi_target *starget;
struct MPT2SAS_TARGET *sas_target_priv_data;
Mpi2EventNotificationReply_t *event_reply;
@@ -4084,30 +4139,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
starget_printk(KERN_WARNING, starget, "predicted fault\n");
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) {
- memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
- mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
- mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
- mpi_request.SlotStatus =
- cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
- mpi_request.DevHandle = cpu_to_le16(handle);
- mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
- if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
- &mpi_request)) != 0) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
-
- if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "enclosure_processor: ioc_status (0x%04x), "
- "loginfo(0x%08x)\n", ioc->name,
- le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
- return;
- }
- }
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+ _scsih_send_event_to_turn_on_fault_led(ioc, handle);
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -6753,6 +6786,9 @@ _firmware_event_work(struct work_struct *work)
}
switch (fw_event->event) {
+ case MPT2SAS_TURN_ON_FAULT_LED:
+ _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ break;
case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
_scsih_sas_topology_change_event(ioc, fw_event);
break;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 58f5be4740e9..de0b1a704fb5 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4698,12 +4698,14 @@ static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
break;
if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
+ int j;
+
STp->pos_unknown = 0;
STp->partition = STp->new_partition = 0;
if (STp->can_partitions)
STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
- for (i=0; i < ST_NBR_PARTITIONS; i++) {
- STps = &(STp->ps[i]);
+ for (j = 0; j < ST_NBR_PARTITIONS; j++) {
+ STps = &(STp->ps[j]);
STps->rw = ST_IDLE;
STps->eof = ST_NOEOF;
STps->at_sm = 0;
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 0339ff03a535..252523d7847e 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
- ql4_nx.o ql4_nvram.o ql4_dbg.o
+ ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
new file mode 100644
index 000000000000..864d018631c0
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -0,0 +1,69 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+
+/* Scsi_Host attributes. */
+static ssize_t
+qla4xxx_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+ if (is_qla8022(ha))
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+ ha->firmware_version[0],
+ ha->firmware_version[1],
+ ha->patch_number, ha->build_number);
+ else
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->firmware_version[0],
+ ha->firmware_version[1],
+ ha->patch_number, ha->build_number);
+}
+
+static ssize_t
+qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
+}
+
+static ssize_t
+qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
+ ha->iscsi_minor);
+}
+
+static ssize_t
+qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+ ha->bootload_major, ha->bootload_minor,
+ ha->bootload_patch, ha->bootload_build);
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
+static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
+static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
+static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
+
+struct device_attribute *qla4xxx_host_attrs[] = {
+ &dev_attr_fw_version,
+ &dev_attr_serial_num,
+ &dev_attr_iscsi_version,
+ &dev_attr_optrom_version,
+ NULL,
+};
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 4757878d59dd..473c5c872b39 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -115,7 +115,7 @@
#define INVALID_ENTRY 0xFFFF
#define MAX_CMDS_TO_RISC 1024
#define MAX_SRBS MAX_CMDS_TO_RISC
-#define MBOX_AEN_REG_COUNT 5
+#define MBOX_AEN_REG_COUNT 8
#define MAX_INIT_RETRIES 5
/*
@@ -368,7 +368,6 @@ struct scsi_qla_host {
#define AF_INIT_DONE 1 /* 0x00000002 */
#define AF_MBOX_COMMAND 2 /* 0x00000004 */
#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
-#define AF_DPC_SCHEDULED 5 /* 0x00000020 */
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
@@ -584,6 +583,14 @@ struct scsi_qla_host {
uint32_t nx_reset_timeout;
struct completion mbx_intr_comp;
+
+ /* --- From About Firmware --- */
+ uint16_t iscsi_major;
+ uint16_t iscsi_minor;
+ uint16_t bootload_major;
+ uint16_t bootload_minor;
+ uint16_t bootload_patch;
+ uint16_t bootload_build;
};
static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 31e2bf97198c..01082aa77098 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -690,6 +690,29 @@ struct mbx_sys_info {
uint8_t reserved[12]; /* 34-3f */
};
+struct about_fw_info {
+ uint16_t fw_major; /* 00 - 01 */
+ uint16_t fw_minor; /* 02 - 03 */
+ uint16_t fw_patch; /* 04 - 05 */
+ uint16_t fw_build; /* 06 - 07 */
+ uint8_t fw_build_date[16]; /* 08 - 17 ASCII String */
+ uint8_t fw_build_time[16]; /* 18 - 27 ASCII String */
+ uint8_t fw_build_user[16]; /* 28 - 37 ASCII String */
+ uint16_t fw_load_source; /* 38 - 39 */
+ /* 1 = Flash Primary,
+ 2 = Flash Secondary,
+ 3 = Host Download
+ */
+ uint8_t reserved1[6]; /* 3A - 3F */
+ uint16_t iscsi_major; /* 40 - 41 */
+ uint16_t iscsi_minor; /* 42 - 43 */
+ uint16_t bootload_major; /* 44 - 45 */
+ uint16_t bootload_minor; /* 46 - 47 */
+ uint16_t bootload_patch; /* 48 - 49 */
+ uint16_t bootload_build; /* 4A - 4B */
+ uint8_t reserved2[180]; /* 4C - FF */
+};
+
struct crash_record {
uint16_t fw_major_version; /* 00 - 01 */
uint16_t fw_minor_version; /* 02 - 03 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index cc53e3fbd78c..a53a256c1f8d 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -61,7 +61,7 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
int qla4xxx_add_sess(struct ddb_entry *);
void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
+int qla4xxx_about_firmware(struct scsi_qla_host *ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host *ha);
@@ -139,4 +139,5 @@ extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
+extern struct device_attribute *qla4xxx_host_attrs[];
#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 48e2241ddaf4..42ed5db2d530 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1275,7 +1275,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
- if (qla4xxx_get_fw_version(ha) == QLA_ERROR)
+ if (qla4xxx_about_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2f40ac761cd4..0e72921c752d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -25,9 +25,14 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
- if (sense_len == 0)
+ if (sense_len == 0) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
+ " sense len 0\n", ha->host_no,
+ cmd->device->channel, cmd->device->id,
+ cmd->device->lun, __func__));
+ ha->status_srb = NULL;
return;
-
+ }
/* Save total available sense length,
* not to exceed cmd's sense buffer size */
sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
@@ -541,6 +546,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
case MBOX_ASTS_SUBNET_STATE_CHANGE:
+ case MBOX_ASTS_DUPLICATE_IP:
/* No action */
DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
mbox_status));
@@ -593,11 +599,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
mbox_sts[i];
/* print debug message */
- DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
- " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
- ha->host_no, ha->aen_in, mbox_sts[0],
- mbox_sts[1], mbox_sts[2], mbox_sts[3],
- mbox_sts[4]));
+ DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
+ "mb1:0x%x mb2:0x%x mb3:0x%x "
+ "mb4:0x%x mb5:0x%x\n",
+ ha->host_no, ha->aen_in,
+ mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5]));
/* advance pointer */
ha->aen_in++;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index d78b58dc5011..fce8289e9752 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -86,22 +86,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
msleep(10);
}
- /* To prevent overwriting mailbox registers for a command that has
- * not yet been serviced, check to see if an active command
- * (AEN, IOCB, etc.) is interrupting, then service it.
- * -----------------------------------------------------------------
- */
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!is_qla8022(ha)) {
- intr_status = readl(&ha->reg->ctrl_status);
- if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
- /* Service existing interrupt */
- ha->isp_ops->interrupt_service_routine(ha, intr_status);
- clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
- }
- }
-
ha->mbox_status_count = outCount;
for (i = 0; i < outCount; i++)
ha->mbox_status[i] = 0;
@@ -1057,38 +1043,65 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
}
/**
- * qla4xxx_get_fw_version - gets firmware version
+ * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
* @ha: Pointer to host adapter structure.
*
- * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
- * hold an address for data. Make sure that we write 0 to those mailboxes,
- * if unused.
+ * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
+ * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
+ * those mailboxes, if unused.
**/
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
+int qla4xxx_about_firmware(struct scsi_qla_host *ha)
{
+ struct about_fw_info *about_fw = NULL;
+ dma_addr_t about_fw_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status = QLA_ERROR;
+
+ about_fw = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
+ if (!about_fw) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
+ "for about_fw\n", __func__));
+ return status;
+ }
- /* Get firmware version. */
+ memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
- QLA_SUCCESS) {
- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
- "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
- return QLA_ERROR;
+ mbox_cmd[2] = LSDW(about_fw_dma);
+ mbox_cmd[3] = MSDW(about_fw_dma);
+ mbox_cmd[4] = sizeof(struct about_fw_info);
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ &mbox_cmd[0], &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
+ "failed w/ status %04X\n", __func__,
+ mbox_sts[0]));
+ goto exit_about_fw;
}
- /* Save firmware version information. */
- ha->firmware_version[0] = mbox_sts[1];
- ha->firmware_version[1] = mbox_sts[2];
- ha->patch_number = mbox_sts[3];
- ha->build_number = mbox_sts[4];
+ /* Save version information. */
+ ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
+ ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
+ ha->patch_number = le16_to_cpu(about_fw->fw_patch);
+ ha->build_number = le16_to_cpu(about_fw->fw_build);
+ ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
+ ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
+ ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
+ ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
+ ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
+ ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
+ status = QLA_SUCCESS;
- return QLA_SUCCESS;
+exit_about_fw:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
+ about_fw, about_fw_dma);
+ return status;
}
static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 03e522b2fe0b..fdfe27b38698 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -964,12 +964,26 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
/* Halt all the individual PEGs and other blocks of the ISP */
qla4_8xxx_rom_lock(ha);
- /* mask all niu interrupts */
+ /* disable all I2Q */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+
+ /* disable all niu interrupts */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
/* disable xge rx/tx */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
/* disable xg1 rx/tx */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+ /* disable sideband mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+ /* disable ap0 mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+ /* disable ap1 mac */
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
/* halt sre */
val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
@@ -984,6 +998,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
/* halt pegs */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
@@ -991,9 +1006,9 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+ msleep(5);
/* big hammer */
- msleep(1000);
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
/* don't reset CAM block on reset */
qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c22f2a764d9d..f2364ec59f03 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -124,6 +124,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
+ .shost_attrs = qla4xxx_host_attrs,
};
static struct iscsi_transport qla4xxx_iscsi_transport = {
@@ -412,8 +413,7 @@ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
- struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+ struct scsi_cmnd *cmd)
{
struct srb *srb;
@@ -427,7 +427,6 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
srb->cmd = cmd;
srb->flags = 0;
CMD_SP(cmd) = (void *)srb;
- cmd->scsi_done = done;
return srb;
}
@@ -458,9 +457,8 @@ void qla4xxx_srb_compl(struct kref *ref)
/**
* qla4xxx_queuecommand - scsi layer issues scsi command to driver.
+ * @host: scsi host
* @cmd: Pointer to Linux's SCSI command structure
- * @done_fn: Function that the driver calls to notify the SCSI mid-layer
- * that the command has been processed.
*
* Remarks:
* This routine is invoked by Linux to send a SCSI command to the driver.
@@ -470,10 +468,9 @@ void qla4xxx_srb_compl(struct kref *ref)
* completion handling). Unfortunately, it sometimes calls the scheduler
* in interrupt context which is a big NO! NO!.
**/
-static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
- struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ struct scsi_qla_host *ha = to_qla_host(host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
struct iscsi_cls_session *sess = ddb_entry->sess;
struct srb *srb;
@@ -515,37 +512,29 @@ static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
- spin_unlock_irq(ha->host->host_lock);
-
- srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
+ srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
if (!srb)
- goto qc_host_busy_lock;
+ goto qc_host_busy;
rval = qla4xxx_send_command_to_isp(ha, srb);
if (rval != QLA_SUCCESS)
goto qc_host_busy_free_sp;
- spin_lock_irq(ha->host->host_lock);
return 0;
qc_host_busy_free_sp:
qla4xxx_srb_free_dma(ha, srb);
mempool_free(srb, ha->srb_mempool);
-qc_host_busy_lock:
- spin_lock_irq(ha->host->host_lock);
-
qc_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc_fail_command:
- done(cmd);
+ cmd->scsi_done(cmd);
return 0;
}
-static DEF_SCSI_QCMD(qla4xxx_queuecommand)
-
/**
* qla4xxx_mem_free - frees memory allocated to adapter
* @ha: Pointer to host adapter structure.
@@ -679,7 +668,27 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
if (ha->seconds_since_last_heartbeat == 2) {
ha->seconds_since_last_heartbeat = 0;
halt_status = qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
+ QLA82XX_PEG_HALT_STATUS1);
+
+ ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): %s, Dumping hw/fw registers:\n "
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
+ " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
+ " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
+ " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
+ ha->host_no, __func__, halt_status,
+ qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS2),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
+ 0x3c),
+ qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
+ 0x3c));
/* Since we cannot change dev_state in interrupt
* context, set appropriate DPC flag then wakeup
@@ -715,7 +724,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
- test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) {
+ test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
@@ -839,7 +848,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
}
/* Wakeup the dpc routine for this adapter, if needed. */
- if ((start_dpc ||
+ if (start_dpc ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
@@ -849,9 +858,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
- test_bit(DPC_AEN, &ha->dpc_flags)) &&
- !test_bit(AF_DPC_SCHEDULED, &ha->flags) &&
- ha->dpc_thread) {
+ test_bit(DPC_AEN, &ha->dpc_flags)) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
" - dpc flags = 0x%lx\n",
ha->host_no, __func__, ha->dpc_flags));
@@ -1241,11 +1248,8 @@ static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
- if (ha->dpc_thread &&
- !test_bit(AF_DPC_SCHEDULED, &ha->flags)) {
- set_bit(AF_DPC_SCHEDULED, &ha->flags);
+ if (ha->dpc_thread)
queue_work(ha->dpc_thread, &ha->dpc_work);
- }
}
/**
@@ -1272,12 +1276,12 @@ static void qla4xxx_do_dpc(struct work_struct *work)
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
- goto do_dpc_exit;
+ return;
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
ha->host_no, __func__, ha->flags));
- goto do_dpc_exit;
+ return;
}
if (is_qla8022(ha)) {
@@ -1384,8 +1388,6 @@ dpc_post_reset_ha:
}
}
-do_dpc_exit:
- clear_bit(AF_DPC_SCHEDULED, &ha->flags);
}
/**
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 603155769407..610492877253 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k6"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k7"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index abea2cf05c2e..a4b9cdbaaa0b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,6 +50,8 @@
#define BUS_RESET_SETTLE_TIME (10)
#define HOST_RESET_SETTLE_TIME (10)
+static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
+
/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
@@ -947,6 +949,48 @@ retry_tur:
}
/**
+ * scsi_eh_test_devices - check if devices are responding from error recovery.
+ * @cmd_list: scsi commands in error recovery.
+ * @work_q: queue for commands which still need more error recovery
+ * @done_q: queue for commands which are finished
+ * @try_stu: whether a STU command should be tried in addition to TUR.
+ *
+ * Description:
+ * Tests if devices are in a working state. Commands to devices now in
+ * a working state are sent to the done_q while commands to devices which
+ * are still failing to respond are returned to the work_q for more
+ * processing.
+ **/
+static int scsi_eh_test_devices(struct list_head *cmd_list,
+ struct list_head *work_q,
+ struct list_head *done_q, int try_stu)
+{
+ struct scsi_cmnd *scmd, *next;
+ struct scsi_device *sdev;
+ int finish_cmds;
+
+ while (!list_empty(cmd_list)) {
+ scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
+ sdev = scmd->device;
+
+ finish_cmds = !scsi_device_online(scmd->device) ||
+ (try_stu && !scsi_eh_try_stu(scmd) &&
+ !scsi_eh_tur(scmd)) ||
+ !scsi_eh_tur(scmd);
+
+ list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
+ if (scmd->device == sdev) {
+ if (finish_cmds)
+ scsi_eh_finish_cmd(scmd, done_q);
+ else
+ list_move_tail(&scmd->eh_entry, work_q);
+ }
+ }
+ return list_empty(work_q);
+}
+
+
+/**
* scsi_eh_abort_cmds - abort pending commands.
* @work_q: &list_head for pending commands.
* @done_q: &list_head for processed commands.
@@ -962,6 +1006,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
int rtn;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
@@ -973,11 +1018,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- !scsi_eh_tur(scmd)) {
+ if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
- }
+ else
+ list_move_tail(&scmd->eh_entry, &check_list);
} else
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
" cmd failed:"
@@ -986,7 +1030,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
scmd));
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1137,6 +1181,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
LIST_HEAD(tmp_list);
+ LIST_HEAD(check_list);
list_splice_init(work_q, &tmp_list);
@@ -1161,9 +1206,9 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
if (scmd_id(scmd) != id)
continue;
- if ((rtn == SUCCESS || rtn == FAST_IO_FAIL)
- && (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd)))
+ if (rtn == SUCCESS)
+ list_move_tail(&scmd->eh_entry, &check_list);
+ else if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
else
/* push back on work queue for further processing */
@@ -1171,7 +1216,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1185,6 +1230,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *chan_scmd, *next;
+ LIST_HEAD(check_list);
unsigned int channel;
int rtn;
@@ -1216,12 +1262,14 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
rtn = scsi_try_bus_reset(chan_scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if (channel == scmd_channel(scmd))
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- !scsi_eh_tur(scmd))
+ if (channel == scmd_channel(scmd)) {
+ if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd,
done_q);
+ else
+ list_move_tail(&scmd->eh_entry,
+ &check_list);
+ }
}
} else {
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
@@ -1230,7 +1278,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
channel));
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}
/**
@@ -1242,6 +1290,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ LIST_HEAD(check_list);
int rtn;
if (!list_empty(work_q)) {
@@ -1252,12 +1301,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
, current->comm));
rtn = scsi_try_host_reset(scmd);
- if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
+ if (rtn == SUCCESS) {
+ list_splice_init(work_q, &check_list);
+ } else if (rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if (!scsi_device_online(scmd->device) ||
- rtn == FAST_IO_FAIL ||
- (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
- !scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd, done_q);
}
} else {
@@ -1266,7 +1313,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
current->comm));
}
}
- return list_empty(work_q);
+ return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
}
/**
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index f46855cd853d..ad747dc337da 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -381,11 +381,6 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
return err;
}
-/**
- * proc_scsi_show - show contents of /proc/scsi/scsi (attached devices)
- * @s: output goes here
- * @p: not used
- */
static int always_match(struct device *dev, void *data)
{
return 1;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 58584dc0724a..44e8ca398efa 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
-
+ blk_get_queue(sdev->request_queue);
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e63912510fb9..e0bd3f790fca 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -322,6 +322,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(evt);
}
+ blk_put_queue(sdev->request_queue);
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index b587289cfacb..2bea4f0b684a 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -59,6 +59,10 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
(unsigned long long)lba, (unsigned long long)txlen,
cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
trace_seq_putc(p, 0);
return ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bd0806e64e85..953773cb26d9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -490,7 +490,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
unsigned int max_blocks = 0;
q->limits.discard_zeroes_data = sdkp->lbprz;
- q->limits.discard_alignment = sdkp->unmap_alignment;
+ q->limits.discard_alignment = sdkp->unmap_alignment *
+ logical_block_size;
q->limits.discard_granularity =
max(sdkp->physical_block_size,
sdkp->unmap_granularity * logical_block_size);
@@ -2021,16 +2022,26 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
int dbd;
int modepage;
+ int first_len;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
int old_wce = sdkp->WCE;
int old_rcd = sdkp->RCD;
int old_dpofua = sdkp->DPOFUA;
- if (sdp->skip_ms_page_8)
- goto defaults;
-
- if (sdp->type == TYPE_RBC) {
+ first_len = 4;
+ if (sdp->skip_ms_page_8) {
+ if (sdp->type == TYPE_RBC)
+ goto defaults;
+ else {
+ if (sdp->skip_ms_page_3f)
+ goto defaults;
+ modepage = 0x3F;
+ if (sdp->use_192_bytes_for_3f)
+ first_len = 192;
+ dbd = 0;
+ }
+ } else if (sdp->type == TYPE_RBC) {
modepage = 6;
dbd = 8;
} else {
@@ -2039,13 +2050,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
/* cautiously ask */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
+ &data, &sshdr);
if (!scsi_status_is_good(res))
goto bad_sense;
if (!data.header_length) {
modepage = 6;
+ first_len = 0;
sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
}
@@ -2058,30 +2071,61 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
*/
if (len < 3)
goto bad_sense;
- if (len > 20)
- len = 20;
-
- /* Take headers and block descriptors into account */
- len += data.header_length + data.block_descriptor_length;
- if (len > SD_BUF_SIZE)
- goto bad_sense;
+ else if (len > SD_BUF_SIZE) {
+ sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+ "data from %d to %d bytes\n", len, SD_BUF_SIZE);
+ len = SD_BUF_SIZE;
+ }
+ if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
+ len = 192;
/* Get the data */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
+ if (len > first_len)
+ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
+ &data, &sshdr);
if (scsi_status_is_good(res)) {
int offset = data.header_length + data.block_descriptor_length;
- if (offset >= SD_BUF_SIZE - 2) {
- sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
- goto defaults;
+ while (offset < len) {
+ u8 page_code = buffer[offset] & 0x3F;
+ u8 spf = buffer[offset] & 0x40;
+
+ if (page_code == 8 || page_code == 6) {
+ /* We're interested only in the first 3 bytes.
+ */
+ if (len - offset <= 2) {
+ sd_printk(KERN_ERR, sdkp, "Incomplete "
+ "mode parameter data\n");
+ goto defaults;
+ } else {
+ modepage = page_code;
+ goto Page_found;
+ }
+ } else {
+ /* Go to the next page */
+ if (spf && len - offset > 3)
+ offset += 4 + (buffer[offset+2] << 8) +
+ buffer[offset+3];
+ else if (!spf && len - offset > 1)
+ offset += 2 + buffer[offset+1];
+ else {
+ sd_printk(KERN_ERR, sdkp, "Incomplete "
+ "mode parameter data\n");
+ goto defaults;
+ }
+ }
}
- if ((buffer[offset] & 0x3f) != modepage) {
+ if (modepage == 0x3F) {
+ sd_printk(KERN_ERR, sdkp, "No Caching mode page "
+ "present\n");
+ goto defaults;
+ } else if ((buffer[offset] & 0x3f) != modepage) {
sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
goto defaults;
}
-
+ Page_found:
if (modepage == 8) {
sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
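Note: the page walk added above steps through MODE SENSE parameter data using the SPF bit: sub-page format descriptors carry a 16-bit page length at bytes 2-3 (total 4 + length), while page_0 format descriptors carry an 8-bit length at byte 1 (total 2 + length). A small, illustrative walker showing just that stepping rule (standalone sketch; assumes buf already points past the mode parameter header and block descriptors):

/* Illustrative only: step through mode pages using the SPF (sub-page format) bit. */
#include <stdint.h>
#include <stddef.h>

static int find_mode_page(const uint8_t *buf, size_t len, uint8_t wanted)
{
	size_t off = 0;

	while (off + 2 <= len) {
		uint8_t page_code = buf[off] & 0x3f;
		int spf = buf[off] & 0x40;

		if (page_code == wanted)
			return (int)off;	/* offset of the page descriptor */

		if (spf && off + 4 <= len)	/* sub-page: 16-bit length at bytes 2-3 */
			off += 4 + ((size_t)buf[off + 2] << 8) + buf[off + 3];
		else if (!spf)			/* page_0: 8-bit length at byte 1 */
			off += 2 + buf[off + 1];
		else
			break;			/* truncated parameter data */
	}
	return -1;
}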
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 9f4b58b7daad..7e22b737dfd8 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
"0: bsfw %1,%w0\n\t"
"btr %0,%1\n\t"
"jnc 0b"
- : "=&r" (rv), "=m" (*field) :);
+ : "=&r" (rv), "+m" (*field) :);
return rv;
}
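Note: the one-character constraint change above matters because the asm both reads and writes *field. "=m" declares a write-only memory operand, so the compiler may treat the previous contents as dead, whereas "+m" declares a read-modify-write operand. A reduced, illustrative example of the same pattern (assumes GCC extended asm on x86, unrelated to the driver):

/* Illustrative only: the asm reads and updates the counter, so "+m" is required;
 * with "=m" the compiler could assume the old value is never consumed. */
static inline void incr(unsigned int *counter)
{
	__asm__ __volatile__("incl %0" : "+m" (*counter));
}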
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 4f64183b27fa..7e9c39951ecb 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -635,7 +635,7 @@ static void clks_core_resume(void)
struct clk *clkp;
list_for_each_entry(clkp, &clock_list, node) {
- if (likely(clkp->ops)) {
+ if (likely(clkp->usecount && clkp->ops)) {
unsigned long rate = clkp->rate;
if (likely(clkp->ops->set_parent))
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index fbd96b29530d..de35c3ad8a69 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -80,6 +80,15 @@ config SPI_BFIN
help
This is the SPI controller master driver for Blackfin 5xx processor.
+config SPI_BFIN_SPORT
+ tristate "SPI bus via Blackfin SPORT"
+ depends on BLACKFIN
+ help
+ Enable support for a SPI bus via the Blackfin SPORT peripheral.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_bfin_sport.
+
config SPI_AU1550
tristate "Au1550/Au12x0 SPI Controller"
depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index fd2fc5f6505f..0f8c69b6b19e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi_altera.o
obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
+obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 6a9e58dd36c7..d18ce9e946d8 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1861,6 +1861,7 @@ static int pl022_setup(struct spi_device *spi)
}
if ((clk_freq.cpsdvsr < CPSDVR_MIN)
|| (clk_freq.cpsdvsr > CPSDVR_MAX)) {
+ status = -EINVAL;
dev_err(&spi->dev,
"cpsdvsr is configured incorrectly\n");
goto err_config_params;
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index 6f86ba0175ac..969cdd2fe124 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -298,7 +298,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
unsigned int count, c;
unsigned long base, tx_reg, rx_reg;
int word_len, data_type, element_count;
- int elements;
+ int elements = 0;
u32 l;
u8 * rx;
const u8 * tx;
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index f706dba165cf..cc880c95e7de 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -681,13 +681,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
- bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
- if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) {
+ bits_per_word = transfer->bits_per_word ? :
+ message->spi->bits_per_word ? : 8;
+ if (bits_per_word % 16 == 0) {
drv_data->n_bytes = bits_per_word/8;
drv_data->len = (transfer->len) >> 1;
cr_width = BIT_CTL_WORDSIZE;
drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
- } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) {
+ } else if (bits_per_word % 8 == 0) {
drv_data->n_bytes = bits_per_word/8;
drv_data->len = transfer->len;
cr_width = 0;
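Note: the fallback chain above relies on the GNU `?:` extension (`a ? : b` yields `a` when `a` is non-zero, without evaluating it twice), so a per-transfer width wins, then the device default, then 8 bits. A minimal illustration, assuming only GCC/Clang extensions (hypothetical helper, not driver code):

/* Illustrative only: GNU "Elvis" operator used for the bits_per_word fallback. */
static unsigned int pick_bits_per_word(unsigned int xfer_bits, unsigned int dev_bits)
{
	return xfer_bits ? : dev_bits ? : 8;	/* first non-zero value, else 8 */
}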
diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi_bfin_sport.c
new file mode 100644
index 000000000000..e557ff617b11
--- /dev/null
+++ b/drivers/spi/spi_bfin_sport.c
@@ -0,0 +1,952 @@
+/*
+ * SPI bus via the Blackfin SPORT peripheral
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+
+#include <asm/portmux.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/blackfin.h>
+#include <asm/bfin_sport.h>
+#include <asm/cacheflush.h>
+
+#define DRV_NAME "bfin-sport-spi"
+#define DRV_DESC "SPI bus via the Blackfin SPORT"
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bfin-sport-spi");
+
+enum bfin_sport_spi_state {
+ START_STATE,
+ RUNNING_STATE,
+ DONE_STATE,
+ ERROR_STATE,
+};
+
+struct bfin_sport_spi_master_data;
+
+struct bfin_sport_transfer_ops {
+ void (*write) (struct bfin_sport_spi_master_data *);
+ void (*read) (struct bfin_sport_spi_master_data *);
+ void (*duplex) (struct bfin_sport_spi_master_data *);
+};
+
+struct bfin_sport_spi_master_data {
+ /* Driver model hookup */
+ struct device *dev;
+
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* Regs base of SPI controller */
+ struct sport_register __iomem *regs;
+ int err_irq;
+
+ /* Pin request list */
+ u16 *pin_req;
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ bool run;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ enum bfin_sport_spi_state state;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct bfin_sport_spi_slave_data *cur_chip;
+ union {
+ void *tx;
+ u8 *tx8;
+ u16 *tx16;
+ };
+ void *tx_end;
+ union {
+ void *rx;
+ u8 *rx8;
+ u16 *rx16;
+ };
+ void *rx_end;
+
+ int cs_change;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+struct bfin_sport_spi_slave_data {
+ u16 ctl_reg;
+ u16 baud;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
+ u32 cs_gpio;
+ u16 idle_tx_val;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+static void
+bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_or(&drv_data->regs->tcr1, TSPEN);
+ bfin_write_or(&drv_data->regs->rcr1, TSPEN);
+ SSYNC();
+}
+
+static void
+bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
+ bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
+ SSYNC();
+}
+
+/* Calculate the SPI_BAUD register value based on input HZ */
+static u16
+bfin_sport_hz_to_spi_baud(u32 speed_hz)
+{
+ u_long clk, sclk = get_sclk();
+ int div = (sclk / (2 * speed_hz)) - 1;
+
+ if (div < 0)
+ div = 0;
+
+ clk = sclk / (2 * (div + 1));
+
+ if (clk > speed_hz)
+ div++;
+
+ return div;
+}
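+
+/*
+ * Worked example (illustrative): with sclk = 100 MHz and speed_hz = 5 MHz,
+ * the function above computes div = (100 MHz / (2 * 5 MHz)) - 1 = 9 and
+ * clk = 100 MHz / (2 * 10) = 5 MHz, which does not exceed speed_hz, so the
+ * returned divisor stays 9.
+ */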
+
+/* Chip select operation functions for cs_change flag */
+static void
+bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 0);
+}
+
+static void
+bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 1);
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+static void
+bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long timeout = jiffies + HZ;
+ while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
+ if (!time_before(jiffies, timeout))
+ break;
+ }
+}
+
+static void
+bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
+ .write = bfin_sport_spi_u8_writer,
+ .read = bfin_sport_spi_u8_reader,
+ .duplex = bfin_sport_spi_u8_duplex,
+};
+
+static void
+bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
+ .write = bfin_sport_spi_u16_writer,
+ .read = bfin_sport_spi_u16_reader,
+ .duplex = bfin_sport_spi_u16_duplex,
+};
+
+/* stop controller and re-config current chip */
+static void
+bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);
+
+ bfin_sport_spi_disable(drv_data);
+ dev_dbg(drv_data->dev, "restoring spi ctl state\n");
+
+ bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
+ bfin_write(&drv_data->regs->tcr2, bits);
+ bfin_write(&drv_data->regs->tclkdiv, chip->baud);
+ bfin_write(&drv_data->regs->tfsdiv, bits);
+ SSYNC();
+
+ bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
+ bfin_write(&drv_data->regs->rcr2, bits);
+ SSYNC();
+
+ bfin_sport_spi_cs_active(chip);
+}
+
+/* test if there is more transfer to be done */
+static enum bfin_sport_spi_state
+bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct spi_transfer *trans = drv_data->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ drv_data->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ return RUNNING_STATE;
+ }
+
+ return DONE_STATE;
+}
+
+/*
+ * caller already set message->status;
+ * DMA and PIO IRQs are blocked; give the finished message back
+ */
+static void
+bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+ msg = drv_data->cur_msg;
+ drv_data->state = START_STATE;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ if (!drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static irqreturn_t
+sport_err_handler(int irq, void *dev_id)
+{
+ struct bfin_sport_spi_master_data *drv_data = dev_id;
+ u16 status;
+
+ dev_dbg(drv_data->dev, "%s enter\n", __func__);
+ status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);
+
+ if (status) {
+ bfin_write(&drv_data->regs->stat, status);
+ SSYNC();
+
+ bfin_sport_spi_disable(drv_data);
+ dev_err(drv_data->dev, "status error:%s%s%s%s\n",
+ status & TOVF ? " TOVF" : "",
+ status & TUVF ? " TUVF" : "",
+ status & ROVF ? " ROVF" : "",
+ status & RUVF ? " RUVF" : "");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+bfin_sport_spi_pump_transfers(unsigned long data)
+{
+ struct bfin_sport_spi_master_data *drv_data = (void *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct bfin_sport_spi_slave_data *chip = NULL;
+ unsigned int bits_per_word;
+ u32 tranf_success = 1;
+ u32 transfer_speed;
+ u8 full_duplex = 0;
+
+ /* Get current state information */
+ message = drv_data->cur_msg;
+ transfer = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ if (transfer->speed_hz)
+ transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
+ else
+ transfer_speed = chip->baud;
+ bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
+ SSYNC();
+
+ /*
+ * if msg is error or done, report it back using complete() callback
+ */
+
+ /* Handle for abort */
+ if (drv_data->state == ERROR_STATE) {
+ dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
+ message->status = -EIO;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Handle end of message */
+ if (drv_data->state == DONE_STATE) {
+ dev_dbg(drv_data->dev, "transfer: all done!\n");
+ message->status = 0;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Delay if requested at end of transfer */
+ if (drv_data->state == RUNNING_STATE) {
+ dev_dbg(drv_data->dev, "transfer: still running ...\n");
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ }
+
+ if (transfer->len == 0) {
+ /* Move to next transfer of this msg */
+ drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+
+ if (transfer->tx_buf != NULL) {
+ drv_data->tx = (void *)transfer->tx_buf;
+ drv_data->tx_end = drv_data->tx + transfer->len;
+ dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
+ transfer->tx_buf, drv_data->tx_end);
+ } else
+ drv_data->tx = NULL;
+
+ if (transfer->rx_buf != NULL) {
+ full_duplex = transfer->tx_buf != NULL;
+ drv_data->rx = transfer->rx_buf;
+ drv_data->rx_end = drv_data->rx + transfer->len;
+ dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
+ transfer->rx_buf, drv_data->rx_end);
+ } else
+ drv_data->rx = NULL;
+
+ drv_data->cs_change = transfer->cs_change;
+
+ /* Bits per word setup */
+ bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
+ if (bits_per_word == 8)
+ drv_data->ops = &bfin_sport_transfer_ops_u8;
+ else
+ drv_data->ops = &bfin_sport_transfer_ops_u16;
+
+ drv_data->state = RUNNING_STATE;
+
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_active(chip);
+
+ dev_dbg(drv_data->dev,
+ "now pumping a transfer: width is %d, len is %d\n",
+ bits_per_word, transfer->len);
+
+ /* PIO mode write then read */
+ dev_dbg(drv_data->dev, "doing IO transfer\n");
+
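+ /*
+ * Full duplex when both tx and rx buffers are set, otherwise a
+ * write-only or read-only pass using the 8- or 16-bit ops chosen above.
+ */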
+ bfin_sport_spi_enable(drv_data);
+ if (full_duplex) {
+ /* full duplex mode */
+ BUG_ON((drv_data->tx_end - drv_data->tx) !=
+ (drv_data->rx_end - drv_data->rx));
+ drv_data->ops->duplex(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->tx != NULL) {
+ /* write only half duplex */
+
+ drv_data->ops->write(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->rx != NULL) {
+ /* read only half duplex */
+
+ drv_data->ops->read(drv_data);
+ if (drv_data->rx != drv_data->rx_end)
+ tranf_success = 0;
+ }
+ bfin_sport_spi_disable(drv_data);
+
+ if (!tranf_success) {
+ dev_dbg(drv_data->dev, "IO write error!\n");
+ drv_data->state = ERROR_STATE;
+ } else {
+ /* Update total bytes transferred */
+ message->actual_length += transfer->len;
+ /* Move to next transfer of this msg */
+ drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+ }
+
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+/* Pop a msg from the queue and kick off the real transfer */
+static void
+bfin_sport_spi_pump_messages(struct work_struct *work)
+{
+ struct bfin_sport_spi_master_data *drv_data;
+ unsigned long flags;
+ struct spi_message *next_msg;
+
+ drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&drv_data->lock, flags);
+ if (list_empty(&drv_data->queue) || !drv_data->run) {
+ /* pumper kicked off but no work to do */
+ drv_data->busy = 0;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (drv_data->cur_msg) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ next_msg = list_entry(drv_data->queue.next,
+ struct spi_message, queue);
+
+ drv_data->cur_msg = next_msg;
+
+ /* Set up the SPORT using the per-chip configuration */
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+
+ list_del_init(&drv_data->cur_msg->queue);
+
+ /* Initialize message state */
+ drv_data->cur_msg->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ bfin_sport_spi_restore_state(drv_data);
+ dev_dbg(drv_data->dev, "got a message to pump, "
+ "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
+ drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
+ drv_data->cur_chip->ctl_reg);
+
+ dev_dbg(drv_data->dev,
+ "the first transfer len is %d\n",
+ drv_data->cur_transfer->len);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ drv_data->busy = 1;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+}
+
+/*
+ * Got a msg to transfer: queue it in drv_data->queue
+ * and kick off the message pumper.
+ */
+static int
+bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (!drv_data->run) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+ dev_dbg(&spi->dev, "adding an msg in transfer()\n");
+ list_add_tail(&msg->queue, &drv_data->queue);
+
+ if (drv_data->run && !drv_data->busy)
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return 0;
+}
+
+/* Called every time common spi devices change state */
+static int
+bfin_sport_spi_setup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip, *first = NULL;
+ int ret;
+
+ /* Only alloc (or use chip_info) on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ struct bfin5xx_spi_chip *chip_info;
+
+ chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ /* platform chip_info isn't required */
+ chip_info = spi->controller_data;
+ if (chip_info) {
+ /*
+ * DITFS and TDTYPE are the only things we don't set, but
+ * they probably shouldn't be changed by people.
+ */
+ if (chip_info->ctl_reg || chip_info->enable_dma) {
+ ret = -EINVAL;
+ dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields");
+ goto error;
+ }
+ chip->cs_chg_udelay = chip_info->cs_chg_udelay;
+ chip->idle_tx_val = chip_info->idle_tx_val;
+ spi->bits_per_word = chip_info->bits_per_word;
+ }
+ }
+
+ if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Translate the common SPI framework settings into our register settings.
+ * The following configuration is the same for tx and rx.
+ */
+
+ if (spi->mode & SPI_CPHA)
+ chip->ctl_reg &= ~TCKFE;
+ else
+ chip->ctl_reg |= TCKFE;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->ctl_reg |= TLSBIT;
+ else
+ chip->ctl_reg &= ~TLSBIT;
+
+ /* SPORT in master mode */
+ chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;
+
+ chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);
+
+ chip->cs_gpio = spi->chip_select;
+ ret = gpio_request(chip->cs_gpio, spi->modalias);
+ if (ret)
+ goto error;
+
+ dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
+ spi->modalias, spi->bits_per_word);
+ dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
+ chip->ctl_reg, spi->chip_select);
+
+ spi_set_ctldata(spi, chip);
+
+ bfin_sport_spi_cs_deactive(chip);
+
+ return ret;
+
+ error:
+ kfree(first);
+ return ret;
+}
+
+/*
+ * Callback for the SPI framework:
+ * clean up driver-specific data.
+ */
+static void
+bfin_sport_spi_cleanup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);
+
+ if (!chip)
+ return;
+
+ gpio_free(chip->cs_gpio);
+
+ kfree(chip);
+}
+
+static int
+bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ INIT_LIST_HEAD(&drv_data->queue);
+ spin_lock_init(&drv_data->lock);
+
+ drv_data->run = false;
+ drv_data->busy = 0;
+
+ /* init transfer tasklet */
+ tasklet_init(&drv_data->pump_transfers,
+ bfin_sport_spi_pump_transfers, (unsigned long)drv_data);
+
+ /* init messages workqueue */
+ INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
+ drv_data->workqueue =
+ create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
+ if (drv_data->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (drv_data->run || drv_data->busy) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -EBUSY;
+ }
+
+ drv_data->run = true;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ return 0;
+}
+
+static inline int
+bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ /*
+ * This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the drv_data->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead.
+ */
+ drv_data->run = false;
+ while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&drv_data->lock, flags);
+ }
+
+ if (!list_empty(&drv_data->queue) || drv_data->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return status;
+}
+
+static inline int
+bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ destroy_workqueue(drv_data->workqueue);
+
+ return 0;
+}
+
+static int __devinit
+bfin_sport_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bfin5xx_spi_master *platform_info;
+ struct spi_master *master;
+ struct resource *res, *ires;
+ struct bfin_sport_spi_master_data *drv_data;
+ int status;
+
+ platform_info = dev->platform_data;
+
+ /* Allocate master with space for drv_data */
+ master = spi_alloc_master(dev, sizeof(*master) + 16);
+ if (!master) {
+ dev_err(dev, "cannot alloc spi_master\n");
+ return -ENOMEM;
+ }
+
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->dev = dev;
+ drv_data->pin_req = platform_info->pin_req;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bus_num = pdev->id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = bfin_sport_spi_cleanup;
+ master->setup = bfin_sport_spi_setup;
+ master->transfer = bfin_sport_spi_transfer;
+
+ /* Find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "cannot get IORESOURCE_MEM\n");
+ status = -ENOENT;
+ goto out_error_get_res;
+ }
+
+ drv_data->regs = ioremap(res->start, resource_size(res));
+ if (drv_data->regs == NULL) {
+ dev_err(dev, "cannot map registers\n");
+ status = -ENXIO;
+ goto out_error_ioremap;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ dev_err(dev, "cannot get IORESOURCE_IRQ\n");
+ status = -ENODEV;
+ goto out_error_get_ires;
+ }
+ drv_data->err_irq = ires->start;
+
+ /* Initialize and start the queue */
+ status = bfin_sport_spi_init_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem initializing queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem starting queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = request_irq(drv_data->err_irq, sport_err_handler,
+ 0, "sport_spi_err", drv_data);
+ if (status) {
+ dev_err(dev, "unable to request sport err irq\n");
+ goto out_error_irq;
+ }
+
+ status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
+ if (status) {
+ dev_err(dev, "requesting peripherals failed\n");
+ goto out_error_peripheral;
+ }
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+ status = spi_register_master(master);
+ if (status) {
+ dev_err(dev, "problem registering spi master\n");
+ goto out_error_master;
+ }
+
+ dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
+ return 0;
+
+ out_error_master:
+ peripheral_free_list(drv_data->pin_req);
+ out_error_peripheral:
+ free_irq(drv_data->err_irq, drv_data);
+ out_error_irq:
+ out_error_queue_alloc:
+ bfin_sport_spi_destroy_queue(drv_data);
+ out_error_get_ires:
+ iounmap(drv_data->regs);
+ out_error_ioremap:
+ out_error_get_res:
+ spi_master_put(master);
+
+ return status;
+}
+
+/* stop hardware and remove the driver */
+static int __devexit
+bfin_sport_spi_remove(struct platform_device *pdev)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status = 0;
+
+ if (!drv_data)
+ return 0;
+
+ /* Remove the queue */
+ status = bfin_sport_spi_destroy_queue(drv_data);
+ if (status)
+ return status;
+
+ /* Disable the SPORT at the peripheral and SoC level */
+ bfin_sport_spi_disable(drv_data);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(drv_data->master);
+
+ peripheral_free_list(drv_data->pin_req);
+
+ /* Prevent double remove */
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ /* stop hardware */
+ bfin_sport_spi_disable(drv_data);
+
+ return status;
+}
+
+static int
+bfin_sport_spi_resume(struct platform_device *pdev)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status;
+
+ /* Enable the SPI interface */
+ bfin_sport_spi_enable(drv_data);
+
+ /* Start the queue running */
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status)
+ dev_err(drv_data->dev, "problem resuming queue\n");
+
+ return status;
+}
+#else
+# define bfin_sport_spi_suspend NULL
+# define bfin_sport_spi_resume NULL
+#endif
+
+static struct platform_driver bfin_sport_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = bfin_sport_spi_probe,
+ .remove = __devexit_p(bfin_sport_spi_remove),
+ .suspend = bfin_sport_spi_suspend,
+ .resume = bfin_sport_spi_resume,
+};
+
+static int __init bfin_sport_spi_init(void)
+{
+ return platform_driver_register(&bfin_sport_spi_driver);
+}
+module_init(bfin_sport_spi_init);
+
+static void __exit bfin_sport_spi_exit(void)
+{
+ platform_driver_unregister(&bfin_sport_spi_driver);
+}
+module_exit(bfin_sport_spi_exit);
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index a3938958147c..32a40876532f 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -283,7 +283,7 @@ static int __devinit tle62x0_probe(struct spi_device *spi)
return 0;
err_gpios:
- for (; ptr > 0; ptr--)
+ while (--ptr >= 0)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
device_remove_file(&spi->dev, &dev_attr_status_show);
@@ -301,6 +301,7 @@ static int __devexit tle62x0_remove(struct spi_device *spi)
for (ptr = 0; ptr < st->nr_gpio; ptr++)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
+ device_remove_file(&spi->dev, &dev_attr_status_show);
kfree(st);
return 0;
}
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 82feb348c8bb..2a20dabec76d 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -539,10 +539,12 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
if (!pc->hostmode)
ssb_pcicore_init_clientmode(pc);
- /* Additional always once-executed workarounds */
- ssb_pcicore_serdes_workaround(pc);
- /* TODO: ASPM */
- /* TODO: Clock Request Update */
+ /* Additional PCIe always once-executed workarounds */
+ if (dev->id.coreid == SSB_DEV_PCIE) {
+ ssb_pcicore_serdes_workaround(pc);
+ /* TODO: ASPM */
+ /* TODO: Clock Request Update */
+ }
}
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index dfc16f955eb8..196284dc2f36 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,23 +24,6 @@ menuconfig STAGING
if STAGING
-config STAGING_EXCLUDE_BUILD
- bool "Exclude Staging drivers from being built" if STAGING
- default y
- ---help---
- Are you sure you really want to build the staging drivers?
- They taint your kernel, don't live up to the normal Linux
- kernel quality standards, are a bit crufty around the edges,
- and might go off and kick your dog when you aren't paying
- attention.
-
- Say N here to be able to select and build the Staging drivers.
- This option is primarily here to prevent them from being built
- when selecting 'make allyesconfg' and 'make allmodconfig' so
- don't be all that put off, your dog will be just fine.
-
-if !STAGING_EXCLUDE_BUILD
-
source "drivers/staging/tty/Kconfig"
source "drivers/staging/generic_serial/Kconfig"
@@ -177,5 +160,4 @@ source "drivers/staging/mei/Kconfig"
source "drivers/staging/nvec/Kconfig"
-endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
diff --git a/drivers/staging/altera-stapl/altera-jtag.c b/drivers/staging/altera-stapl/altera-jtag.c
index 876308858b82..8b1620b1b2d0 100644
--- a/drivers/staging/altera-stapl/altera-jtag.c
+++ b/drivers/staging/altera-stapl/altera-jtag.c
@@ -26,7 +26,7 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/slab.h>
-#include <staging/altera.h>
+#include "altera.h"
#include "altera-exprt.h"
#include "altera-jtag.h"
diff --git a/drivers/staging/altera-stapl/altera.c b/drivers/staging/altera-stapl/altera.c
index 05aad351b120..9cd5e76880c0 100644
--- a/drivers/staging/altera-stapl/altera.c
+++ b/drivers/staging/altera-stapl/altera.c
@@ -28,7 +28,7 @@
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/slab.h>
-#include <staging/altera.h>
+#include "altera.h"
#include "altera-exprt.h"
#include "altera-jtag.h"
diff --git a/drivers/staging/altera-stapl/altera.h b/drivers/staging/altera-stapl/altera.h
new file mode 100644
index 000000000000..94c0c6181daf
--- /dev/null
+++ b/drivers/staging/altera-stapl/altera.h
@@ -0,0 +1,49 @@
+/*
+ * altera.h
+ *
+ * altera FPGA driver
+ *
+ * Copyright (C) Altera Corporation 1998-2001
+ * Copyright (C) 2010 NetUP Inc.
+ * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ALTERA_H_
+#define _ALTERA_H_
+
+struct altera_config {
+ void *dev;
+ u8 *action;
+ int (*jtag_io) (void *dev, int tms, int tdi, int tdo);
+};
+
+#if defined(CONFIG_ALTERA_STAPL) || \
+ (defined(CONFIG_ALTERA_STAPL_MODULE) && defined(MODULE))
+
+extern int altera_init(struct altera_config *config, const struct firmware *fw);
+#else
+
+static inline int altera_init(struct altera_config *config,
+ const struct firmware *fw)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
+}
+#endif /* CONFIG_ALTERA_STAPL */
+
+#endif /* _ALTERA_H_ */
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index 1f15e1fb1ab2..afd6cc16a2b8 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -1,6 +1,7 @@
config ATH6K_LEGACY
tristate "Atheros AR6003 support (non mac80211)"
depends on MMC && WLAN
+ depends on CFG80211
select WIRELESS_EXT
select WEXT_PRIV
help
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index 77dfb4070c1d..d3a774dbb7e8 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -870,7 +870,8 @@ ar6k_cfg80211_scanComplete_event(struct ar6_softc *ar, int status)
if(ar->scan_request)
{
/* Translate data to cfg80211 mgmt format */
- wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
+ if (ar->arWmi)
+ wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
cfg80211_scan_done(ar->scan_request,
((status & A_ECANCELED) || (status & A_EBUSY)) ? true : false);
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.c b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
index 929ceaf363be..15e1b05ca92d 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
@@ -64,8 +64,6 @@ wl_iw_extra_params_t g_wl_iw_params;
extern bool wl_iw_conn_status_str(u32 event_type, u32 status,
u32 reason, char *stringBuf, uint buflen);
-uint wl_msg_level = WL_ERROR_VAL;
-
#define MAX_WLIW_IOCTL_LEN 1024
#ifdef CONFIG_WIRELESS_EXT
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index 1c45c11a774e..aa87b1b6a44a 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -542,6 +542,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
unsigned long irqflags;
int ret = -ENOMEM;
uint32_t tt_pages;
+ struct drm_connector *connector;
+ struct psb_intel_output *psb_intel_output;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
@@ -663,7 +665,18 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
drm_kms_helper_poll_init(dev);
}
- ret = psb_backlight_init(dev);
+ /* Only add backlight support if we have LVDS output */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ psb_intel_output = to_psb_intel_output(connector);
+
+ switch (psb_intel_output->type) {
+ case INTEL_OUTPUT_LVDS:
+ ret = psb_backlight_init(dev);
+ break;
+ }
+ }
+
if (ret)
return ret;
#if 0
diff --git a/drivers/staging/gma500/psb_fb.c b/drivers/staging/gma500/psb_fb.c
index 99c03a2e06bd..084c36bbfe86 100644
--- a/drivers/staging/gma500/psb_fb.c
+++ b/drivers/staging/gma500/psb_fb.c
@@ -441,6 +441,16 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->screen_size = size;
memset(info->screen_base, 0, size);
+ if (dev_priv->pg->stolen_size) {
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = dev_priv->pg->stolen_size;
+ }
+
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
sizes->fb_width, sizes->fb_height);
diff --git a/drivers/staging/gma500/psb_intel_bios.c b/drivers/staging/gma500/psb_intel_bios.c
index 48ac8ba7f40b..417965da5e24 100644
--- a/drivers/staging/gma500/psb_intel_bios.c
+++ b/drivers/staging/gma500/psb_intel_bios.c
@@ -154,10 +154,15 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
fill_detail_timing_data(panel_fixed_mode, dvo_timing);
- dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
-
- DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
- drm_mode_debug_printmodeline(panel_fixed_mode);
+ if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+ } else {
+ DRM_DEBUG("Ignoring bogus LVDS VBT mode.\n");
+ dev_priv->lvds_vbt = 0;
+ kfree(panel_fixed_mode);
+ }
return;
}
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index 0b9b85424dfa..4cc1a5bfab40 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -81,7 +81,6 @@ struct adis16201_state {
int adis16201_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
enum adis16201_scan {
ADIS16201_SCAN_SUPPLY,
ADIS16201_SCAN_ACC_X,
@@ -92,6 +91,7 @@ enum adis16201_scan {
ADIS16201_SCAN_INCLI_Y,
};
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16201_remove_trigger(struct iio_dev *indio_dev);
int adis16201_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 8bb8ce50c248..175e21bb9b40 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -76,7 +76,6 @@ struct adis16203_state {
int adis16203_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
enum adis16203_scan {
ADIS16203_SCAN_SUPPLY,
ADIS16203_SCAN_AUX_ADC,
@@ -85,6 +84,7 @@ enum adis16203_scan {
ADIS16203_SCAN_INCLI_Y,
};
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16203_remove_trigger(struct iio_dev *indio_dev);
int adis16203_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index 881768df47a6..2fe34d21b6aa 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -195,7 +195,7 @@ static const struct iio_info max517_info = {
};
static const struct iio_info max518_info = {
- .attrs = &max517_attribute_group,
+ .attrs = &max518_attribute_group,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index 2589a7e167e4..3612373ddede 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -137,13 +137,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
if (st->variant->flags & ADIS16400_NO_BURST) {
ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
if (ret < 0)
- return ret;
+ goto err;
for (; i < ring->scan_count; i++)
data[i] = *(s16 *)(st->rx + i*2);
} else {
ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
if (ret < 0)
- return ret;
+ goto err;
for (; i < indio_dev->ring->scan_count; i++) {
j = __ffs(mask);
mask &= ~(1 << j);
@@ -158,9 +158,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
ring->access->store_to(indio_dev->ring, (u8 *) data, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
- kfree(data);
+ kfree(data);
return IRQ_HANDLED;
+
+err:
+ kfree(data);
+ return ret;
}
void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 615902333fb0..d504aa251ced 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -294,6 +294,7 @@ struct iio_poll_func
pf->h = h;
pf->thread = thread;
pf->type = type;
+ pf->private_data = private;
return pf;
}
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 2818851c0761..d1ffa32cd141 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -205,10 +205,10 @@ int mei_hw_init(struct mei_device *dev)
"host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
dev->host_hw_state, dev->me_hw_state);
- if (!(dev->host_hw_state & H_RDY) != H_RDY)
+ if (!(dev->host_hw_state & H_RDY))
dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
- if (!(dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
+ if (!(dev->me_hw_state & ME_RDY_HRA))
dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
printk(KERN_ERR "mei: link layer initialization failed.\n");
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index b05306766870..fe40e0b6f675 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -2,6 +2,7 @@ config FB_OLPC_DCON
tristate "One Laptop Per Child Display CONtroller support"
depends on OLPC && FB
select I2C
+ select BACKLIGHT_CLASS_DEVICE
---help---
Add support for the OLPC XO DCON controller. This controller is
only available on OLPC platforms. Unless you have one of these
diff --git a/drivers/staging/rts_pstor/sd.c b/drivers/staging/rts_pstor/sd.c
index bddb0312b31e..cdae497d5467 100644
--- a/drivers/staging/rts_pstor/sd.c
+++ b/drivers/staging/rts_pstor/sd.c
@@ -2328,7 +2328,7 @@ Switch_Fail:
retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0, SD_RSP_TYPE_R4, rsp, 5);
if (retval == STATUS_SUCCESS) {
- int func_num = (rsp[1] >> 4) && 0x07;
+ int func_num = (rsp[1] >> 4) & 0x07;
if (func_num) {
RTSX_DEBUGP("SD_IO card (Function number: %d)!\n", func_num);
chip->sd_io = 1;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 6e99ec87fee0..8cbea42b69bc 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -26,6 +26,8 @@
static int stub_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void stub_disconnect(struct usb_interface *interface);
+static int stub_pre_reset(struct usb_interface *interface);
+static int stub_post_reset(struct usb_interface *interface);
/*
* Define device IDs here if you want to explicitly limit exportable devices.
@@ -59,6 +61,8 @@ struct usb_driver stub_driver = {
.probe = stub_probe,
.disconnect = stub_disconnect,
.id_table = stub_table,
+ .pre_reset = stub_pre_reset,
+ .post_reset = stub_post_reset,
};
/*
@@ -541,3 +545,20 @@ static void stub_disconnect(struct usb_interface *interface)
del_match_busid((char *)udev_busid);
}
}
+
+/*
+ * Presence of pre_reset and post_reset prevents the driver from being unbound
+ * when the device is being reset
+ */
+
+int stub_pre_reset(struct usb_interface *interface)
+{
+ dev_dbg(&interface->dev, "pre_reset\n");
+ return 0;
+}
+
+int stub_post_reset(struct usb_interface *interface)
+{
+ dev_dbg(&interface->dev, "post_reset\n");
+ return 0;
+}
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index a5c1fa1f0430..bc57844600b9 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -175,16 +175,18 @@ static int tweak_reset_device_cmd(struct urb *urb)
dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
/*
- * usb_lock_device_for_reset caused a deadlock: it causes the driver
- * to unbind. In the shutdown the rx thread is signalled to shut down
- * but this thread is pending in the usb_lock_device_for_reset.
- *
- * Instead queue the reset.
- *
- * Unfortunatly an existing usbip connection will be dropped due to
- * driver unbinding.
+ * With the implementation of pre_reset and post_reset the driver no
+ * longer unbinds. This allows the use of synchronous reset.
*/
- usb_queue_reset_device(sdev->interface);
+
+ if (usb_lock_device_for_reset(sdev->udev, sdev->interface) < 0) {
+ dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
+ return 0;
+ }
+ usb_reset_device(sdev->udev);
+ usb_unlock_device(sdev->udev);
+
return 0;
}
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aed4e464d31c..70c2e7fa6664 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
@@ -95,17 +95,17 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
if (sc->device->tagged_supported) {
switch (sc->tag) {
case HEAD_OF_QUEUE_TAG:
- sam_task_attr = TASK_ATTR_HOQ;
+ sam_task_attr = MSG_HEAD_TAG;
break;
case ORDERED_QUEUE_TAG:
- sam_task_attr = TASK_ATTR_ORDERED;
+ sam_task_attr = MSG_ORDERED_TAG;
break;
default:
- sam_task_attr = TASK_ATTR_SIMPLE;
+ sam_task_attr = MSG_SIMPLE_TAG;
break;
}
} else
- sam_task_attr = TASK_ATTR_SIMPLE;
+ sam_task_attr = MSG_SIMPLE_TAG;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
@@ -379,14 +379,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
- DMA_NONE, TASK_ATTR_SIMPLE,
+ DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
/*
* Allocate the LUN_RESET TMR
*/
se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
TMR_LUN_RESET);
- if (!se_cmd->se_tmr_req)
+ if (IS_ERR(se_cmd->se_tmr_req))
goto release;
/*
* Locate the underlying TCM struct se_lun from sc->device->lun
@@ -939,18 +939,6 @@ static u16 tcm_loop_get_fabric_sense_len(void)
return 0;
}
-static u64 tcm_loop_pack_lun(unsigned int lun)
-{
- u64 result;
-
- /* LSB of lun into byte 1 big-endian */
- result = ((lun & 0xff) << 8);
- /* use flat space addressing method */
- result |= 0x40 | ((lun >> 8) & 0x3f);
-
- return cpu_to_le64(result);
-}
-
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
switch (tl_hba->tl_proto_id) {
@@ -1029,6 +1017,7 @@ static int tcm_loop_make_nexus(
struct se_portal_group *se_tpg;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
struct tcm_loop_nexus *tl_nexus;
+ int ret = -ENOMEM;
if (tl_tpg->tl_hba->tl_nexus) {
printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
@@ -1045,8 +1034,10 @@ static int tcm_loop_make_nexus(
* Initialize the struct se_session pointer
*/
tl_nexus->se_sess = transport_init_session();
- if (!tl_nexus->se_sess)
+ if (IS_ERR(tl_nexus->se_sess)) {
+ ret = PTR_ERR(tl_nexus->se_sess);
goto out;
+ }
/*
* Since we are running in 'demo mode' this call with generate a
* struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
@@ -1072,7 +1063,7 @@ static int tcm_loop_make_nexus(
out:
kfree(tl_nexus);
- return -ENOMEM;
+ return ret;
}
static int tcm_loop_drop_nexus(
@@ -1152,7 +1143,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
* the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
* tcm_loop_make_nexus()
*/
- if (strlen(page) > TL_WWN_ADDR_LEN) {
+ if (strlen(page) >= TL_WWN_ADDR_LEN) {
printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
" max: %d\n", page, TL_WWN_ADDR_LEN);
return -EINVAL;
@@ -1333,7 +1324,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
return ERR_PTR(-EINVAL);
check_len:
- if (strlen(name) > TL_WWN_ADDR_LEN) {
+ if (strlen(name) >= TL_WWN_ADDR_LEN) {
printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
TL_WWN_ADDR_LEN);
@@ -1481,7 +1472,6 @@ static int tcm_loop_register_configfs(void)
fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
- fabric->tf_ops.pack_lun = &tcm_loop_pack_lun;
tf_cg = &fabric->tf_group;
/*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f44a6e6e1d..25c1f49a7d8b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -304,7 +304,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
printk(KERN_ERR "Unable to locate passed fabric name\n");
return NULL;
}
- if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
+ if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
"_NAME_SIZE\n", name);
return NULL;
@@ -312,7 +312,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
if (!(tf))
- return ERR_PTR(-ENOMEM);
+ return NULL;
INIT_LIST_HEAD(&tf->tf_list);
atomic_set(&tf->tf_access_cnt, 0);
@@ -497,10 +497,6 @@ static int target_fabric_tf_ops_check(
printk(KERN_ERR "Missing tfo->is_state_remove()\n");
return -EINVAL;
}
- if (!(tfo->pack_lun)) {
- printk(KERN_ERR "Missing tfo->pack_lun()\n");
- return -EINVAL;
- }
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
@@ -855,7 +851,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
return -EOPNOTSUPP;
}
- if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
+ if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
return -EOVERFLOW;
@@ -921,7 +917,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
- if ((len + strlen(buf) > PAGE_SIZE))
+ if ((len + strlen(buf) >= PAGE_SIZE))
break;
len += sprintf(page+len, "%s", buf);
@@ -966,19 +962,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if ((len + strlen(buf) >= PAGE_SIZE)) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if ((len + strlen(buf) >= PAGE_SIZE)) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if ((len + strlen(buf) >= PAGE_SIZE)) \
break; \
len += sprintf(page+len, "%s", buf); \
} \
@@ -1303,7 +1299,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
&i_buf[0] : "", pr_reg->pr_res_key,
pr_reg->pr_res_generation);
- if ((len + strlen(buf) > PAGE_SIZE))
+ if ((len + strlen(buf) >= PAGE_SIZE))
break;
len += sprintf(page+len, "%s", buf);
@@ -1500,7 +1496,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
+ if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
printk(KERN_ERR "APTPL metadata initiator_node="
" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
PR_APTPL_MAX_IPORT_LEN);
@@ -1514,7 +1510,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(isid) > PR_REG_ISID_LEN) {
+ if (strlen(isid) >= PR_REG_ISID_LEN) {
printk(KERN_ERR "APTPL metadata initiator_isid"
"= exceeds PR_REG_ISID_LEN: %d\n",
PR_REG_ISID_LEN);
@@ -1575,7 +1571,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
+ if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
printk(KERN_ERR "APTPL metadata target_node="
" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
PR_APTPL_MAX_TPORT_LEN);
@@ -3056,7 +3052,7 @@ static struct config_group *target_core_call_addhbatotarget(
int ret;
memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
- if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
+ if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
printk(KERN_ERR "Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
TARGET_CORE_NAME_MAX_LEN);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d25e20829012..ba698ea62bb2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -38,6 +38,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -150,13 +151,13 @@ out:
{
struct se_device *dev = se_lun->lun_se_dev;
- spin_lock(&dev->stats_lock);
+ spin_lock_irq(&dev->stats_lock);
dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
- spin_unlock(&dev->stats_lock);
+ spin_unlock_irq(&dev->stats_lock);
}
/*
@@ -191,7 +192,7 @@ int transport_get_lun_for_tmr(
&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
- dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+ dev = se_lun->lun_se_dev;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
@@ -215,6 +216,7 @@ int transport_get_lun_for_tmr(
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -1;
}
+ se_tmr->tmr_dev = dev;
spin_lock(&dev->se_tmr_lock);
list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
@@ -658,8 +660,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
struct se_session *se_sess = SE_SESS(se_cmd);
struct se_task *se_task;
unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
- u32 cdb_offset = 0, lun_count = 0, offset = 8;
- u64 i, lun;
+ u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
break;
@@ -675,15 +676,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!(se_sess)) {
- lun = 0;
- buf[offset++] = ((lun >> 56) & 0xff);
- buf[offset++] = ((lun >> 48) & 0xff);
- buf[offset++] = ((lun >> 40) & 0xff);
- buf[offset++] = ((lun >> 32) & 0xff);
- buf[offset++] = ((lun >> 24) & 0xff);
- buf[offset++] = ((lun >> 16) & 0xff);
- buf[offset++] = ((lun >> 8) & 0xff);
- buf[offset++] = (lun & 0xff);
+ int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
goto done;
}
@@ -703,15 +696,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
if ((cdb_offset + 8) >= se_cmd->data_length)
continue;
- lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
- buf[offset++] = ((lun >> 56) & 0xff);
- buf[offset++] = ((lun >> 48) & 0xff);
- buf[offset++] = ((lun >> 40) & 0xff);
- buf[offset++] = ((lun >> 32) & 0xff);
- buf[offset++] = ((lun >> 24) & 0xff);
- buf[offset++] = ((lun >> 16) & 0xff);
- buf[offset++] = ((lun >> 8) & 0xff);
- buf[offset++] = (lun & 0xff);
+ int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ offset += 8;
cdb_offset += 8;
}
spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
@@ -1445,7 +1431,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_lun_acl *lacl;
struct se_node_acl *nacl;
- if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
+ if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
TPG_TFO(tpg)->get_fabric_name());
*ret = -EOVERFLOW;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index a79f518ca6e2..b662db3a320b 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1916,7 +1916,7 @@ static int __core_scsi3_update_aptpl_buf(
pr_reg->pr_res_mapped_lun);
}
- if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
printk(KERN_ERR "Unable to update renaming"
" APTPL metadata\n");
spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1934,7 +1934,7 @@ static int __core_scsi3_update_aptpl_buf(
TPG_TFO(tpg)->tpg_get_tag(tpg),
lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
- if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
printk(KERN_ERR "Unable to update renaming"
" APTPL metadata\n");
spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1986,7 +1986,7 @@ static int __core_scsi3_write_aptpl_to_file(
memset(iov, 0, sizeof(struct iovec));
memset(path, 0, 512);
- if (strlen(&wwn->unit_serial[0]) > 512) {
+ if (strlen(&wwn->unit_serial[0]) >= 512) {
printk(KERN_ERR "WWN value for struct se_device does not fit"
" into path buffer\n");
return -1;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7ff6a35f26ac..331d423fd0e0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -41,7 +41,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -911,7 +911,7 @@ static int pscsi_do_task(struct se_task *task)
* descriptor
*/
blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
- (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+ (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4a109835e420..179063d81cdd 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req(
{
struct se_tmr_req *tmr;
- tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+ tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
+ GFP_ATOMIC : GFP_KERNEL);
if (!(tmr)) {
printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
@@ -74,10 +75,16 @@ void core_tmr_release_req(
{
struct se_device *dev = tmr->tmr_dev;
+ if (!dev) {
+ kmem_cache_free(se_tmr_req_cache, tmr);
+ return;
+ }
+
spin_lock(&dev->se_tmr_lock);
list_del(&tmr->tmr_list);
- kmem_cache_free(se_tmr_req_cache, tmr);
spin_unlock(&dev->se_tmr_lock);
+
+ kmem_cache_free(se_tmr_req_cache, tmr);
}
static void core_tmr_handle_tas_abort(
@@ -398,9 +405,9 @@ int core_tmr_lun_reset(
printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
}
- spin_lock(&dev->stats_lock);
+ spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
- spin_unlock(&dev->stats_lock);
+ spin_unlock_irq(&dev->stats_lock);
DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b9d3501bdd91..4b9b7169bdd9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -42,7 +42,7 @@
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/libsas.h> /* For TASK_ATTR_* */
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -536,13 +536,13 @@ EXPORT_SYMBOL(transport_register_session);
void transport_deregister_session_configfs(struct se_session *se_sess)
{
struct se_node_acl *se_nacl;
-
+ unsigned long flags;
/*
* Used by struct se_node_acl's under ConfigFS to locate active struct se_session
*/
se_nacl = se_sess->se_node_acl;
if ((se_nacl)) {
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
list_del(&se_sess->sess_acl_list);
/*
* If the session list is empty, then clear the pointer.
@@ -556,7 +556,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
se_nacl->acl_sess_list.prev,
struct se_session, sess_acl_list);
}
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);
@@ -762,7 +762,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
transport_all_task_dev_remove_state(cmd);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- transport_free_dev_tasks(cmd);
check_lun:
spin_lock_irqsave(&lun->lun_cmd_lock, flags);
@@ -1075,7 +1074,7 @@ static inline int transport_add_task_check_sam_attr(
* head of the struct se_device->execute_task_list, and task_prev
* after that for each subsequent task
*/
- if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
list_add(&task->t_execute_list,
(task_prev != NULL) ?
&task_prev->t_execute_list :
@@ -1195,6 +1194,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
break;
list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);
return task;
@@ -1210,8 +1210,14 @@ void transport_remove_task_from_execute_queue(
{
unsigned long flags;
+ if (atomic_read(&task->task_execute_queue) == 0) {
+ dump_stack();
+ return;
+ }
+
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
atomic_dec(&dev->execute_tasks);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
@@ -1867,7 +1873,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 0;
- if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+ if (cmd->sam_task_attr == MSG_ACA_TAG) {
DEBUG_STA("SAM Task Attribute ACA"
" emulation is not supported\n");
return -1;
@@ -2058,6 +2064,13 @@ int transport_generic_handle_tmr(
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
+void transport_generic_free_cmd_intr(
+ struct se_cmd *cmd)
+{
+ transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
+}
+EXPORT_SYMBOL(transport_generic_free_cmd_intr);
+
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
struct se_task *task, *task_tmp;
@@ -2504,7 +2517,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
- if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
smp_mb__after_atomic_inc();
DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
@@ -2512,7 +2525,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
T_TASK(cmd)->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
- } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
list_add_tail(&cmd->se_ordered_list,
&SE_DEV(cmd)->ordered_cmd_list);
@@ -3411,7 +3424,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
case READ_BUFFER:
@@ -3619,7 +3632,7 @@ static int transport_generic_cmd_sequencer(
* See spc4r17 section 5.3
*/
if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
break;
default:
@@ -3777,21 +3790,21 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
struct se_cmd *cmd_p, *cmd_tmp;
int new_active_tasks = 0;
- if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+ if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_dec(&dev->dev_hoq_count);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&dev->ordered_cmd_lock);
list_del(&cmd->se_ordered_list);
atomic_dec(&dev->dev_ordered_sync);
@@ -3824,7 +3837,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
new_active_tasks++;
spin_lock(&dev->delayed_cmd_lock);
- if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+ if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
break;
}
spin_unlock(&dev->delayed_cmd_lock);
@@ -4776,18 +4789,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
sg_end_cur->page_link &= ~0x02;
sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += (task->task_sg_num + 1);
- } else
sg_count += task->task_sg_num;
+ task_sg_num = (task->task_sg_num + 1);
+ } else {
+ sg_chain(sg_head, task_sg_num, sg_head_cur);
+ sg_count += task->task_sg_num;
+ task_sg_num = task->task_sg_num;
+ }
sg_head = sg_head_cur;
sg_link = sg_link_cur;
- task_sg_num = task->task_sg_num;
continue;
}
sg_head = sg_first = &task->task_sg[0];
sg_link = &task->task_sg[task->task_sg_num];
- task_sg_num = task->task_sg_num;
/*
* Check for single task..
*/
@@ -4798,9 +4813,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
*/
sg_end = &task->task_sg[task->task_sg_num - 1];
sg_end->page_link &= ~0x02;
- sg_count += (task->task_sg_num + 1);
- } else
sg_count += task->task_sg_num;
+ task_sg_num = (task->task_sg_num + 1);
+ } else {
+ sg_count += task->task_sg_num;
+ task_sg_num = task->task_sg_num;
+ }
}
/*
* Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4809,21 +4827,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
T_TASK(cmd)->t_tasks_sg_chained = sg_first;
T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
- DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
- " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+ DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
+ " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
T_TASK(cmd)->t_tasks_sg_chained_no);
for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
T_TASK(cmd)->t_tasks_sg_chained_no, i) {
- DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
- sg, sg_page(sg), sg->length, sg->offset);
+ DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
+ i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
if (sg_is_chain(sg))
DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
if (sg_is_last(sg))
DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
}
-
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
@@ -5297,6 +5314,8 @@ void transport_generic_free_cmd(
if (wait_for_tasks && cmd->transport_wait_for_tasks)
cmd->transport_wait_for_tasks(cmd, 0, 0);
+ transport_free_dev_tasks(cmd);
+
transport_generic_remove(cmd, release_to_pool,
session_reinstatement);
}
@@ -6132,6 +6151,9 @@ get_cmd:
case TRANSPORT_REMOVE:
transport_generic_remove(cmd, 1, 0);
break;
+ case TRANSPORT_FREE_CMD_INTR:
+ transport_generic_free_cmd(cmd, 0, 1, 0);
+ break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index defff32b7880..7b82f1b7fef8 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -144,7 +144,7 @@ enum ft_cmd_state {
*/
struct ft_cmd {
enum ft_cmd_state state;
- u16 lun; /* LUN from request */
+ u32 lun; /* LUN from request */
struct ft_sess *sess; /* session held for cmd */
struct fc_seq *seq; /* sequence in exchange mgr */
struct se_cmd se_cmd; /* Local TCM I/O descriptor */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 49e51778f733..b2a106729d49 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -35,6 +35,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
@@ -93,29 +94,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}
-/*
- * Get LUN from CDB.
- */
-static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
-{
- u64 lun;
-
- lun = lunp[1];
- switch (lunp[0] >> 6) {
- case 0:
- break;
- case 1:
- lun |= (lunp[0] & 0x3f) << 8;
- break;
- default:
- return -1;
- }
- if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return -1;
- cmd->lun = lun;
- return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
-}
-
static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
struct se_queue_obj *qobj;
@@ -417,6 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
{
struct se_tmr_req *tmr;
struct fcp_cmnd *fcp;
+ struct ft_sess *sess;
u8 tm_func;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
@@ -424,13 +403,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
switch (fcp->fc_tm_flags) {
case FCP_TMF_LUN_RESET:
tm_func = TMR_LUN_RESET;
- if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
- ft_dump_cmd(cmd, __func__);
- transport_send_check_condition_and_sense(&cmd->se_cmd,
- cmd->se_cmd.scsi_sense_reason, 0);
- ft_sess_put(cmd->sess);
- return;
- }
break;
case FCP_TMF_TGT_RESET:
tm_func = TMR_TARGET_WARM_RESET;
@@ -462,6 +434,36 @@ static void ft_send_tm(struct ft_cmd *cmd)
return;
}
cmd->se_cmd.se_tmr_req = tmr;
+
+ switch (fcp->fc_tm_flags) {
+ case FCP_TMF_LUN_RESET:
+ cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+ if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
+ /*
+ * Make sure to clean up the newly allocated TMR request,
+ * since we cannot handle it after failing to look up
+ * the LUN.
+ */
+ FT_TM_DBG("Failed to get LUN for TMR func %d, "
+ "se_cmd %p, unpacked_lun %d\n",
+ tm_func, &cmd->se_cmd, cmd->lun);
+ ft_dump_cmd(cmd, __func__);
+ sess = cmd->sess;
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ cmd->se_cmd.scsi_sense_reason, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+ ft_sess_put(sess);
+ return;
+ }
+ break;
+ case FCP_TMF_TGT_RESET:
+ case FCP_TMF_CLR_TASK_SET:
+ case FCP_TMF_ABT_TASK_SET:
+ case FCP_TMF_CLR_ACA:
+ break;
+ default:
+ return;
+ }
transport_generic_handle_tmr(&cmd->se_cmd);
}
@@ -592,8 +594,25 @@ static void ft_send_cmd(struct ft_cmd *cmd)
case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
goto err; /* TBD not supported by tcm_fc yet */
}
+ /*
+ * Locate the SAM Task Attr from fc_pri_ta
+ */
+ switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
+ case FCP_PTA_HEADQ:
+ task_attr = MSG_HEAD_TAG;
+ break;
+ case FCP_PTA_ORDERED:
+ task_attr = MSG_ORDERED_TAG;
+ break;
+ case FCP_PTA_ACA:
+ task_attr = MSG_ACA_TAG;
+ break;
+ case FCP_PTA_SIMPLE: /* Fallthrough */
+ default:
+ task_attr = MSG_SIMPLE_TAG;
+ }
+
- /* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
data_len = ntohl(fcp->fc_dl);
cmd->cdb = fcp->fc_cdb;
@@ -617,7 +636,8 @@ static void ft_send_cmd(struct ft_cmd *cmd)
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
- ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
+ cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+ ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
if (ret < 0) {
ft_dump_cmd(cmd, __func__);
transport_send_check_condition_and_sense(&cmd->se_cmd,
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index fcdbbffe88cc..84e868c255dd 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -519,13 +519,6 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
return tpg->index;
}
-static u64 ft_pack_lun(unsigned int index)
-{
- WARN_ON(index >= 256);
- /* Caller wants this byte-swapped */
- return cpu_to_le64((index & 0xff) << 8);
-}
-
static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_name = ft_get_fabric_name,
.get_fabric_proto_ident = fc_get_fabric_proto_ident,
@@ -564,7 +557,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.get_fabric_sense_len = ft_get_fabric_sense_len,
.set_fabric_sense_len = ft_set_fabric_sense_len,
.is_state_remove = ft_is_state_remove,
- .pack_lun = ft_pack_lun,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 4c3c0efbe13f..8c4a24077d9d 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -203,7 +203,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
/* XXX For now, initiator will retry */
if (printk_ratelimit())
printk(KERN_ERR "%s: Failed to send frame %p, "
- "xid <0x%x>, remaining <0x%x>, "
+ "xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
remaining, lport->lso_max);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index a3bd57f2ea32..7491e21cc6ae 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -229,7 +229,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
return NULL;
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
}
@@ -332,7 +332,7 @@ void ft_sess_close(struct se_session *se_sess)
lport = sess->tport->lport;
port_id = sess->port_id;
if (port_id == -1) {
- mutex_lock(&ft_lport_lock);
+ mutex_unlock(&ft_lport_lock);
return;
}
FT_SESS_DBG("port_id %x\n", port_id);
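The hunk above also changes the failure check on transport_init_session() from !ptr to IS_ERR(ptr), since an ERR_PTR-encoded error is non-NULL and would slip past the old test. A minimal user-space sketch of that kernel convention (the MAX_ERRNO constant mirrors the kernel's; everything else is illustrative):

/* Sketch of the ERR_PTR/IS_ERR/PTR_ERR convention: small negative errno
 * values are encoded into the top of the pointer range. */
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	static int obj;
	void *ok = &obj;
	void *bad = ERR_PTR(-12);	/* -ENOMEM */

	printf("ok:  IS_ERR=%d\n", IS_ERR(ok));			/* 0 */
	printf("bad: IS_ERR=%d err=%ld\n", IS_ERR(bad), PTR_ERR(bad)); /* 1, -12 */
	return 0;
}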
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index fc6f2a5bde01..0b1c82ad6805 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -499,7 +499,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
dev_set_drvdata(hwmon->device, hwmon);
result = device_create_file(hwmon->device, &dev_attr_name);
if (result)
- goto unregister_hwmon_device;
+ goto free_mem;
register_sys_interface:
tz->hwmon = hwmon;
@@ -513,7 +513,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
sysfs_attr_init(&tz->temp_input.attr.attr);
result = device_create_file(hwmon->device, &tz->temp_input.attr);
if (result)
- goto unregister_hwmon_device;
+ goto unregister_name;
if (tz->ops->get_crit_temp) {
unsigned long temperature;
@@ -527,7 +527,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
result = device_create_file(hwmon->device,
&tz->temp_crit.attr);
if (result)
- goto unregister_hwmon_device;
+ goto unregister_input;
}
}
@@ -539,9 +539,9 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return 0;
- unregister_hwmon_device:
- device_remove_file(hwmon->device, &tz->temp_crit.attr);
+ unregister_input:
device_remove_file(hwmon->device, &tz->temp_input.attr);
+ unregister_name:
if (new_hwmon_device) {
device_remove_file(hwmon->device, &dev_attr_name);
hwmon_device_unregister(hwmon->device);
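The relabelled gotos above make each failure path undo only what was already registered, in reverse order of creation, instead of removing attributes that were never added. A compact illustration of that unwind pattern (function and label names here are hypothetical, not the driver's):

/* Sketch of goto-ladder error unwinding in reverse order of setup. */
#include <stdio.h>

static int create(const char *name, int fail)
{
	printf("create %s\n", name);
	return fail ? -1 : 0;
}

static int demo_setup(int fail_at)
{
	if (create("name attr", fail_at == 1))
		goto free_mem;
	if (create("temp_input attr", fail_at == 2))
		goto unregister_name;
	if (create("temp_crit attr", fail_at == 3))
		goto unregister_input;
	return 0;

unregister_input:
	printf("remove temp_input attr\n");
unregister_name:
	printf("remove name attr\n");
free_mem:
	printf("free hwmon data\n");
	return -1;
}

int main(void)
{
	return demo_setup(3) ? 1 : 0;	/* hypothetical failure at the third step */
}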
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index a4c42a75a3bf..09e8c7d53af3 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2128,8 +2128,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
gsm->tty = NULL;
}
-static unsigned int gsmld_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct gsm_mux *gsm = tty->disc_data;
const unsigned char *dp;
@@ -2162,8 +2162,6 @@ static unsigned int gsmld_receive_buf(struct tty_struct *tty,
}
/* FASYNC if needed ? */
/* If clogged call tty_throttle(tty); */
-
- return count;
}
/**
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index cac666314aef..cea56033b34c 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -188,8 +188,8 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
- const __u8 *cp, char *fp, int count);
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
+ char *fp, int count);
static void n_hdlc_tty_wakeup(struct tty_struct *tty);
#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
@@ -509,8 +509,8 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
* Called by tty low level driver when receive data is available. Data is
* interpreted as one HDLC frame.
*/
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
- const __u8 *data, char *flags, int count)
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
+ char *flags, int count)
{
register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
register struct n_hdlc_buf *buf;
@@ -521,20 +521,20 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
/* This can happen if stuff comes in on the backup tty */
if (!n_hdlc || tty != n_hdlc->tty)
- return -ENODEV;
+ return;
/* verify line is using HDLC discipline */
if (n_hdlc->magic != HDLC_MAGIC) {
printk("%s(%d) line not using HDLC discipline\n",
__FILE__,__LINE__);
- return -EINVAL;
+ return;
}
if ( count>maxframe ) {
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d) rx count>maxframesize, data discarded\n",
__FILE__,__LINE__);
- return -EINVAL;
+ return;
}
/* get a free HDLC buffer */
@@ -550,7 +550,7 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d) no more rx buffers, data discarded\n",
__FILE__,__LINE__);
- return -EINVAL;
+ return;
}
/* copy received data to HDLC buffer */
@@ -565,8 +565,6 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
if (n_hdlc->tty->fasync != NULL)
kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
- return count;
-
} /* end of n_hdlc_tty_receive() */
/**
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index a4bc39c21a43..5c6c31459a2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -139,8 +139,8 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count);
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count);
static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
.owner = THIS_MODULE,
@@ -1239,8 +1239,8 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
return result;
}
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct r3964_info *pInfo = tty->disc_data;
const unsigned char *p;
@@ -1257,8 +1257,6 @@ static unsigned int r3964_receive_buf(struct tty_struct *tty,
}
}
-
- return count;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 95d0a9c2dd13..0ad32888091c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -81,6 +81,38 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr);
}
+/**
+ * n_tty_set_room - receive space
+ * @tty: terminal
+ *
+ * Called by the driver to find out how much data it is
+ * permitted to feed to the line discipline without any being lost
+ * and thus to manage flow control. Not serialized. Answers for the
+ * "instant".
+ */
+
+static void n_tty_set_room(struct tty_struct *tty)
+{
+ /* tty->read_cnt is not read locked ? */
+ int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
+ int old_left;
+
+ /*
+ * If we are doing input canonicalization, and there are no
+ * pending newlines, let characters through without limit, so
+ * that erase characters will be handled. Other excess
+ * characters will be beeped.
+ */
+ if (left <= 0)
+ left = tty->icanon && !tty->canon_data;
+ old_left = tty->receive_room;
+ tty->receive_room = left;
+
+ /* Did this open up the receive buffer? We may need to flip */
+ if (left && !old_left)
+ schedule_work(&tty->buf.work);
+}
+
static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
{
if (tty->read_cnt < N_TTY_BUF_SIZE) {
@@ -152,6 +184,7 @@ static void reset_buffer_flags(struct tty_struct *tty)
tty->canon_head = tty->canon_data = tty->erasing = 0;
memset(&tty->read_flags, 0, sizeof tty->read_flags);
+ n_tty_set_room(tty);
check_unthrottle(tty);
}
@@ -1327,19 +1360,17 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
* calls one at a time and in order (or using flush_to_ldisc)
*/
-static unsigned int n_tty_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
const unsigned char *p;
char *f, flags = TTY_NORMAL;
int i;
char buf[64];
unsigned long cpuflags;
- int left;
- int ret = 0;
if (!tty->read_buf)
- return 0;
+ return;
if (tty->real_raw) {
spin_lock_irqsave(&tty->read_lock, cpuflags);
@@ -1349,7 +1380,6 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
- ret += i;
cp += i;
count -= i;
@@ -1359,10 +1389,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
- ret += i;
spin_unlock_irqrestore(&tty->read_lock, cpuflags);
} else {
- ret = count;
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
flags = *f++;
@@ -1390,6 +1418,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
tty->ops->flush_chars(tty);
}
+ n_tty_set_room(tty);
+
if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
L_EXTPROC(tty)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
@@ -1402,12 +1432,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
* mode. We don't want to throttle the driver if we're in
* canonical mode and don't have a newline yet!
*/
- left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
-
- if (left < TTY_THRESHOLD_THROTTLE)
+ if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
tty_throttle(tty);
-
- return ret;
}
int is_ignored(int sig)
@@ -1451,6 +1477,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
tty->raw = 1;
tty->real_raw = 1;
+ n_tty_set_room(tty);
return;
}
if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1503,6 +1530,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
else
tty->real_raw = 0;
}
+ n_tty_set_room(tty);
/* The termios change make the tty ready for I/O */
wake_up_interruptible(&tty->write_wait);
wake_up_interruptible(&tty->read_wait);
@@ -1784,6 +1812,8 @@ do_it_again:
retval = -ERESTARTSYS;
break;
}
+ /* FIXME: does n_tty_set_room need locking ? */
+ n_tty_set_room(tty);
timeout = schedule_timeout(timeout);
continue;
}
@@ -1855,8 +1885,10 @@ do_it_again:
* longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
* we won't get any more characters.
*/
- if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
+ if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+ n_tty_set_room(tty);
check_unthrottle(tty);
+ }
if (b - buf >= minimum)
break;
@@ -1878,6 +1910,7 @@ do_it_again:
} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
goto do_it_again;
+ n_tty_set_room(tty);
return retval;
}
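The reinstated n_tty_set_room() advertises how much the line discipline can still absorb, which the buffer layer now relies on instead of a per-call return value. A user-space sketch of the arithmetic it performs (N_TTY_BUF_SIZE is 4096 in this kernel; the inputs in main() are illustrative):

/* Free space in the read ring buffer, keeping one slot empty, with the
 * canonical-mode exception that lets erase characters still get through. */
#include <stdio.h>

#define N_TTY_BUF_SIZE 4096

static int set_room(int read_cnt, int icanon, int canon_data)
{
	int left = N_TTY_BUF_SIZE - read_cnt - 1;

	if (left <= 0)
		left = icanon && !canon_data;
	return left;
}

int main(void)
{
	printf("%d\n", set_room(0, 0, 0));	/* 4095: empty buffer */
	printf("%d\n", set_room(4095, 0, 0));	/* 0: raw mode, buffer full */
	printf("%d\n", set_room(4095, 1, 0));	/* 1: canonical, no newline yet */
	return 0;
}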
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 4b4968a294b2..78e98a5cef96 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -973,7 +973,7 @@ ce4100_serial_setup(struct serial_private *priv,
static int
pci_omegapci_setup(struct serial_private *priv,
- struct pciserial_board *board,
+ const struct pciserial_board *board,
struct uart_port *port, int idx)
{
return setup_port(priv, port, 2, idx * 8, 0);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 652bdac8ce8e..6d5d6e679fc7 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1420,7 +1420,7 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &atmel_pops;
port->fifosize = 1;
- port->line = pdev->id;
+ port->line = data->num;
port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 1bd28450ca40..a764bf99743b 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -421,7 +421,6 @@ static int max3110_main_thread(void *_max)
int ret = 0;
struct circ_buf *xmit = &max->con_xmit;
- init_waitqueue_head(wq);
pr_info(PR_FMT "start main thread\n");
do {
@@ -823,7 +822,7 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
res = RC_TAG;
ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
if (ret < 0 || res == 0 || res == 0xffff) {
- printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)",
+ dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
res);
ret = -ENODEV;
goto err_get_page;
@@ -838,6 +837,8 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
max->con_xmit.head = 0;
max->con_xmit.tail = 0;
+ init_waitqueue_head(&max->wq);
+
max->main_thread = kthread_run(max3110_main_thread,
max, "max3110_main");
if (IS_ERR(max->main_thread)) {
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index f2cb7503fcb2..465210930890 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1397,6 +1397,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
int fifosize, base_baud;
int port_type;
struct pch_uart_driver_data *board;
+ const char *board_name;
board = &drv_dat[id->driver_data];
port_type = board->port_type;
@@ -1412,7 +1413,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
base_baud = 1843200; /* 1.8432MHz */
/* quirk for CM-iTC board */
- if (strstr(dmi_get_system_info(DMI_BOARD_NAME), "CM-iTC"))
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (board_name && strstr(board_name, "CM-iTC"))
base_baud = 192000000; /* 192.0MHz */
switch (port_type) {
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 46de2e075dac..6c9b7cd6778a 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -413,10 +413,8 @@ static void flush_to_ldisc(struct work_struct *work)
spin_lock_irqsave(&tty->buf.lock, flags);
if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
- struct tty_buffer *head, *tail = tty->buf.tail;
- int seen_tail = 0;
+ struct tty_buffer *head;
while ((head = tty->buf.head) != NULL) {
- int copied;
int count;
char *char_buf;
unsigned char *flag_buf;
@@ -425,15 +423,6 @@ static void flush_to_ldisc(struct work_struct *work)
if (!count) {
if (head->next == NULL)
break;
- /*
- There's a possibility tty might get new buffer
- added during the unlock window below. We could
- end up spinning in here forever hogging the CPU
- completely. To avoid this let's have a rest each
- time we processed the tail buffer.
- */
- if (tail == head)
- seen_tail = 1;
tty->buf.head = head->next;
tty_buffer_free(tty, head);
continue;
@@ -443,19 +432,17 @@ static void flush_to_ldisc(struct work_struct *work)
line discipline as we want to empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags))
break;
+ if (!tty->receive_room)
+ break;
+ if (count > tty->receive_room)
+ count = tty->receive_room;
char_buf = head->char_buf_ptr + head->read;
flag_buf = head->flag_buf_ptr + head->read;
+ head->read += count;
spin_unlock_irqrestore(&tty->buf.lock, flags);
- copied = disc->ops->receive_buf(tty, char_buf,
+ disc->ops->receive_buf(tty, char_buf,
flag_buf, count);
spin_lock_irqsave(&tty->buf.lock, flags);
-
- head->read += copied;
-
- if (copied == 0 || seen_tail) {
- schedule_work(&tty->buf.work);
- break;
- }
}
clear_bit(TTY_FLUSHING, &tty->flags);
}
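With receive_buf() no longer returning a count, flush_to_ldisc() clamps each chunk to the advertised receive_room and advances the read index before handing the data over. A small user-space sketch of that consumer step (all values are illustrative):

/* Clamp the chunk to receive_room and advance the read index up front,
 * since the ldisc is assumed to consume everything it is given. */
#include <stdio.h>

static void receive_buf(const char *data, int count)
{
	(void)data;
	printf("ldisc consumed %d bytes\n", count);
}

int main(void)
{
	char buf[300] = { 0 };
	int commit = 300;		/* bytes queued in the tty buffer */
	int read = 0;			/* bytes already pushed to the ldisc */
	int receive_room = 128;		/* what the ldisc advertised */
	int count = commit - read;

	if (count > receive_room)
		count = receive_room;
	if (count > 0) {
		read += count;				/* advance before the call */
		receive_buf(buf + read - count, count);
	}
	printf("still buffered: %d\n", commit - read);	/* 172 */
	return 0;
}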
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 67b1d0d7c8ac..fb864e7fcd13 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -332,7 +332,8 @@ int paste_selection(struct tty_struct *tty)
continue;
}
count = sel_buffer_lth - pasted;
- count = tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
+ count = min(count, tty->receive_room);
+ tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
NULL, count);
pasted += count;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 395a347f2ebb..dac7676ce21b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1530,6 +1530,8 @@ static const struct usb_device_id acm_ids[] = {
{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
{ NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0335), }, /* Nokia E7 */
+ { NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */
{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e35a17687c05..aa3cc465a601 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
* Just re-enable it without affecting the endpoint toggles.
*/
usb_enable_interface(udev, intf, false);
- } else if (!error && !intf->dev.power.in_suspend) {
+ } else if (!error && !intf->dev.power.is_prepared) {
r = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
if (r < 0)
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
}
/* Try to rebind the interface */
- if (!intf->dev.power.in_suspend) {
+ if (!intf->dev.power.is_prepared) {
intf->needs_binding = 0;
rc = device_attach(&intf->dev);
if (rc < 0)
@@ -1107,7 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
if (intf->condition == USB_INTERFACE_UNBOUND) {
/* Carry out a deferred switch to altsetting 0 */
- if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) {
+ if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) {
usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
intf->needs_altsetting0 = 0;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 79a58c3a2e2a..90ae1753dda1 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -339,7 +339,8 @@ static int get_hub_status(struct usb_device *hdev,
{
int i, status = -ETIMEDOUT;
- for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) {
+ for (i = 0; i < USB_STS_RETRIES &&
+ (status == -ETIMEDOUT || status == -EPIPE); i++) {
status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0,
data, sizeof(*data), USB_STS_TIMEOUT);
@@ -355,7 +356,8 @@ static int get_port_status(struct usb_device *hdev, int port1,
{
int i, status = -ETIMEDOUT;
- for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) {
+ for (i = 0; i < USB_STS_RETRIES &&
+ (status == -ETIMEDOUT || status == -EPIPE); i++) {
status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
data, sizeof(*data), USB_STS_TIMEOUT);
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 1b125c224dcf..2278dad886e2 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -389,7 +389,6 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
mutex_unlock(&inode->i_mutex);
if (!error)
d_delete(dentry);
- dput(dentry);
return error;
}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 58456d1aec21..029e288805b6 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -632,13 +632,10 @@ config USB_DUMMY_HCD
endchoice
+# Selected by UDC drivers that support high-speed operation.
config USB_GADGET_DUALSPEED
bool
depends on USB_GADGET
- default n
- help
- Means that gadget drivers should include extra descriptors
- and code to handle dual-speed controllers.
#
# USB Gadget Drivers
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 6e42aab75806..95e8138cd48f 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -60,6 +60,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/system.h>
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 41dc093c0a1b..f4690ffcb489 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -38,6 +38,7 @@
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <mach/hardware.h>
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 61ff927928ab..d3dcabc1a5fc 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -1906,6 +1906,7 @@ static int dummy_hcd_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
the_controller = hcd_to_dummy (hcd);
+ hcd->has_tt = 1;
retval = usb_add_hcd(hcd, 0, 0);
if (retval != 0) {
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index a01383f71f38..a56876aaf76c 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -431,8 +431,10 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
/* halt any endpoint by doing a "wrong direction" i/o call */
if (!usb_endpoint_dir_in(&data->desc)) {
- if (usb_endpoint_xfer_isoc(&data->desc))
+ if (usb_endpoint_xfer_isoc(&data->desc)) {
+ mutex_unlock(&data->lock);
return -EINVAL;
+ }
DBG (data->dev, "%s halt\n", data->name);
spin_lock_irq (&data->dev->lock);
if (likely (data->ep != NULL))
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index b62b2640deb0..b1a8146b9d50 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -2083,7 +2083,7 @@ out:
}
#ifdef CONFIG_PM
-static int mv_udc_suspend(struct platform_device *_dev, pm_message_t state)
+static int mv_udc_suspend(struct device *_dev)
{
struct mv_udc *udc = the_controller;
@@ -2092,7 +2092,7 @@ static int mv_udc_suspend(struct platform_device *_dev, pm_message_t state)
return 0;
}
-static int mv_udc_resume(struct platform_device *_dev)
+static int mv_udc_resume(struct device *_dev)
{
struct mv_udc *udc = the_controller;
int retval;
@@ -2100,7 +2100,7 @@ static int mv_udc_resume(struct platform_device *_dev)
retval = mv_udc_phy_init(udc->phy_regs);
if (retval) {
dev_err(_dev, "phy initialization error %d\n", retval);
- goto error;
+ return retval;
}
udc_reset(udc);
ep0_reset(udc);
@@ -2122,7 +2122,7 @@ static struct platform_driver udc_driver = {
.owner = THIS_MODULE,
.name = "pxa-u2o",
#ifdef CONFIG_PM
- .pm = mv_udc_pm_ops,
+ .pm = &mv_udc_pm_ops,
#endif
},
};
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 24696f7fa6a9..476d88e1ae97 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -63,6 +63,7 @@
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/io.h>
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 365c02fc25fc..774545494cf2 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2216,7 +2216,6 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
driver_name, LUBBOCK_USB_DISC_IRQ, retval);
-lubbock_fail0:
goto err_irq_lub;
}
retval = request_irq(LUBBOCK_USB_IRQ,
@@ -2226,7 +2225,6 @@ lubbock_fail0:
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
driver_name, LUBBOCK_USB_IRQ, retval);
- free_irq(LUBBOCK_USB_DISC_IRQ, dev);
goto lubbock_fail0;
}
} else
@@ -2236,10 +2234,11 @@ lubbock_fail0:
return 0;
#ifdef CONFIG_ARCH_LUBBOCK
+lubbock_fail0:
free_irq(LUBBOCK_USB_DISC_IRQ, dev);
err_irq_lub:
-#endif
free_irq(irq, dev);
+#endif
err_irq1:
if (gpio_is_valid(dev->mach->gpio_pullup))
gpio_free(dev->mach->gpio_pullup);
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index acb9cc418df9..0dfee282878a 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2680,9 +2680,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
writel(0, hsotg->regs + S3C_DAINTMSK);
- dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
- readl(hsotg->regs + S3C_DIEPCTL0),
- readl(hsotg->regs + S3C_DOEPCTL0));
+ dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+ readl(hsotg->regs + S3C_DIEPCTL0),
+ readl(hsotg->regs + S3C_DOEPCTL0));
/* enable in and out endpoint interrupts */
s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);
@@ -2701,7 +2701,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
udelay(10); /* see openiboot */
__bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
- dev_info(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
+ dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
/* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by
writing to the EPCTL register.. */
@@ -2721,9 +2721,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
s3c_hsotg_enqueue_setup(hsotg);
- dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
- readl(hsotg->regs + S3C_DIEPCTL0),
- readl(hsotg->regs + S3C_DOEPCTL0));
+ dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+ readl(hsotg->regs + S3C_DIEPCTL0),
+ readl(hsotg->regs + S3C_DOEPCTL0));
/* clear global NAKs */
writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,
@@ -2921,9 +2921,9 @@ static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
/* setup fifos */
- dev_info(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
- readl(hsotg->regs + S3C_GRXFSIZ),
- readl(hsotg->regs + S3C_GNPTXFSIZ));
+ dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+ readl(hsotg->regs + S3C_GRXFSIZ),
+ readl(hsotg->regs + S3C_GNPTXFSIZ));
s3c_hsotg_init_fifo(hsotg);
@@ -2945,6 +2945,7 @@ static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
{
+#ifdef DEBUG
struct device *dev = hsotg->dev;
void __iomem *regs = hsotg->regs;
u32 val;
@@ -2987,6 +2988,7 @@ static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
+#endif
}
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index cfe3cf56d6bd..d5e3e1e58626 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -26,6 +26,7 @@
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
#include <mach/regs-s3c2443-clock.h>
#include <plat/udc.h>
@@ -1301,7 +1302,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
hsudc->uclk = clk_get(&pdev->dev, "usb-device");
if (IS_ERR(hsudc->uclk)) {
dev_err(dev, "failed to find usb-device clock source\n");
- return PTR_ERR(hsudc->uclk);
+ ret = PTR_ERR(hsudc->uclk);
+ goto err_clk;
}
clk_enable(hsudc->uclk);
@@ -1310,7 +1312,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
disable_irq(hsudc->irq);
local_irq_enable();
return 0;
-
+err_clk:
+ free_irq(hsudc->irq, hsudc);
err_irq:
iounmap(hsudc->regs);
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 6d8b04061d5d..100f2635cf0a 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -36,6 +36,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 660b80a75cac..1102ce65a3a9 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -348,11 +348,50 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
return rc;
}
+static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == 0x1E26;
+}
+
+static void ehci_enable_xhci_companion(void)
+{
+ struct pci_dev *companion = NULL;
+
+ /* The xHCI and EHCI controllers are not on the same PCI slot */
+ for_each_pci_dev(companion) {
+ if (!usb_is_intel_switchable_xhci(companion))
+ continue;
+ usb_enable_xhci_ports(companion);
+ return;
+ }
+}
+
static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ /* The BIOS on systems with the Intel Panther Point chipset may or may
+ * not support xHCI natively. That means that during system resume, it
+ * may switch the ports back to EHCI so that users can use their
+ * keyboard to select a kernel from GRUB after resume from hibernate.
+ *
+ * The BIOS is supposed to remember whether the OS had xHCI ports
+ * enabled before resume, and switch the ports back to xHCI when the
+ * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+ * writers.
+ *
+ * Unconditionally switch the ports back to xHCI after a system resume.
+ * We can't tell whether the EHCI or xHCI controller will be resumed
+ * first, so we have to do the port switchover in both drivers. Writing
+ * a '1' to the port switchover registers should have no effect if the
+ * port was already switched over.
+ */
+ if (usb_is_intel_switchable_ehci(pdev))
+ ehci_enable_xhci_companion();
+
// maybe restore FLADJ
if (time_before(jiffies, ehci->next_statechange))
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index afef7b0a4195..80be5472783a 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -312,8 +312,10 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
return PTR_ERR(usb_clk);
hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
- if (!hcd)
- return -ENOMEM;
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto err0;
+ }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -368,6 +370,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
+ err0:
clk_put(usb_clk);
return retval;
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f16c59d5f487..fd930618c28f 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -69,6 +69,9 @@
#define NB_PIF0_PWRDOWN_0 0x01100012
#define NB_PIF0_PWRDOWN_1 0x01100013
+#define USB_INTEL_XUSB2PR 0xD0
+#define USB_INTEL_USB3_PSSEN 0xD8
+
static struct amd_chipset_info {
struct pci_dev *nb_dev;
struct pci_dev *smbus_dev;
@@ -673,6 +676,64 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
return -ETIMEDOUT;
}
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
+}
+EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
+
+/*
+ * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
+ * share some number of ports. These ports can be switched between either
+ * controller. Not all of the ports under the EHCI host controller may be
+ * switchable.
+ *
+ * The ports should be switched over to xHCI before PCI probes for any device
+ * start. This avoids active devices under EHCI being disconnected during the
+ * port switchover, which could cause loss of data on USB storage devices, or
+ * failed boot when the root file system is on a USB mass storage device and is
+ * enumerated under EHCI first.
+ *
+ * We write into the xHC's PCI configuration space in some Intel-specific
+ * registers to switch the ports over. The USB 3.0 terminations and the USB
+ * 2.0 data wires are switched separately. We want to enable the SuperSpeed
+ * terminations before switching the USB 2.0 wires over, so that USB 3.0
+ * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
+ */
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+ u32 ports_available;
+
+ ports_available = 0xffffffff;
+ /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+ * Register, to turn on SuperSpeed terminations for all
+ * available ports.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ cpu_to_le32(ports_available));
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+ "under xHCI: 0x%x\n", ports_available);
+
+ ports_available = 0xffffffff;
+ /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+ * switch the USB 2.0 power and data lines over to the xHCI
+ * host.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ cpu_to_le32(ports_available));
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
+ "to xHCI: 0x%x\n", ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
/**
* PCI Quirks for xHCI.
*
@@ -732,6 +793,8 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
writel(XHCI_LEGACY_DISABLE_SMI,
base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
hc_init:
op_reg_base = base + XHCI_HC_LENGTH(readl(base));
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 6ae9f78e9938..b1002a8ef96f 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -8,6 +8,8 @@ int usb_amd_find_chipset_info(void);
void usb_amd_dev_put(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
#else
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2e0486178dbe..1f50b4468e87 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -438,13 +438,13 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
- case 0:
+ case SLOT_STATE_ENABLED:
return "enabled/disabled";
- case 1:
+ case SLOT_STATE_DEFAULT:
return "default";
- case 2:
+ case SLOT_STATE_ADDRESSED:
return "addressed";
- case 3:
+ case SLOT_STATE_CONFIGURED:
return "configured";
default:
return "reserved";
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 26caba4c1950..0f8e1d29a858 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -985,9 +985,19 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
if (interval != ep->desc.bInterval - 1)
dev_warn(&udev->dev,
- "ep %#x - rounding interval to %d microframes\n",
+ "ep %#x - rounding interval to %d %sframes\n",
ep->desc.bEndpointAddress,
- 1 << interval);
+ 1 << interval,
+ udev->speed == USB_SPEED_FULL ? "" : "micro");
+
+ if (udev->speed == USB_SPEED_FULL) {
+ /*
+ * Full speed isoc endpoints specify interval in frames,
+ * not microframes. We are using microframes everywhere,
+ * so adjust accordingly.
+ */
+ interval += 3; /* 1 frame = 2^3 uframes */
+ }
return interval;
}
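The hunk above accounts for full-speed isochronous endpoints expressing bInterval in 1 ms frames while the xHC is programmed in 125 us microframes; since 1 frame = 8 microframes = 2^3, the exponent gains 3. A worked example of that arithmetic (the bInterval value is hypothetical):

/* Frames-to-microframes exponent adjustment for full-speed isoc endpoints. */
#include <stdio.h>

static unsigned int clamp_val(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int bInterval = 4;				/* descriptor value */
	unsigned int interval = clamp_val(bInterval, 1, 16) - 1; /* exponent: 3, i.e. 8 frames */
	int full_speed = 1;

	if (full_speed)
		interval += 3;		/* 2^3 frames -> 2^6 microframes */

	printf("period = %u microframes (%u ms)\n",
	       1u << interval, (1u << interval) / 8);	/* 64 microframes = 8 ms */
	return 0;
}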
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cbc4d491e626..17541d09eabb 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -106,18 +106,34 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
- pdev->revision == 0x0) {
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) {
+ if (pdev->revision == 0x0) {
xhci->quirks |= XHCI_RESET_EP_QUIRK;
xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
" endpoint cmd after reset endpoint\n");
+ }
+ /* Fresco Logic confirms: all revisions of this chip do not
+ * support MSI, even though some of them claim to in their PCI
+ * capabilities.
+ */
+ xhci->quirks |= XHCI_BROKEN_MSI;
+ xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u "
+ "has broken MSI implementation\n",
+ pdev->revision);
}
+
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
/* AMD PLL quirk */
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ }
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
@@ -242,8 +258,28 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval = 0;
+ /* The BIOS on systems with the Intel Panther Point chipset may or may
+ * not support xHCI natively. That means that during system resume, it
+ * may switch the ports back to EHCI so that users can use their
+ * keyboard to select a kernel from GRUB after resume from hibernate.
+ *
+ * The BIOS is supposed to remember whether the OS had xHCI ports
+ * enabled before resume, and switch the ports back to xHCI when the
+ * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+ * writers.
+ *
+ * Unconditionally switch the ports back to xHCI after a system resume.
+ * We can't tell whether the EHCI or xHCI controller will be resumed
+ * first, so we have to do the port switchover in both drivers. Writing
+ * a '1' to the port switchover registers should have no effect if the
+ * port was already switched over.
+ */
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
+
retval = xhci_resume(xhci, hibernated);
return retval;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 237a765f8d18..800f417c7309 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -167,12 +167,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
next = ring->dequeue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}
/*
@@ -248,12 +242,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
next = ring->enqueue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
/*
@@ -636,13 +624,11 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
}
usb_hcd_unlink_urb_from_ep(hcd, urb);
- xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(hcd, urb, status);
xhci_urb_free_priv(xhci, urb_priv);
spin_lock(&xhci->lock);
- xhci_dbg(xhci, "%s URB given back\n", adjective);
}
}
@@ -692,6 +678,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -1093,8 +1081,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
complete(&xhci->addr_dev);
break;
case TRB_TYPE(TRB_DISABLE_SLOT):
- if (xhci->devs[slot_id])
+ if (xhci->devs[slot_id]) {
+ if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ /* Delete default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci,
+ xhci->devs[slot_id], true);
xhci_free_virt_device(xhci, slot_id);
+ }
break;
case TRB_TYPE(TRB_CONFIG_EP):
virt_dev = xhci->devs[slot_id];
@@ -1630,7 +1623,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
"without IOC set??\n");
*status = -ESHUTDOWN;
} else {
- xhci_dbg(xhci, "Successful control transfer!\n");
*status = 0;
}
break;
@@ -1727,7 +1719,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
switch (trb_comp_code) {
case COMP_SUCCESS:
frame->status = 0;
- xhci_dbg(xhci, "Successful isoc transfer!\n");
break;
case COMP_SHORT_TX:
frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
@@ -1791,7 +1782,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct usb_iso_packet_descriptor *frame;
int idx;
- ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx];
@@ -1837,12 +1828,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
else
*status = 0;
} else {
- if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
- xhci_dbg(xhci, "Successful bulk "
- "transfer!\n");
- else
- xhci_dbg(xhci, "Successful interrupt "
- "transfer!\n");
*status = 0;
}
break;
@@ -1856,11 +1841,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Others already handled above */
break;
}
- xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
- "%d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- td->urb->transfer_buffer_length,
- TRB_LEN(le32_to_cpu(event->transfer_len)));
+ if (trb_comp_code == COMP_SHORT_TX)
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ TRB_LEN(le32_to_cpu(event->transfer_len)));
/* Fast path - was this the last TRB in the TD for this URB? */
if (event_trb == td->last_trb) {
if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
@@ -1954,7 +1940,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Endpoint ID is 1 based, our index is zero based */
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
- xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -2081,6 +2066,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (!event_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ /* Some host controllers give a spurious
+ * successful event after a short transfer.
+ * Ignore it.
+ */
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ ep_ring->last_td_was_short) {
+ ep_ring->last_td_was_short = false;
+ ret = 0;
+ goto cleanup;
+ }
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
@@ -2091,6 +2086,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ret = skip_isoc_td(xhci, td, event, ep, &status);
goto cleanup;
}
+ if (trb_comp_code == COMP_SHORT_TX)
+ ep_ring->last_td_was_short = true;
+ else
+ ep_ring->last_td_was_short = false;
if (ep->skip) {
xhci_dbg(xhci, "Found td. Clear skip flag.\n");
@@ -2149,9 +2148,15 @@ cleanup:
xhci_urb_free_priv(xhci, urb_priv);
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
- xhci_dbg(xhci, "Giveback URB %p, len = %d, "
- "status = %d\n",
- urb, urb->actual_length, status);
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+ (urb->transfer_flags &
+ URB_SHORT_NOT_OK)) ||
+ status != 0)
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+ "expected = %x, status = %d\n",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length,
+ status);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
@@ -2180,7 +2185,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
int update_ptrs = 1;
int ret;
- xhci_dbg(xhci, "In %s\n", __func__);
if (!xhci->event_ring || !xhci->event_ring->dequeue) {
xhci->error_bitmask |= 1 << 1;
return 0;
@@ -2193,7 +2197,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
xhci->error_bitmask |= 1 << 2;
return 0;
}
- xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2203,20 +2206,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
/* FIXME: Handle more event types. */
switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
case TRB_TYPE(TRB_COMPLETION):
- xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
handle_cmd_completion(xhci, &event->event_cmd);
- xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
break;
case TRB_TYPE(TRB_PORT_STATUS):
- xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
handle_port_status(xhci, event);
- xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
update_ptrs = 0;
break;
case TRB_TYPE(TRB_TRANSFER):
- xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
ret = handle_tx_event(xhci, &event->trans_event);
- xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
if (ret < 0)
xhci->error_bitmask |= 1 << 9;
else
@@ -2273,16 +2270,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
spin_unlock(&xhci->lock);
return IRQ_NONE;
}
- xhci_dbg(xhci, "op reg status = %08x\n", status);
- xhci_dbg(xhci, "Event ring dequeue ptr:\n");
- xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
- (unsigned long long)
- xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
- lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- (unsigned int) le32_to_cpu(trb->link.intr_target),
- (unsigned int) le32_to_cpu(trb->link.control));
-
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
@@ -2397,7 +2384,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
/* Make sure the endpoint has been added to xHC schedule */
- xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
switch (ep_state) {
case EP_STATE_DISABLED:
/*
@@ -2434,7 +2420,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
struct xhci_ring *ring = ep_ring;
union xhci_trb *next;
- xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8f2a56ece44f..06e7023258d0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -430,12 +430,19 @@ int xhci_run(struct usb_hcd *hcd)
free_irq(hcd->irq, hcd);
hcd->irq = -1;
+ /* Some Fresco Logic host controllers advertise MSI, but fail to
+ * generate interrupts. Don't even try to enable MSI.
+ */
+ if (xhci->quirks & XHCI_BROKEN_MSI)
+ goto legacy_irq;
+
ret = xhci_setup_msix(xhci);
if (ret)
/* fall back to msi*/
ret = xhci_setup_msi(xhci);
if (ret) {
+legacy_irq:
/* fall back to legacy interrupt*/
ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
hcd->irq_descr, hcd);
@@ -1314,8 +1321,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1401,6 +1410,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
return ret;
}
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
added_ctxs = xhci_get_endpoint_flag(&ep->desc);
last_ctx = xhci_last_valid_endpoint(added_ctxs);
@@ -1578,6 +1589,113 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
return ret;
}
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ /* Ignore the slot flag (bit 0), and the default control endpoint flag
+ * (bit 1). The default control endpoint is added during the Address
+ * Device command and is never removed until the slot is disabled.
+ */
+ valid_add_flags = ctrl_ctx->add_flags >> 2;
+ valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+ /* Use hweight32 to count the number of ones in the add flags, or
+ * number of endpoints added. Don't count endpoints that are changed
+ * (both added and dropped).
+ */
+ return hweight32(valid_add_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ valid_add_flags = ctrl_ctx->add_flags >> 2;
+ valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+ return hweight32(valid_drop_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes. We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes because we can oversubscribe
+ * the host in this case:
+ *
+ * - the first configure endpoint command drops more endpoints than it adds
+ * - a second configure endpoint command that adds more endpoints is queued
+ * - the first configure endpoint command fails, so the config is unchanged
+ * - the second command may succeed, even though there aren't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 added_eps;
+
+ added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+ if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+ xhci_dbg(xhci, "Not enough ep ctxs: "
+ "%u active, need to add %u, limit is %u.\n",
+ xhci->num_active_eps, added_eps,
+ xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += added_eps;
+ xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+ xhci->num_active_eps);
+ return 0;
+}
+
+/*
+ * The configure endpoint was failed by the xHC for some other reason, so we
+ * need to revert the resources that failed configuration would have used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 num_failed_eps;
+
+ num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+ xhci->num_active_eps -= num_failed_eps;
+ xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+ num_failed_eps,
+ xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 num_dropped_eps;
+
+ num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+ num_dropped_eps,
+ xhci->num_active_eps);
+}
+
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
*/
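The counting helpers added above treat an endpoint that is both dropped and added as merely changed, and skip the slot and default-control-endpoint flag bits via the ">> 2". A small user-space sketch of that accounting (flag values are hypothetical; hweight32 here is just a population count):

/* New endpoints = add bits that are not also drop bits; dropped likewise. */
#include <stdio.h>

static unsigned int hweight32(unsigned int w)
{
	return (unsigned int)__builtin_popcount(w);	/* GCC/Clang builtin */
}

int main(void)
{
	unsigned int add_flags  = 0x1c;		/* bits 2-4: endpoint contexts 1-3 */
	unsigned int drop_flags = 0x10;		/* bit 4: endpoint context 3 */
	unsigned int valid_add  = add_flags >> 2;
	unsigned int valid_drop = drop_flags >> 2;
	unsigned int changed    = valid_add & valid_drop;

	unsigned int added   = hweight32(valid_add)  - hweight32(changed);
	unsigned int dropped = hweight32(valid_drop) - hweight32(changed);

	printf("added=%u dropped=%u\n", added, dropped);	/* added=2 dropped=0 */
	return 0;
}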
@@ -1598,6 +1716,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
virt_dev = xhci->devs[udev->slot_id];
if (command) {
in_ctx = command->in_ctx;
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, in_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
+
cmd_completion = command->completion;
cmd_status = &command->status;
command->command_trb = xhci->cmd_ring->enqueue;
@@ -1613,6 +1740,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
} else {
in_ctx = virt_dev->in_ctx;
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, in_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
cmd_completion = &virt_dev->cmd_completion;
cmd_status = &virt_dev->cmd_status;
}
@@ -1627,6 +1762,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
if (ret < 0) {
if (command)
list_del(&command->cmd_list);
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ xhci_free_host_resources(xhci, in_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
@@ -1649,8 +1786,22 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
}
if (!ctx_change)
- return xhci_configure_endpoint_result(xhci, udev, cmd_status);
- return xhci_evaluate_context_result(xhci, udev, cmd_status);
+ ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
+ else
+ ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* If the command failed, remove the reserved resources.
+ * Otherwise, clean up the estimate to include dropped eps.
+ */
+ if (ret)
+ xhci_free_host_resources(xhci, in_ctx);
+ else
+ xhci_finish_resource_reservation(xhci, in_ctx);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ return ret;
}
/* Called after one or more calls to xhci_add_endpoint() or
@@ -1676,6 +1827,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
@@ -1703,8 +1856,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
/* Free any rings that were dropped, but not changed. */
for (i = 1; i < 31; ++i) {
- if ((ctrl_ctx->drop_flags & (1 << (i + 1))) &&
- !(ctrl_ctx->add_flags & (1 << (i + 1))))
+ if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
+ !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
}
xhci_zero_in_ctx(xhci, virt_dev);
@@ -2266,6 +2419,34 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
}
/*
+ * Deletes endpoint resources for endpoints that were active before a Reset
+ * Device command, or a Disable Slot command. The Reset Device command leaves
+ * the control endpoint intact, whereas the Disable Slot command deletes it.
+ *
+ * Must be called with xhci->lock held.
+ */
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep)
+{
+ int i;
+ unsigned int num_dropped_eps = 0;
+ unsigned int drop_flags = 0;
+
+ for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
+ if (virt_dev->eps[i].ring) {
+ drop_flags |= 1 << i;
+ num_dropped_eps++;
+ }
+ }
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
+ "%u now active.\n",
+ num_dropped_eps, drop_flags,
+ xhci->num_active_eps);
+}
+
+/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
* control endpoint. The USB core should come back and call
@@ -2293,6 +2474,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_command *reset_device_cmd;
int timeleft;
int last_freed_endpoint;
+ struct xhci_slot_ctx *slot_ctx;
ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
if (ret <= 0)
@@ -2325,6 +2507,12 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
return -EINVAL;
}
+ /* If device is not setup, there is no point in resetting it */
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+ SLOT_STATE_DISABLED)
+ return 0;
+
xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
/* Allocate the command structure that holds the struct completion.
* Assume we're in process context, since the normal device reset
@@ -2406,6 +2594,14 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
goto command_cleanup;
}
+ /* Free up host controller endpoint resources */
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* Don't delete the default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, false);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+
/* Everything but endpoint 0 is disabled, so free or cache the rings. */
last_freed_endpoint = 1;
for (i = 1; i < 31; ++i) {
@@ -2479,6 +2675,27 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
/*
+ * Checks if we have enough host controller resources for the default control
+ * endpoint.
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+{
+ if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
+ xhci_dbg(xhci, "Not enough ep ctxs: "
+ "%u active, need to add 1, limit is %u.\n",
+ xhci->num_active_eps, xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += 1;
+ xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
+ xhci->num_active_eps);
+ return 0;
+}
+
+
+/*
* Returns 0 if the xHC ran out of device slots, the Enable Slot command
* timed out, or allocating memory failed. Returns 1 on success.
*/
@@ -2513,24 +2730,39 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_err(xhci, "Error while assigning device slot ID\n");
return 0;
}
- /* xhci_alloc_virt_device() does not touch rings; no need to lock.
- * Use GFP_NOIO, since this function can be called from
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_reserve_host_control_ep_resources(xhci);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ goto disable_slot;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ /* Use GFP_NOIO, since this function can be called from
* xhci_discover_or_reset_device(), which may be called as part of
* mass storage driver error handling.
*/
if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
- /* Disable slot, if we can do it without mem alloc */
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
- spin_lock_irqsave(&xhci->lock, flags);
- if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
- xhci_ring_cmd_db(xhci);
- spin_unlock_irqrestore(&xhci->lock, flags);
- return 0;
+ goto disable_slot;
}
udev->slot_id = xhci->slot_id;
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripheral? */
return 1;
+
+disable_slot:
+ /* Disable slot, if we can do it without mem alloc */
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
}
/*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e12db7cfb9bb..7d1ea3bf5e1f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -560,6 +560,11 @@ struct xhci_slot_ctx {
#define SLOT_STATE (0x1f << 27)
#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
+#define SLOT_STATE_DISABLED 0
+#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED
+#define SLOT_STATE_DEFAULT 1
+#define SLOT_STATE_ADDRESSED 2
+#define SLOT_STATE_CONFIGURED 3
/**
* struct xhci_ep_ctx
@@ -1123,6 +1128,7 @@ struct xhci_ring {
*/
u32 cycle_state;
unsigned int stream_id;
+ bool last_td_was_short;
};
struct xhci_erst_entry {
@@ -1290,6 +1296,20 @@ struct xhci_hcd {
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
#define XHCI_AMD_PLL_FIX (1 << 3)
+#define XHCI_SPURIOUS_SUCCESS (1 << 4)
+/*
+ * Certain Intel host controllers have a limit to the number of endpoint
+ * contexts they can handle. Ideally, they would signal that they can't handle
+ * any more endpoint contexts by returning a Resource Error for the Configure
+ * Endpoint command, but they don't. Instead, they expect software to keep track
+ * of the number of active endpoints for them, across configure endpoint
+ * commands, reset device commands, disable slot commands, and address device
+ * commands.
+ */
+#define XHCI_EP_LIMIT_QUIRK (1 << 5)
+#define XHCI_BROKEN_MSI (1 << 6)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
struct xhci_bus_state bus_state[2];
/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
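The XHCI_EP_LIMIT_QUIRK comment above boils down to pure software bookkeeping: since the affected controllers never return a Resource Error, the driver has to refuse new endpoint contexts once its own counter reaches the limit, and hand contexts back whenever a command fails or endpoints are dropped. A minimal sketch of that accounting, with hypothetical names rather than the driver's real helpers (which also parse the input control context and require xhci->lock to be held):

/* Simplified illustration only -- not the xhci driver's actual helpers. */
struct ep_accounting {
	unsigned int num_active_eps;	/* endpoint contexts currently reserved */
	unsigned int limit_active_eps;	/* hardware limit we must enforce */
};

/* Reserve 'wanted' contexts, failing the way the hardware should have. */
static int reserve_ep_contexts(struct ep_accounting *acc, unsigned int wanted)
{
	if (acc->num_active_eps + wanted > acc->limit_active_eps)
		return -ENOMEM;
	acc->num_active_eps += wanted;
	return 0;
}

/* Return contexts after a failed command or once endpoints are dropped. */
static void release_ep_contexts(struct ep_accounting *acc, unsigned int count)
{
	acc->num_active_eps -= count;
}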
@@ -1338,9 +1358,6 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
static inline void xhci_writel(struct xhci_hcd *xhci,
const unsigned int val, __le32 __iomem *regs)
{
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
- regs, val);
writel(val, regs);
}
@@ -1368,9 +1385,6 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
u32 val_lo = lower_32_bits(val);
u32 val_hi = upper_32_bits(val);
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
- regs, (long unsigned int) val);
writel(val_lo, ptr);
writel(val_hi, ptr + 1);
}
@@ -1439,6 +1453,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep);
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep);
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ab8e1001e5e2..c71b0372786e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -96,6 +96,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kobject.h>
+#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index 3f2e07011a48..cfb5aa72b196 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -100,6 +100,7 @@ struct twl6030_usb {
u8 linkstat;
u8 asleep;
bool irq_enabled;
+ unsigned long features;
};
#define xceiv_to_twl(x) container_of((x), struct twl6030_usb, otg)
@@ -204,6 +205,12 @@ static int twl6030_start_srp(struct otg_transceiver *x)
static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
{
+ char *regulator_name;
+
+ if (twl->features & TWL6025_SUBCLASS)
+ regulator_name = "ldousb";
+ else
+ regulator_name = "vusb";
/* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG);
@@ -214,7 +221,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
/* Program MISC2 register and set bit VUSB_IN_VBAT */
twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2);
- twl->usb3v3 = regulator_get(twl->dev, "vusb");
+ twl->usb3v3 = regulator_get(twl->dev, regulator_name);
if (IS_ERR(twl->usb3v3))
return -ENODEV;
@@ -409,6 +416,7 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev)
twl->dev = &pdev->dev;
twl->irq1 = platform_get_irq(pdev, 0);
twl->irq2 = platform_get_irq(pdev, 1);
+ twl->features = pdata->features;
twl->otg.dev = twl->dev;
twl->otg.label = "twl6030";
twl->otg.set_host = twl6030_set_host;
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 206cfabc9286..547486ccd059 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -1380,5 +1380,6 @@ void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+ kfree(gpriv->uep);
kfree(gpriv);
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e8dbde55f6c5..162728977553 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -647,6 +647,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_3_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1d946cd238ba..ab1fcdf3c378 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -351,6 +351,7 @@
*/
#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
+#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2
/*
* Linx Technologies product ids
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 318dd00040a3..60b25d8ea0e2 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -311,10 +311,6 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_AC2726 0xfff5
#define ZTE_PRODUCT_AC8710T 0xffff
-/* ZTE PRODUCTS -- alternate vendor ID */
-#define ZTE_VENDOR_ID2 0x1d6b
-#define ZTE_PRODUCT_MF_330 0x0002
-
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -340,11 +336,12 @@ static void option_instat_callback(struct urb *urb);
#define TOSHIBA_PRODUCT_G450 0x0d45
#define ALINK_VENDOR_ID 0x1e0e
+#define ALINK_PRODUCT_PH300 0x9100
#define ALINK_PRODUCT_3GU 0x9200
/* ALCATEL PRODUCTS */
#define ALCATEL_VENDOR_ID 0x1bbb
-#define ALCATEL_PRODUCT_X060S 0x0000
+#define ALCATEL_PRODUCT_X060S_X200 0x0000
#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002
@@ -379,6 +376,9 @@ static void option_instat_callback(struct urb *urb);
* It seems to contain a Qualcomm QSC6240/6290 chipset */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
+/* Zoom */
+#define ZOOM_PRODUCT_4597 0x9607
+
/* Haier products */
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
@@ -432,6 +432,20 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
.reason = OPTION_BLACKLIST_SENDSETUP
};
+static const u8 alcatel_x200_no_sendsetup[] = { 0, 1 };
+static const struct option_blacklist_info alcatel_x200_blacklist = {
+ .infolen = ARRAY_SIZE(alcatel_x200_no_sendsetup),
+ .ifaceinfo = alcatel_x200_no_sendsetup,
+ .reason = OPTION_BLACKLIST_SENDSETUP
+};
+
+static const u8 zte_k3765_z_no_sendsetup[] = { 0, 1, 2 };
+static const struct option_blacklist_info zte_k3765_z_blacklist = {
+ .infolen = ARRAY_SIZE(zte_k3765_z_no_sendsetup),
+ .ifaceinfo = zte_k3765_z_no_sendsetup,
+ .reason = OPTION_BLACKLIST_SENDSETUP
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -916,13 +930,13 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
- { USB_DEVICE(ZTE_VENDOR_ID2, ZTE_PRODUCT_MF_330) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -935,13 +949,17 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+ { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
- { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
.driver_info = (kernel_ulong_t)&four_g_w14_blacklist
},
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
/* Pirelli */
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 00418995d8e9..e8ae21b2d387 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -819,6 +819,35 @@ Retry_Sense:
}
}
+ /*
+ * Some devices don't work or return incorrect data the first
+ * time they get a READ(10) command, or for the first READ(10)
+ * after a media change. If the INITIAL_READ10 flag is set,
+ * keep track of whether READ(10) commands succeed. If the
+ * previous one succeeded and this one failed, set the REDO_READ10
+ * flag to force a retry.
+ */
+ if (unlikely((us->fflags & US_FL_INITIAL_READ10) &&
+ srb->cmnd[0] == READ_10)) {
+ if (srb->result == SAM_STAT_GOOD) {
+ set_bit(US_FLIDX_READ10_WORKED, &us->dflags);
+ } else if (test_bit(US_FLIDX_READ10_WORKED, &us->dflags)) {
+ clear_bit(US_FLIDX_READ10_WORKED, &us->dflags);
+ set_bit(US_FLIDX_REDO_READ10, &us->dflags);
+ }
+
+ /*
+ * Next, if the REDO_READ10 flag is set, return a result
+ * code that will cause the SCSI core to retry the READ(10)
+ * command immediately.
+ */
+ if (test_bit(US_FLIDX_REDO_READ10, &us->dflags)) {
+ clear_bit(US_FLIDX_REDO_READ10, &us->dflags);
+ srb->result = DID_IMM_RETRY << 16;
+ srb->sense_buffer[0] = 0;
+ }
+ }
+
/* Did we transfer less than the minimum amount required? */
if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) &&
scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c1602b8c5594..ccff3483eebc 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1114,6 +1114,16 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Paul Hartman <paul.hartman+linux@gmail.com>
+ * This card reader returns "Illegal Request, Logical Block Address
+ * Out of Range" for the first READ(10) after a new card is inserted.
+ */
+UNUSUAL_DEV( 0x090c, 0x6000, 0x0100, 0x0100,
+ "Feiya",
+ "SD/SDHC Card Reader",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_INITIAL_READ10 ),
+
/* This Pentax still camera is not conformant
* to the USB storage specification: -
* - It does not like the INQUIRY command. So we must handle this command
@@ -1888,6 +1898,15 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_READ_DISC_INFO ),
+/* Reported by Sven Geggus <sven-usbst@geggus.net>
+ * This encrypted pen drive returns bogus data for the initial READ(10).
+ */
+UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
+ "Corsair",
+ "Padlock v2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_INITIAL_READ10 ),
+
/* Patch by Richard Schütz <r.schtz@t-online.de>
* This external hard drive enclosure uses a JMicron chip which
* needs the US_FL_IGNORE_RESIDUE flag to work properly. */
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5ee7ac42e08f..0ca095820f3e 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -440,7 +440,8 @@ static void adjust_quirks(struct us_data *us)
US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
- US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16);
+ US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
+ US_FL_INITIAL_READ10);
p = quirks;
while (*p) {
@@ -490,6 +491,9 @@ static void adjust_quirks(struct us_data *us)
case 'm':
f |= US_FL_MAX_SECTORS_64;
break;
+ case 'n':
+ f |= US_FL_INITIAL_READ10;
+ break;
case 'o':
f |= US_FL_CAPACITY_OK;
break;
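Worth noting for users: the new 'n' letter plugs into usb-storage's existing quirks override, which, if memory serves, accepts vendor:product:flag-letter triples either as a module parameter or on the kernel command line. Assuming that format, something like usb-storage.quirks=090c:6000:n would force the initial-READ(10) workaround for a device that has no unusual_devs.h entry yet; the VID/PID here are simply the Feiya reader from the unusual_devs.h hunk above, used as an example.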
@@ -953,6 +957,13 @@ int usb_stor_probe2(struct us_data *us)
if (result)
goto BadDevice;
+ /*
+ * If the device returns invalid data for the first READ(10)
+ * command, indicate the command should be retried.
+ */
+ if (us->fflags & US_FL_INITIAL_READ10)
+ set_bit(US_FLIDX_REDO_READ10, &us->dflags);
+
/* Acquire all the other resources and add the host */
result = usb_stor_acquire_resources(us);
if (result)
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 89d3bfff98df..7b0f2113632e 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -73,6 +73,8 @@ struct us_unusual_dev {
#define US_FLIDX_RESETTING 4 /* device reset in progress */
#define US_FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */
#define US_FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */
+#define US_FLIDX_REDO_READ10 7 /* redo READ(10) command */
+#define US_FLIDX_READ10_WORKED 8 /* previous READ(10) succeeded */
#define USB_STOR_STRING_LEN 32
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2f7c76a85e53..e224a92baa16 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -144,7 +144,7 @@ static void handle_tx(struct vhost_net *net)
}
mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
+ vhost_disable_notify(&net->dev, vq);
if (wmem < sock->sk->sk_sndbuf / 2)
tx_poll_stop(net);
@@ -166,8 +166,8 @@ static void handle_tx(struct vhost_net *net)
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
break;
}
- if (unlikely(vhost_enable_notify(vq))) {
- vhost_disable_notify(vq);
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_disable_notify(&net->dev, vq);
continue;
}
break;
@@ -315,7 +315,7 @@ static void handle_rx(struct vhost_net *net)
return;
mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
+ vhost_disable_notify(&net->dev, vq);
vhost_hlen = vq->vhost_hlen;
sock_hlen = vq->sock_hlen;
@@ -334,10 +334,10 @@ static void handle_rx(struct vhost_net *net)
break;
/* OK, now we need to know about added descriptors. */
if (!headcount) {
- if (unlikely(vhost_enable_notify(vq))) {
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
/* They have slipped one in as we were
* doing that: check again. */
- vhost_disable_notify(vq);
+ vhost_disable_notify(&net->dev, vq);
continue;
}
/* Nothing new? Wait for eventfd to tell us
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 099f30230d06..734e1d74ad80 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n)
return;
mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
+ vhost_disable_notify(&n->dev, vq);
for (;;) {
head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
@@ -61,8 +61,8 @@ static void handle_vq(struct vhost_test *n)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- if (unlikely(vhost_enable_notify(vq))) {
- vhost_disable_notify(vq);
+ if (unlikely(vhost_enable_notify(&n->dev, vq))) {
+ vhost_disable_notify(&n->dev, vq);
continue;
}
break;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 7aa4eea930f1..ea966b356352 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -37,6 +37,9 @@ enum {
VHOST_MEMORY_F_LOG = 0x1,
};
+#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -161,6 +164,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->last_avail_idx = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
+ vq->signalled_used = 0;
+ vq->signalled_used_valid = false;
vq->used_flags = 0;
vq->log_used = false;
vq->log_addr = -1ull;
@@ -489,16 +494,17 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
return 1;
}
-static int vq_access_ok(unsigned int num,
+static int vq_access_ok(struct vhost_dev *d, unsigned int num,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used)
{
+ size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
access_ok(VERIFY_READ, avail,
- sizeof *avail + num * sizeof *avail->ring) &&
+ sizeof *avail + num * sizeof *avail->ring + s) &&
access_ok(VERIFY_WRITE, used,
- sizeof *used + num * sizeof *used->ring);
+ sizeof *used + num * sizeof *used->ring + s);
}
/* Can we log writes? */
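For orientation, the extra s = 2 bytes added to these range checks (and the vhost_used_event()/vhost_avail_event() macros defined earlier in this file) both come from the VIRTIO_RING_F_EVENT_IDX layout, where one 16-bit event index sits immediately after each ring. A conceptual sketch of the avail side only (not the real definitions, which live in include/linux/virtio_ring.h; the used ring mirrors it, with an avail_event following used->ring[num]):

/* Layout sketch. With VIRTIO_RING_F_EVENT_IDX negotiated, a trailing __u16
 * follows the ring array; vhost_used_event(vq) points at it, and the
 * access_ok()/log ranges above grow by sizeof(__u16) == 2 to cover it. */
struct avail_ring_layout {
	__u16 flags;
	__u16 idx;
	__u16 ring[];	/* vq->num entries */
	/* followed immediately by: __u16 used_event */
};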
@@ -514,9 +520,11 @@ int vhost_log_access_ok(struct vhost_dev *dev)
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
+static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
+ void __user *log_base)
{
struct vhost_memory *mp;
+ size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
mp = rcu_dereference_protected(vq->dev->memory,
lockdep_is_held(&vq->mutex));
@@ -524,15 +532,15 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
- vq->num * sizeof *vq->used->ring));
+ vq->num * sizeof *vq->used->ring + s));
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
- return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
- vq_log_access_ok(vq, vq->log_base);
+ return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
+ vq_log_access_ok(vq->dev, vq, vq->log_base);
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
@@ -577,6 +585,7 @@ static int init_used(struct vhost_virtqueue *vq,
if (r)
return r;
+ vq->signalled_used_valid = false;
return get_user(vq->last_used_idx, &used->idx);
}
@@ -674,7 +683,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
* If it is not, we don't as size might not have been setup.
* We will verify when backend is configured. */
if (vq->private_data) {
- if (!vq_access_ok(vq->num,
+ if (!vq_access_ok(d, vq->num,
(void __user *)(unsigned long)a.desc_user_addr,
(void __user *)(unsigned long)a.avail_user_addr,
(void __user *)(unsigned long)a.used_user_addr)) {
@@ -818,7 +827,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
vq = d->vqs + i;
mutex_lock(&vq->mutex);
/* If ring is inactive, will check when it's enabled. */
- if (vq->private_data && !vq_log_access_ok(vq, base))
+ if (vq->private_data && !vq_log_access_ok(d, vq, base))
r = -EFAULT;
else
vq->log_base = base;
@@ -1219,6 +1228,10 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
/* On success, increment avail index. */
vq->last_avail_idx++;
+
+ /* Assume notifications from guest are disabled at this point,
+ * if they aren't we would need to update avail_event index. */
+ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
return head;
}
@@ -1267,6 +1280,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
eventfd_signal(vq->log_ctx, 1);
}
vq->last_used_idx++;
+ /* If the driver never bothers to signal in a very long while,
+ * used index might wrap around. If that happens, invalidate
+ * signalled_used index we stored. TODO: make sure driver
+ * signals at least once in 2^16 and remove this. */
+ if (unlikely(vq->last_used_idx == vq->signalled_used))
+ vq->signalled_used_valid = false;
return 0;
}
@@ -1275,6 +1294,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
unsigned count)
{
struct vring_used_elem __user *used;
+ u16 old, new;
int start;
start = vq->last_used_idx % vq->num;
@@ -1292,7 +1312,14 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
((void __user *)used - (void __user *)vq->used),
count * sizeof *used);
}
- vq->last_used_idx += count;
+ old = vq->last_used_idx;
+ new = (vq->last_used_idx += count);
+ /* If the driver never bothers to signal in a very long while,
+ * used index might wrap around. If that happens, invalidate
+ * signalled_used index we stored. TODO: make sure driver
+ * signals at least once in 2^16 and remove this. */
+ if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
+ vq->signalled_used_valid = false;
return 0;
}
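The wrap-around test just added, (u16)(new - vq->signalled_used) < (u16)(new - old), is clearer with concrete values: it checks whether the remembered signalled_used index sits inside the window (old, new] that this batch of used entries just advanced through, in which case the stored value can no longer be trusted. A small stand-alone illustration (plain C with made-up numbers, not driver code):

#include <stdint.h>
#include <stdio.h>

/* True if 'mark' lies in the window (old, new] of a 16-bit counter that
 * just advanced from 'old' to 'new', wrapping modulo 65536 if needed. */
static int mark_passed(uint16_t mark, uint16_t old, uint16_t new)
{
	return (uint16_t)(new - mark) < (uint16_t)(new - old);
}

int main(void)
{
	/* The counter moves 0xfff0 -> 0x0010, wrapping through zero. */
	printf("%d\n", mark_passed(0x0005, 0xfff0, 0x0010)); /* 1: inside, invalidate */
	printf("%d\n", mark_passed(0x2000, 0xfff0, 0x0010)); /* 0: outside, still valid */
	return 0;
}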
@@ -1331,29 +1358,47 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
return r;
}
-/* This actually signals the guest, using eventfd. */
-void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- __u16 flags;
-
+ __u16 old, new, event;
+ bool v;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
smp_mb();
- if (__get_user(flags, &vq->avail->flags)) {
- vq_err(vq, "Failed to get flags");
- return;
+ if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ unlikely(vq->avail_idx == vq->last_avail_idx))
+ return true;
+
+ if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ __u16 flags;
+ if (__get_user(flags, &vq->avail->flags)) {
+ vq_err(vq, "Failed to get flags");
+ return true;
+ }
+ return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
}
+ old = vq->signalled_used;
+ v = vq->signalled_used_valid;
+ new = vq->signalled_used = vq->last_used_idx;
+ vq->signalled_used_valid = true;
- /* If they don't want an interrupt, don't signal, unless empty. */
- if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
- (vq->avail_idx != vq->last_avail_idx ||
- !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
- return;
+ if (unlikely(!v))
+ return true;
+ if (get_user(event, vhost_used_event(vq))) {
+ vq_err(vq, "Failed to get used event idx");
+ return true;
+ }
+ return vring_need_event(event, new, old);
+}
+
+/* This actually signals the guest, using eventfd. */
+void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
/* Signal the Guest, telling them we used something up. */
- if (vq->call_ctx)
+ if (vq->call_ctx && vhost_notify(dev, vq))
eventfd_signal(vq->call_ctx, 1);
}
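vhost_notify() ends by handing the decision to vring_need_event(), which this diff does not show. For context, the helper introduced alongside this series in include/linux/virtio_ring.h is essentially one modular comparison: signal only if the guest's published event index was crossed by the move of the used index from old to new. Restated with local names (same arithmetic):

/* Equivalent of vring_need_event(): true iff event_idx lies in [old, new),
 * i.e. the producer index just moved past the point at which the consumer
 * asked to be notified. */
static inline int need_event(__u16 event_idx, __u16 new, __u16 old)
{
	return (__u16)(new - event_idx - 1) < (__u16)(new - old);
}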
@@ -1376,7 +1421,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
}
/* OK, now we need to know about added descriptors. */
-bool vhost_enable_notify(struct vhost_virtqueue *vq)
+bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
u16 avail_idx;
int r;
@@ -1384,11 +1429,34 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
return false;
vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
- r = put_user(vq->used_flags, &vq->used->flags);
- if (r) {
- vq_err(vq, "Failed to enable notification at %p: %d\n",
- &vq->used->flags, r);
- return false;
+ if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r) {
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+ return false;
+ }
+ } else {
+ r = put_user(vq->avail_idx, vhost_avail_event(vq));
+ if (r) {
+ vq_err(vq, "Failed to update avail event index at %p: %d\n",
+ vhost_avail_event(vq), r);
+ return false;
+ }
+ }
+ if (unlikely(vq->log_used)) {
+ void __user *used;
+ /* Make sure data is seen before log. */
+ smp_wmb();
+ used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ?
+ &vq->used->flags : vhost_avail_event(vq);
+ /* Log used flags or event index entry write. Both are 16 bit
+ * fields. */
+ log_write(vq->log_base, vq->log_addr +
+ (used - (void __user *)vq->used),
+ sizeof(u16));
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
}
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
@@ -1404,15 +1472,17 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
}
/* We don't need to be notified again. */
-void vhost_disable_notify(struct vhost_virtqueue *vq)
+void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
int r;
if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
return;
vq->used_flags |= VRING_USED_F_NO_NOTIFY;
- r = put_user(vq->used_flags, &vq->used->flags);
- if (r)
- vq_err(vq, "Failed to enable notification at %p: %d\n",
- &vq->used->flags, r);
+ if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r)
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+ }
}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b3363ae38518..8e03379dd30f 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -84,6 +84,12 @@ struct vhost_virtqueue {
/* Used flags */
u16 used_flags;
+ /* Last used index value we have signalled on */
+ u16 signalled_used;
+
+ /* Whether the signalled_used value above is valid */
+ bool signalled_used_valid;
+
/* Log writes to used structure. */
bool log_used;
u64 log_addr;
@@ -149,8 +155,8 @@ void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
-void vhost_disable_notify(struct vhost_virtqueue *);
-bool vhost_enable_notify(struct vhost_virtqueue *);
+void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
+bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
@@ -162,11 +168,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
} while (0)
enum {
- VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
- (1 << VIRTIO_RING_F_INDIRECT_DESC) |
- (1 << VHOST_F_LOG_ALL) |
- (1 << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1 << VIRTIO_NET_F_MRG_RXBUF),
+ VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
+ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ULL << VIRTIO_RING_F_EVENT_IDX) |
+ (1ULL << VHOST_F_LOG_ALL) |
+ (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};
static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 3ec4923c2d84..c22e8d39a2cb 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -515,11 +515,10 @@ static int __devinit arcfb_probe(struct platform_device *dev)
/* We need a flat backing store for the Arc's
less-flat actual paged framebuffer */
- if (!(videomemory = vmalloc(videomemorysize)))
+ videomemory = vzalloc(videomemorysize);
+ if (!videomemory)
return retval;
- memset(videomemory, 0, videomemorysize);
-
info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev);
if (!info)
goto err;
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index ebb893c49e90..d7aaec5667bf 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -248,10 +248,6 @@ static int atyfb_sync(struct fb_info *info);
static int aty_init(struct fb_info *info);
-#ifdef CONFIG_ATARI
-static int store_video_par(char *videopar, unsigned char m64_num);
-#endif
-
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -2268,11 +2264,13 @@ error:
return;
}
+#ifdef CONFIG_PCI
static void aty_bl_exit(struct backlight_device *bd)
{
backlight_device_unregister(bd);
printk("aty: Backlight unloaded\n");
}
+#endif /* CONFIG_PCI */
#endif /* CONFIG_FB_ATY_BACKLIGHT */
@@ -2789,7 +2787,7 @@ aty_init_exit:
return ret;
}
-#ifdef CONFIG_ATARI
+#if defined(CONFIG_ATARI) && !defined(MODULE)
static int __devinit store_video_par(char *video_str, unsigned char m64_num)
{
char *p;
@@ -2818,7 +2816,7 @@ static int __devinit store_video_par(char *video_str, unsigned char m64_num)
phys_vmembase[m64_num] = 0;
return -1;
}
-#endif /* CONFIG_ATARI */
+#endif /* CONFIG_ATARI && !MODULE */
/*
* Blank the display.
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0c9373bedd1f..2d93c8d61ad5 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -302,6 +302,18 @@ config BACKLIGHT_ADP8860
To compile this driver as a module, choose M here: the module will
be called adp8860_bl.
+config BACKLIGHT_ADP8870
+ tristate "Backlight Driver for ADP8870 using WLED"
+ depends on BACKLIGHT_CLASS_DEVICE && I2C
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ If you have an LCD backlight connected to the ADP8870,
+ say Y here to enable this driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called adp8870_bl.
+
config BACKLIGHT_88PM860X
tristate "Backlight Driver for 88PM8606 using WLED"
depends on MFD_88PM860X
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index b9ca8490df87..ee72adb8786e 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
new file mode 100644
index 000000000000..05a8832bb3eb
--- /dev/null
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -0,0 +1,1012 @@
+/*
+ * Backlight driver for Analog Devices ADP8870 Backlight Devices
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <linux/i2c/adp8870.h>
+#define ADP8870_EXT_FEATURES
+#define ADP8870_USE_LEDS
+
+
+#define ADP8870_MFDVID 0x00 /* Manufacturer and device ID */
+#define ADP8870_MDCR 0x01 /* Device mode and status */
+#define ADP8870_INT_STAT 0x02 /* Interrupts status */
+#define ADP8870_INT_EN 0x03 /* Interrupts enable */
+#define ADP8870_CFGR 0x04 /* Configuration register */
+#define ADP8870_BLSEL 0x05 /* Sink enable backlight or independent */
+#define ADP8870_PWMLED 0x06 /* PWM Enable Selection Register */
+#define ADP8870_BLOFF 0x07 /* Backlight off timeout */
+#define ADP8870_BLDIM 0x08 /* Backlight dim timeout */
+#define ADP8870_BLFR 0x09 /* Backlight fade in and out rates */
+#define ADP8870_BLMX1 0x0A /* Backlight (Brightness Level 1-daylight) maximum current */
+#define ADP8870_BLDM1 0x0B /* Backlight (Brightness Level 1-daylight) dim current */
+#define ADP8870_BLMX2 0x0C /* Backlight (Brightness Level 2-bright) maximum current */
+#define ADP8870_BLDM2 0x0D /* Backlight (Brightness Level 2-bright) dim current */
+#define ADP8870_BLMX3 0x0E /* Backlight (Brightness Level 3-office) maximum current */
+#define ADP8870_BLDM3 0x0F /* Backlight (Brightness Level 3-office) dim current */
+#define ADP8870_BLMX4 0x10 /* Backlight (Brightness Level 4-indoor) maximum current */
+#define ADP8870_BLDM4 0x11 /* Backlight (Brightness Level 4-indoor) dim current */
+#define ADP8870_BLMX5 0x12 /* Backlight (Brightness Level 5-dark) maximum current */
+#define ADP8870_BLDM5 0x13 /* Backlight (Brightness Level 5-dark) dim current */
+#define ADP8870_ISCLAW 0x1A /* Independent sink current fade law register */
+#define ADP8870_ISCC 0x1B /* Independent sink current control register */
+#define ADP8870_ISCT1 0x1C /* Independent Sink Current Timer Register LED[7:5] */
+#define ADP8870_ISCT2 0x1D /* Independent Sink Current Timer Register LED[4:1] */
+#define ADP8870_ISCF 0x1E /* Independent sink current fade register */
+#define ADP8870_ISC1 0x1F /* Independent Sink Current LED1 */
+#define ADP8870_ISC2 0x20 /* Independent Sink Current LED2 */
+#define ADP8870_ISC3 0x21 /* Independent Sink Current LED3 */
+#define ADP8870_ISC4 0x22 /* Independent Sink Current LED4 */
+#define ADP8870_ISC5 0x23 /* Independent Sink Current LED5 */
+#define ADP8870_ISC6 0x24 /* Independent Sink Current LED6 */
+#define ADP8870_ISC7 0x25 /* Independent Sink Current LED7 (Brightness Level 1-daylight) */
+#define ADP8870_ISC7_L2 0x26 /* Independent Sink Current LED7 (Brightness Level 2-bright) */
+#define ADP8870_ISC7_L3 0x27 /* Independent Sink Current LED7 (Brightness Level 3-office) */
+#define ADP8870_ISC7_L4 0x28 /* Independent Sink Current LED7 (Brightness Level 4-indoor) */
+#define ADP8870_ISC7_L5 0x29 /* Independent Sink Current LED7 (Brightness Level 5-dark) */
+#define ADP8870_CMP_CTL 0x2D /* ALS Comparator Control Register */
+#define ADP8870_ALS1_EN 0x2E /* Main ALS comparator level enable */
+#define ADP8870_ALS2_EN 0x2F /* Second ALS comparator level enable */
+#define ADP8870_ALS1_STAT 0x30 /* Main ALS Comparator Status Register */
+#define ADP8870_ALS2_STAT 0x31 /* Second ALS Comparator Status Register */
+#define ADP8870_L2TRP 0x32 /* L2 comparator reference */
+#define ADP8870_L2HYS 0x33 /* L2 hysteresis */
+#define ADP8870_L3TRP 0x34 /* L3 comparator reference */
+#define ADP8870_L3HYS 0x35 /* L3 hysteresis */
+#define ADP8870_L4TRP 0x36 /* L4 comparator reference */
+#define ADP8870_L4HYS 0x37 /* L4 hysteresis */
+#define ADP8870_L5TRP 0x38 /* L5 comparator reference */
+#define ADP8870_L5HYS 0x39 /* L5 hysteresis */
+#define ADP8870_PH1LEVL 0x40 /* First phototransistor ambient light level-low byte register */
+#define ADP8870_PH1LEVH 0x41 /* First phototransistor ambient light level-high byte register */
+#define ADP8870_PH2LEVL 0x42 /* Second phototransistor ambient light level-low byte register */
+#define ADP8870_PH2LEVH 0x43 /* Second phototransistor ambient light level-high byte register */
+
+#define ADP8870_MANUFID 0x3 /* Analog Devices ADP8870 Manufacturer and device ID */
+#define ADP8870_DEVID(x) ((x) & 0xF)
+#define ADP8870_MANID(x) ((x) >> 4)
+
+/* MDCR Device mode and status */
+#define D7ALSEN (1 << 7)
+#define INT_CFG (1 << 6)
+#define NSTBY (1 << 5)
+#define DIM_EN (1 << 4)
+#define GDWN_DIS (1 << 3)
+#define SIS_EN (1 << 2)
+#define CMP_AUTOEN (1 << 1)
+#define BLEN (1 << 0)
+
+/* ADP8870_ALS1_EN Main ALS comparator level enable */
+#define L5_EN (1 << 3)
+#define L4_EN (1 << 2)
+#define L3_EN (1 << 1)
+#define L2_EN (1 << 0)
+
+#define CFGR_BLV_SHIFT 3
+#define CFGR_BLV_MASK 0x7
+#define ADP8870_FLAG_LED_MASK 0xFF
+
+#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1))
+#define ALS_CMPR_CFG_VAL(filt) ((0x7 & (filt)) << 1)
+
+struct adp8870_bl {
+ struct i2c_client *client;
+ struct backlight_device *bl;
+ struct adp8870_led *led;
+ struct adp8870_backlight_platform_data *pdata;
+ struct mutex lock;
+ unsigned long cached_daylight_max;
+ int id;
+ int revid;
+ int current_brightness;
+};
+
+struct adp8870_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct i2c_client *client;
+ enum led_brightness new_brightness;
+ int id;
+ int flags;
+};
+
+static int adp8870_read(struct i2c_client *client, int reg, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = ret;
+ return 0;
+}
+
+
+static int adp8870_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ int ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret)
+ dev_err(&client->dev, "failed to write\n");
+
+ return ret;
+}
+
+static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
+{
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = adp8870_read(client, reg, &reg_val);
+
+ if (!ret && ((reg_val & bit_mask) == 0)) {
+ reg_val |= bit_mask;
+ ret = adp8870_write(client, reg, reg_val);
+ }
+
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int adp8870_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
+{
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = adp8870_read(client, reg, &reg_val);
+
+ if (!ret && (reg_val & bit_mask)) {
+ reg_val &= ~bit_mask;
+ ret = adp8870_write(client, reg, reg_val);
+ }
+
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+/*
+ * Independent sink / LED
+ */
+#if defined(ADP8870_USE_LEDS)
+static void adp8870_led_work(struct work_struct *work)
+{
+ struct adp8870_led *led = container_of(work, struct adp8870_led, work);
+ adp8870_write(led->client, ADP8870_ISC1 + led->id - 1,
+ led->new_brightness >> 1);
+}
+
+static void adp8870_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct adp8870_led *led;
+
+ led = container_of(led_cdev, struct adp8870_led, cdev);
+ led->new_brightness = value;
+ /*
+ * Use workqueue for IO since I2C operations can sleep.
+ */
+ schedule_work(&led->work);
+}
+
+static int adp8870_led_setup(struct adp8870_led *led)
+{
+ struct i2c_client *client = led->client;
+ int ret = 0;
+
+ ret = adp8870_write(client, ADP8870_ISC1 + led->id - 1, 0);
+ if (ret)
+ return ret;
+
+ ret = adp8870_set_bits(client, ADP8870_ISCC, 1 << (led->id - 1));
+ if (ret)
+ return ret;
+
+ if (led->id > 4)
+ ret = adp8870_set_bits(client, ADP8870_ISCT1,
+ (led->flags & 0x3) << ((led->id - 5) * 2));
+ else
+ ret = adp8870_set_bits(client, ADP8870_ISCT2,
+ (led->flags & 0x3) << ((led->id - 1) * 2));
+
+ return ret;
+}
+
+static int __devinit adp8870_led_probe(struct i2c_client *client)
+{
+ struct adp8870_backlight_platform_data *pdata =
+ client->dev.platform_data;
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+ struct adp8870_led *led, *led_dat;
+ struct led_info *cur_led;
+ int ret, i;
+
+
+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&client->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
+ if (ret)
+ goto err_free;
+
+ ret = adp8870_write(client, ADP8870_ISCT1,
+ (pdata->led_on_time & 0x3) << 6);
+ if (ret)
+ goto err_free;
+
+ ret = adp8870_write(client, ADP8870_ISCF,
+ FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
+ if (ret)
+ goto err_free;
+
+ for (i = 0; i < pdata->num_leds; ++i) {
+ cur_led = &pdata->leds[i];
+ led_dat = &led[i];
+
+ led_dat->id = cur_led->flags & ADP8870_FLAG_LED_MASK;
+
+ if (led_dat->id > 7 || led_dat->id < 1) {
+ dev_err(&client->dev, "Invalid LED ID %d\n",
+ led_dat->id);
+ goto err;
+ }
+
+ if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) {
+ dev_err(&client->dev, "LED %d used by Backlight\n",
+ led_dat->id);
+ goto err;
+ }
+
+ led_dat->cdev.name = cur_led->name;
+ led_dat->cdev.default_trigger = cur_led->default_trigger;
+ led_dat->cdev.brightness_set = adp8870_led_set;
+ led_dat->cdev.brightness = LED_OFF;
+ led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT;
+ led_dat->client = client;
+ led_dat->new_brightness = LED_OFF;
+ INIT_WORK(&led_dat->work, adp8870_led_work);
+
+ ret = led_classdev_register(&client->dev, &led_dat->cdev);
+ if (ret) {
+ dev_err(&client->dev, "failed to register LED %d\n",
+ led_dat->id);
+ goto err;
+ }
+
+ ret = adp8870_led_setup(led_dat);
+ if (ret) {
+ dev_err(&client->dev, "failed to write\n");
+ i++;
+ goto err;
+ }
+ }
+
+ data->led = led;
+
+ return 0;
+
+ err:
+ for (i = i - 1; i >= 0; --i) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ err_free:
+ kfree(led);
+
+ return ret;
+}
+
+static int __devexit adp8870_led_remove(struct i2c_client *client)
+{
+ struct adp8870_backlight_platform_data *pdata =
+ client->dev.platform_data;
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&data->led[i].cdev);
+ cancel_work_sync(&data->led[i].work);
+ }
+
+ kfree(data->led);
+ return 0;
+}
+#else
+static int __devinit adp8870_led_probe(struct i2c_client *client)
+{
+ return 0;
+}
+
+static int __devexit adp8870_led_remove(struct i2c_client *client)
+{
+ return 0;
+}
+#endif
+
+static int adp8870_bl_set(struct backlight_device *bl, int brightness)
+{
+ struct adp8870_bl *data = bl_get_data(bl);
+ struct i2c_client *client = data->client;
+ int ret = 0;
+
+ if (data->pdata->en_ambl_sens) {
+ if ((brightness > 0) && (brightness < ADP8870_MAX_BRIGHTNESS)) {
+ /* Disable Ambient Light auto adjust */
+ ret = adp8870_clr_bits(client, ADP8870_MDCR,
+ CMP_AUTOEN);
+ if (ret)
+ return ret;
+ ret = adp8870_write(client, ADP8870_BLMX1, brightness);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
+ * restore daylight l1 sysfs brightness
+ */
+ ret = adp8870_write(client, ADP8870_BLMX1,
+ data->cached_daylight_max);
+ if (ret)
+ return ret;
+
+ ret = adp8870_set_bits(client, ADP8870_MDCR,
+ CMP_AUTOEN);
+ if (ret)
+ return ret;
+ }
+ } else {
+ ret = adp8870_write(client, ADP8870_BLMX1, brightness);
+ if (ret)
+ return ret;
+ }
+
+ if (data->current_brightness && brightness == 0)
+ ret = adp8870_set_bits(client,
+ ADP8870_MDCR, DIM_EN);
+ else if (data->current_brightness == 0 && brightness)
+ ret = adp8870_clr_bits(client,
+ ADP8870_MDCR, DIM_EN);
+
+ if (!ret)
+ data->current_brightness = brightness;
+
+ return ret;
+}
+
+static int adp8870_bl_update_status(struct backlight_device *bl)
+{
+ int brightness = bl->props.brightness;
+ if (bl->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ return adp8870_bl_set(bl, brightness);
+}
+
+static int adp8870_bl_get_brightness(struct backlight_device *bl)
+{
+ struct adp8870_bl *data = bl_get_data(bl);
+
+ return data->current_brightness;
+}
+
+static const struct backlight_ops adp8870_bl_ops = {
+ .update_status = adp8870_bl_update_status,
+ .get_brightness = adp8870_bl_get_brightness,
+};
+
+static int adp8870_bl_setup(struct backlight_device *bl)
+{
+ struct adp8870_bl *data = bl_get_data(bl);
+ struct i2c_client *client = data->client;
+ struct adp8870_backlight_platform_data *pdata = data->pdata;
+ int ret = 0;
+
+ ret = adp8870_write(client, ADP8870_BLSEL, ~pdata->bl_led_assign);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_PWMLED, pdata->pwm_assign);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLMX1, pdata->l1_daylight_max);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLDM1, pdata->l1_daylight_dim);
+ if (ret)
+ return ret;
+
+ if (pdata->en_ambl_sens) {
+ data->cached_daylight_max = pdata->l1_daylight_max;
+ ret = adp8870_write(client, ADP8870_BLMX2,
+ pdata->l2_bright_max);
+ if (ret)
+ return ret;
+ ret = adp8870_write(client, ADP8870_BLDM2,
+ pdata->l2_bright_dim);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLMX3,
+ pdata->l3_office_max);
+ if (ret)
+ return ret;
+ ret = adp8870_write(client, ADP8870_BLDM3,
+ pdata->l3_office_dim);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLMX4,
+ pdata->l4_indoor_max);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLDM4,
+ pdata->l4_indor_dim);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLMX5,
+ pdata->l5_dark_max);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLDM5,
+ pdata->l5_dark_dim);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L2TRP, pdata->l2_trip);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L2HYS, pdata->l2_hyst);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L3TRP, pdata->l3_trip);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L3HYS, pdata->l3_hyst);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L4TRP, pdata->l4_trip);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L4HYS, pdata->l4_hyst);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L5TRP, pdata->l5_trip);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_L5HYS, pdata->l5_hyst);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_ALS1_EN, L5_EN | L4_EN |
+ L3_EN | L2_EN);
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_CMP_CTL,
+ ALS_CMPR_CFG_VAL(pdata->abml_filt));
+ if (ret)
+ return ret;
+ }
+
+ ret = adp8870_write(client, ADP8870_CFGR,
+ BL_CFGR_VAL(pdata->bl_fade_law, 0));
+ if (ret)
+ return ret;
+
+ ret = adp8870_write(client, ADP8870_BLFR, FADE_VAL(pdata->bl_fade_in,
+ pdata->bl_fade_out));
+ if (ret)
+ return ret;
+ /*
+ * ADP8870 Rev0 requires GDWN_DIS bit set
+ */
+
+ ret = adp8870_set_bits(client, ADP8870_MDCR, BLEN | DIM_EN | NSTBY |
+ (data->revid == 0 ? GDWN_DIS : 0));
+
+ return ret;
+}
+
+static ssize_t adp8870_show(struct device *dev, char *buf, int reg)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ int error;
+ uint8_t reg_val;
+
+ mutex_lock(&data->lock);
+ error = adp8870_read(data->client, reg, &reg_val);
+ mutex_unlock(&data->lock);
+
+ if (error < 0)
+ return error;
+
+ return sprintf(buf, "%u\n", reg_val);
+}
+
+static ssize_t adp8870_store(struct device *dev, const char *buf,
+ size_t count, int reg)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->lock);
+ adp8870_write(data->client, reg, val);
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t adp8870_bl_l5_dark_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX5);
+}
+
+static ssize_t adp8870_bl_l5_dark_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLMX5);
+}
+static DEVICE_ATTR(l5_dark_max, 0664, adp8870_bl_l5_dark_max_show,
+ adp8870_bl_l5_dark_max_store);
+
+
+static ssize_t adp8870_bl_l4_indoor_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX4);
+}
+
+static ssize_t adp8870_bl_l4_indoor_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLMX4);
+}
+static DEVICE_ATTR(l4_indoor_max, 0664, adp8870_bl_l4_indoor_max_show,
+ adp8870_bl_l4_indoor_max_store);
+
+
+static ssize_t adp8870_bl_l3_office_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX3);
+}
+
+static ssize_t adp8870_bl_l3_office_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLMX3);
+}
+
+static DEVICE_ATTR(l3_office_max, 0664, adp8870_bl_l3_office_max_show,
+ adp8870_bl_l3_office_max_store);
+
+static ssize_t adp8870_bl_l2_bright_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX2);
+}
+
+static ssize_t adp8870_bl_l2_bright_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLMX2);
+}
+static DEVICE_ATTR(l2_bright_max, 0664, adp8870_bl_l2_bright_max_show,
+ adp8870_bl_l2_bright_max_store);
+
+static ssize_t adp8870_bl_l1_daylight_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLMX1);
+}
+
+static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ if (ret)
+ return ret;
+
+ return adp8870_store(dev, buf, count, ADP8870_BLMX1);
+}
+static DEVICE_ATTR(l1_daylight_max, 0664, adp8870_bl_l1_daylight_max_show,
+ adp8870_bl_l1_daylight_max_store);
+
+static ssize_t adp8870_bl_l5_dark_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM5);
+}
+
+static ssize_t adp8870_bl_l5_dark_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM5);
+}
+static DEVICE_ATTR(l5_dark_dim, 0664, adp8870_bl_l5_dark_dim_show,
+ adp8870_bl_l5_dark_dim_store);
+
+static ssize_t adp8870_bl_l4_indoor_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM4);
+}
+
+static ssize_t adp8870_bl_l4_indoor_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM4);
+}
+static DEVICE_ATTR(l4_indoor_dim, 0664, adp8870_bl_l4_indoor_dim_show,
+ adp8870_bl_l4_indoor_dim_store);
+
+
+static ssize_t adp8870_bl_l3_office_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM3);
+}
+
+static ssize_t adp8870_bl_l3_office_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM3);
+}
+static DEVICE_ATTR(l3_office_dim, 0664, adp8870_bl_l3_office_dim_show,
+ adp8870_bl_l3_office_dim_store);
+
+static ssize_t adp8870_bl_l2_bright_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM2);
+}
+
+static ssize_t adp8870_bl_l2_bright_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM2);
+}
+static DEVICE_ATTR(l2_bright_dim, 0664, adp8870_bl_l2_bright_dim_show,
+ adp8870_bl_l2_bright_dim_store);
+
+static ssize_t adp8870_bl_l1_daylight_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8870_show(dev, buf, ADP8870_BLDM1);
+}
+
+static ssize_t adp8870_bl_l1_daylight_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8870_store(dev, buf, count, ADP8870_BLDM1);
+}
+static DEVICE_ATTR(l1_daylight_dim, 0664, adp8870_bl_l1_daylight_dim_show,
+ adp8870_bl_l1_daylight_dim_store);
+
+#ifdef ADP8870_EXT_FEATURES
+static ssize_t adp8870_bl_ambient_light_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ int error;
+ uint8_t reg_val;
+ uint16_t ret_val;
+
+ mutex_lock(&data->lock);
+ error = adp8870_read(data->client, ADP8870_PH1LEVL, &reg_val);
+ if (error < 0) {
+ mutex_unlock(&data->lock);
+ return error;
+ }
+ ret_val = reg_val;
+ error = adp8870_read(data->client, ADP8870_PH1LEVH, &reg_val);
+ mutex_unlock(&data->lock);
+
+ if (error < 0)
+ return error;
+
+ /* Return 13-bit conversion value for the first light sensor */
+ ret_val += (reg_val & 0x1F) << 8;
+
+ return sprintf(buf, "%u\n", ret_val);
+}
+static DEVICE_ATTR(ambient_light_level, 0444,
+ adp8870_bl_ambient_light_level_show, NULL);
+
+static ssize_t adp8870_bl_ambient_light_zone_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ int error;
+ uint8_t reg_val;
+
+ mutex_lock(&data->lock);
+ error = adp8870_read(data->client, ADP8870_CFGR, &reg_val);
+ mutex_unlock(&data->lock);
+
+ if (error < 0)
+ return error;
+
+ return sprintf(buf, "%u\n",
+ ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1);
+}
+
+static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adp8870_bl *data = dev_get_drvdata(dev);
+ unsigned long val;
+ uint8_t reg_val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val == 0) {
+ /* Enable automatic ambient light sensing */
+ adp8870_set_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
+ } else if ((val > 0) && (val < 6)) {
+ /* Disable automatic ambient light sensing */
+ adp8870_clr_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
+
+ /* Set user supplied ambient light zone */
+ mutex_lock(&data->lock);
+ adp8870_read(data->client, ADP8870_CFGR, &reg_val);
+ reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
+ reg_val |= (val - 1) << CFGR_BLV_SHIFT;
+ adp8870_write(data->client, ADP8870_CFGR, reg_val);
+ mutex_unlock(&data->lock);
+ }
+
+ return count;
+}
+static DEVICE_ATTR(ambient_light_zone, 0664,
+ adp8870_bl_ambient_light_zone_show,
+ adp8870_bl_ambient_light_zone_store);
+#endif
+
+static struct attribute *adp8870_bl_attributes[] = {
+ &dev_attr_l5_dark_max.attr,
+ &dev_attr_l5_dark_dim.attr,
+ &dev_attr_l4_indoor_max.attr,
+ &dev_attr_l4_indoor_dim.attr,
+ &dev_attr_l3_office_max.attr,
+ &dev_attr_l3_office_dim.attr,
+ &dev_attr_l2_bright_max.attr,
+ &dev_attr_l2_bright_dim.attr,
+ &dev_attr_l1_daylight_max.attr,
+ &dev_attr_l1_daylight_dim.attr,
+#ifdef ADP8870_EXT_FEATURES
+ &dev_attr_ambient_light_level.attr,
+ &dev_attr_ambient_light_zone.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group adp8870_bl_attr_group = {
+ .attrs = adp8870_bl_attributes,
+};
+
+static int __devinit adp8870_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct backlight_properties props;
+ struct backlight_device *bl;
+ struct adp8870_bl *data;
+ struct adp8870_backlight_platform_data *pdata =
+ client->dev.platform_data;
+ uint8_t reg_val;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
+ return -EIO;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data?\n");
+ return -EINVAL;
+ }
+
+ ret = adp8870_read(client, ADP8870_MFDVID, &reg_val);
+ if (ret < 0)
+ return -EIO;
+
+ if (ADP8870_MANID(reg_val) != ADP8870_MANUFID) {
+ dev_err(&client->dev, "failed to probe\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ data->revid = ADP8870_DEVID(reg_val);
+ data->client = client;
+ data->pdata = pdata;
+ data->id = id->driver_data;
+ data->current_brightness = 0;
+ i2c_set_clientdata(client, data);
+
+ mutex_init(&data->lock);
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS;
+ bl = backlight_device_register(dev_driver_string(&client->dev),
+ &client->dev, data, &adp8870_bl_ops, &props);
+ if (IS_ERR(bl)) {
+ dev_err(&client->dev, "failed to register backlight\n");
+ ret = PTR_ERR(bl);
+ goto out2;
+ }
+
+ data->bl = bl;
+
+ if (pdata->en_ambl_sens)
+ ret = sysfs_create_group(&bl->dev.kobj,
+ &adp8870_bl_attr_group);
+
+ if (ret) {
+ dev_err(&client->dev, "failed to register sysfs\n");
+ goto out1;
+ }
+
+ ret = adp8870_bl_setup(bl);
+ if (ret) {
+ ret = -EIO;
+ goto out;
+ }
+
+ backlight_update_status(bl);
+
+ dev_info(&client->dev, "Rev.%d Backlight\n", data->revid);
+
+ if (pdata->num_leds)
+ adp8870_led_probe(client);
+
+ return 0;
+
+out:
+ if (data->pdata->en_ambl_sens)
+ sysfs_remove_group(&data->bl->dev.kobj,
+ &adp8870_bl_attr_group);
+out1:
+ backlight_device_unregister(bl);
+out2:
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return ret;
+}
+
+static int __devexit adp8870_remove(struct i2c_client *client)
+{
+ struct adp8870_bl *data = i2c_get_clientdata(client);
+
+ adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
+
+ if (data->led)
+ adp8870_led_remove(client);
+
+ if (data->pdata->en_ambl_sens)
+ sysfs_remove_group(&data->bl->dev.kobj,
+ &adp8870_bl_attr_group);
+
+ backlight_device_unregister(data->bl);
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
+{
+ adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
+
+ return 0;
+}
+
+static int adp8870_i2c_resume(struct i2c_client *client)
+{
+ adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
+
+ return 0;
+}
+#else
+#define adp8870_i2c_suspend NULL
+#define adp8870_i2c_resume NULL
+#endif
+
+static const struct i2c_device_id adp8870_id[] = {
+ { "adp8870", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp8870_id);
+
+static struct i2c_driver adp8870_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = adp8870_probe,
+ .remove = __devexit_p(adp8870_remove),
+ .suspend = adp8870_i2c_suspend,
+ .resume = adp8870_i2c_resume,
+ .id_table = adp8870_id,
+};
+
+static int __init adp8870_init(void)
+{
+ return i2c_add_driver(&adp8870_driver);
+}
+module_init(adp8870_init);
+
+static void __exit adp8870_exit(void)
+{
+ i2c_del_driver(&adp8870_driver);
+}
+module_exit(adp8870_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("ADP8870 Backlight driver");
+MODULE_ALIAS("platform:adp8870-backlight");
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index 47c21fb2c82f..bea53c1a4950 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -789,6 +789,7 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
i2c_add_driver(&ad5280_driver);
memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
props.max_brightness = MAX_BRIGHENESS;
bl_dev = backlight_device_register("bf537-bl", NULL, NULL,
&bfin_lq035fb_bl_ops, &props);
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index ebda6876d3a9..377dde3d5bfc 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1101,12 +1101,10 @@ static int __devinit broadsheetfb_probe(struct platform_device *dev)
videomemorysize = roundup((dpyw*dpyh), PAGE_SIZE);
- videomemory = vmalloc(videomemorysize);
+ videomemory = vzalloc(videomemorysize);
if (!videomemory)
goto err_fb_rel;
- memset(videomemory, 0, videomemorysize);
-
info->screen_base = (char *)videomemory;
info->fbops = &broadsheetfb_ops;
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index fb205843c2c7..784139aed079 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -16,6 +16,8 @@
#include <linux/pci.h>
#include <video/vga.h>
+static bool request_mem_succeeded = false;
+
static struct fb_var_screeninfo efifb_defined __devinitdata = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
@@ -281,7 +283,9 @@ static void efifb_destroy(struct fb_info *info)
{
if (info->screen_base)
iounmap(info->screen_base);
- release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
+ if (request_mem_succeeded)
+ release_mem_region(info->apertures->ranges[0].base,
+ info->apertures->ranges[0].size);
framebuffer_release(info);
}
@@ -326,14 +330,13 @@ static int __init efifb_setup(char *options)
return 0;
}
-static int __devinit efifb_probe(struct platform_device *dev)
+static int __init efifb_probe(struct platform_device *dev)
{
struct fb_info *info;
int err;
unsigned int size_vmode;
unsigned int size_remap;
unsigned int size_total;
- int request_succeeded = 0;
if (!screen_info.lfb_depth)
screen_info.lfb_depth = 32;
@@ -387,7 +390,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
efifb_fix.smem_len = size_remap;
if (request_mem_region(efifb_fix.smem_start, size_remap, "efifb")) {
- request_succeeded = 1;
+ request_mem_succeeded = true;
} else {
/* We cannot make this fatal. Sometimes this comes from magic
spaces our resource handlers simply don't know about */
@@ -413,7 +416,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+ info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
"0x%x @ 0x%lx\n",
@@ -491,13 +494,12 @@ err_unmap:
err_release_fb:
framebuffer_release(info);
err_release_mem:
- if (request_succeeded)
+ if (request_mem_succeeded)
release_mem_region(efifb_fix.smem_start, size_total);
return err;
}
static struct platform_driver efifb_driver = {
- .probe = efifb_probe,
.driver = {
.name = "efifb",
},
@@ -528,13 +530,21 @@ static int __init efifb_init(void)
if (!screen_info.lfb_linelength)
return -ENODEV;
- ret = platform_driver_register(&efifb_driver);
+ ret = platform_device_register(&efifb_device);
+ if (ret)
+ return ret;
- if (!ret) {
- ret = platform_device_register(&efifb_device);
- if (ret)
- platform_driver_unregister(&efifb_driver);
+ /*
+ * This is not just an optimization. We will interfere
+ * with a real driver if we get reprobed, so don't allow
+ * it.
+ */
+ ret = platform_driver_probe(&efifb_driver, efifb_probe);
+ if (ret) {
+ platform_device_unregister(&efifb_device);
+ return ret;
}
+
return ret;
}
module_init(efifb_init);
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index 1b94643ecbcf..fbef15f7a218 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -231,11 +231,10 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
videomemorysize = (DPY_W*DPY_H)/8;
- if (!(videomemory = vmalloc(videomemorysize)))
+ videomemory = vzalloc(videomemorysize);
+ if (!videomemory)
return retval;
- memset(videomemory, 0, videomemorysize);
-
info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
if (!info)
goto err_fballoc;
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index d2ccfd6e662c..f135dbead07d 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -856,10 +856,10 @@ failed_platform_init:
dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu,
fbi->map_dma);
failed_map:
- clk_put(fbi->clk);
-failed_getclock:
iounmap(fbi->regs);
failed_ioremap:
+ clk_put(fbi->clk);
+failed_getclock:
release_mem_region(res->start, resource_size(res));
failed_req:
kfree(info->pseudo_palette);
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index ed64edfd2c43..97d45e5115e2 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -628,12 +628,10 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
/* we need to add a spare page because our csum caching scheme walks
* to the end of the page */
videomemorysize = PAGE_SIZE + (fw * fh);
- videomemory = vmalloc(videomemorysize);
+ videomemory = vzalloc(videomemorysize);
if (!videomemory)
goto err_fb_rel;
- memset(videomemory, 0, videomemorysize);
-
info->screen_base = (char __force __iomem *)videomemory;
info->fbops = &metronomefb_ops;
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 48c3ea8652b6..cb175fe7abc0 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -1128,3 +1128,4 @@ EXPORT_SYMBOL(fb_find_best_mode);
EXPORT_SYMBOL(fb_find_nearest_mode);
EXPORT_SYMBOL(fb_videomode_to_modelist);
EXPORT_SYMBOL(fb_find_mode);
+EXPORT_SYMBOL(fb_find_mode_cvt);
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index 35f61dd0cb3a..bb95ec56d25d 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -623,19 +623,21 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no IO memory defined\n");
- return -ENOENT;
+ ret = -ENOENT;
+ goto failed_put_clk;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ defined\n");
- return -ENOENT;
+ ret = -ENOENT;
+ goto failed_put_clk;
}
info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
if (info == NULL) {
- clk_put(clk);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto failed_put_clk;
}
/* Initialize private data */
@@ -671,7 +673,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
fbi->reg_base = ioremap_nocache(res->start, resource_size(res));
if (fbi->reg_base == NULL) {
ret = -ENOMEM;
- goto failed;
+ goto failed_free_info;
}
/*
@@ -683,7 +685,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
&fbi->fb_start_dma, GFP_KERNEL);
if (info->screen_base == NULL) {
ret = -ENOMEM;
- goto failed;
+ goto failed_free_info;
}
info->fix.smem_start = (unsigned long)fbi->fb_start_dma;
@@ -772,8 +774,9 @@ failed_free_clk:
failed_free_fbmem:
dma_free_coherent(fbi->dev, info->fix.smem_len,
info->screen_base, fbi->fb_start_dma);
-failed:
+failed_free_info:
kfree(info);
+failed_put_clk:
clk_put(clk);
dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 0352afa49a39..4aecf213c9be 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -235,13 +235,12 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct s3c_fb_win *win = info->par;
- struct s3c_fb_pd_win *windata = win->windata;
struct s3c_fb *sfb = win->parent;
dev_dbg(sfb->dev, "checking parameters\n");
- var->xres_virtual = max((unsigned int)windata->virtual_x, var->xres);
- var->yres_virtual = max((unsigned int)windata->virtual_y, var->yres);
+ var->xres_virtual = max(var->xres_virtual, var->xres);
+ var->yres_virtual = max(var->yres_virtual, var->yres);
if (!s3c_fb_validate_win_bpp(win, var->bits_per_pixel)) {
dev_dbg(sfb->dev, "win %d: unsupported bpp %d\n",
@@ -558,6 +557,13 @@ static int s3c_fb_set_par(struct fb_info *info)
vidosd_set_alpha(win, alpha);
vidosd_set_size(win, data);
+ /* Enable DMA channel for this window */
+ if (sfb->variant.has_shadowcon) {
+ data = readl(sfb->regs + SHADOWCON);
+ data |= SHADOWCON_CHx_ENABLE(win_no);
+ writel(data, sfb->regs + SHADOWCON);
+ }
+
data = WINCONx_ENWIN;
/* note, since we have to round up the bits-per-pixel, we end up
@@ -637,13 +643,6 @@ static int s3c_fb_set_par(struct fb_info *info)
writel(data, regs + sfb->variant.wincon + (win_no * 4));
writel(0x0, regs + sfb->variant.winmap + (win_no * 4));
- /* Enable DMA channel for this window */
- if (sfb->variant.has_shadowcon) {
- data = readl(sfb->regs + SHADOWCON);
- data |= SHADOWCON_CHx_ENABLE(win_no);
- writel(data, sfb->regs + SHADOWCON);
- }
-
shadow_protect_win(win, 0);
return 0;
@@ -1487,11 +1486,10 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
- kfree(sfb);
-
pm_runtime_put_sync(sfb->dev);
pm_runtime_disable(sfb->dev);
+ kfree(sfb);
return 0;
}
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 3b7f2f5bae71..4de541ca9c52 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2237,6 +2237,22 @@ static int __devinit savagefb_probe(struct pci_dev* dev,
&info->modelist);
#endif
info->var = savagefb_var800x600x8;
+ /* if a panel was detected, default to a CVT mode instead */
+ if (par->SavagePanelWidth) {
+ struct fb_videomode cvt_mode;
+
+ memset(&cvt_mode, 0, sizeof(cvt_mode));
+ cvt_mode.xres = par->SavagePanelWidth;
+ cvt_mode.yres = par->SavagePanelHeight;
+ cvt_mode.refresh = 60;
+ /* FIXME: if we know there is only the panel
+ * we can enable reduced blanking as well */
+ if (fb_find_mode_cvt(&cvt_mode, 0, 0))
+ printk(KERN_WARNING "No CVT mode found for panel\n");
+ else if (fb_find_mode(&info->var, info, NULL, NULL, 0,
+ &cvt_mode, 0) != 3)
+ info->var = savagefb_var800x600x8;
+ }
if (mode_option) {
fb_find_mode(&info->var, info, mode_option,
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 6ae40b630dc9..7d54e2c612f7 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1127,23 +1127,16 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
struct fb_info *info = hdmi->info;
unsigned long parent_rate = 0, hdmi_rate;
- /* A device has been plugged in */
- pm_runtime_get_sync(hdmi->dev);
-
ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
- if (ret < 0) {
- pm_runtime_put(hdmi->dev);
+ if (ret < 0)
goto out;
- }
hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE;
/* Reconfigure the clock */
ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate);
- if (ret < 0) {
- pm_runtime_put(hdmi->dev);
+ if (ret < 0)
goto out;
- }
msleep(10);
sh_hdmi_configure(hdmi);
@@ -1191,7 +1184,6 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
fb_set_suspend(hdmi->info, 1);
console_unlock();
- pm_runtime_put(hdmi->dev);
}
out:
@@ -1312,7 +1304,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&hdmi->edid_work, sh_hdmi_edid_work_fn);
pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
/* Product and revision IDs are 0 in sh-mobile version */
dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
@@ -1340,7 +1332,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
ecodec:
free_irq(irq, hdmi);
ereqirq:
- pm_runtime_suspend(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
iounmap(hdmi->base);
emap:
@@ -1377,7 +1369,7 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
free_irq(irq, hdmi);
/* Wait for already scheduled work */
cancel_delayed_work_sync(&hdmi->edid_work);
- pm_runtime_suspend(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_disable(hdmi->hdmi_clk);
clk_put(hdmi->hdmi_clk);
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 404c03b4b7c7..019dbd3f12b2 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -470,7 +470,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
unsigned long tmp;
int bpp = 0;
unsigned long ldddsr;
- int k, m;
+ int k, m, ret;
/* enable clocks before accessing the hardware */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -540,7 +540,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
board_cfg = &ch->cfg.board_cfg;
if (board_cfg->setup_sys) {
- int ret = board_cfg->setup_sys(board_cfg->board_data,
+ ret = board_cfg->setup_sys(board_cfg->board_data,
ch, &sh_mobile_lcdc_sys_bus_ops);
if (ret)
return ret;
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 53b2c5aae067..305c975b1787 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1265,9 +1265,11 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
static void vga16fb_destroy(struct fb_info *info)
{
+ struct platform_device *dev = container_of(info->device, struct platform_device, dev);
iounmap(info->screen_base);
fb_dealloc_cmap(&info->cmap);
/* XXX unshare VGA regions */
+ platform_set_drvdata(dev, NULL);
framebuffer_release(info);
}
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index a20218c2fda8..beac52fc1c0e 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -395,10 +395,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
spin_lock_init(&info->dirty_lock);
spin_lock_init(&info->resize_lock);
- info->fb = vmalloc(fb_size);
+ info->fb = vzalloc(fb_size);
if (info->fb == NULL)
goto error_nomem;
- memset(info->fb, 0, fb_size);
info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0f1da45ba47d..e058ace2a4ad 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -40,9 +40,6 @@ struct virtio_balloon
/* Waiting for host to ack the pages we released. */
struct completion acked;
- /* Do we have to tell Host *before* we reuse pages? */
- bool tell_host_first;
-
/* The pages we've told the Host we're not using. */
unsigned int num_pages;
struct list_head pages;
@@ -151,13 +148,14 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
vb->num_pages--;
}
- if (vb->tell_host_first) {
- tell_host(vb, vb->deflate_vq);
- release_pages_by_pfn(vb->pfns, vb->num_pfns);
- } else {
- release_pages_by_pfn(vb->pfns, vb->num_pfns);
- tell_host(vb, vb->deflate_vq);
- }
+
+ /*
+ * Note that if
+ * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
+ * is true, we *have* to do it in this order
+ */
+ tell_host(vb, vb->deflate_vq);
+ release_pages_by_pfn(vb->pfns, vb->num_pfns);
}
static inline void update_stat(struct virtio_balloon *vb, int idx,
@@ -325,9 +323,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_del_vqs;
}
- vb->tell_host_first
- = virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
-
return 0;
out_del_vqs:
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index b0043fb26a4d..68b9136847af 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -82,6 +82,9 @@ struct vring_virtqueue
/* Host supports indirect buffers */
bool indirect;
+ /* Host publishes avail event idx */
+ bool event;
+
/* Number of free buffers */
unsigned int num_free;
/* Head of free buffer list. */
@@ -237,18 +240,22 @@ EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
void virtqueue_kick(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 new, old;
START_USE(vq);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
virtio_wmb();
- vq->vring.avail->idx += vq->num_added;
+ old = vq->vring.avail->idx;
+ new = vq->vring.avail->idx = old + vq->num_added;
vq->num_added = 0;
/* Need to update avail index before checking if we should notify */
virtio_mb();
- if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
+ if (vq->event ?
+ vring_need_event(vring_avail_event(&vq->vring), new, old) :
+ !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
/* Prod other side to tell it about changes. */
vq->notify(&vq->vq);
@@ -324,6 +331,14 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
ret = vq->data[i];
detach_buf(vq, i);
vq->last_used_idx++;
+ /* If we expect an interrupt for the next entry, tell host
+ * by writing event index and flush out the write before
+ * the read in the next get_buf call. */
+ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
+ vring_used_event(&vq->vring) = vq->last_used_idx;
+ virtio_mb();
+ }
+
END_USE(vq);
return ret;
}
@@ -345,7 +360,11 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
/* We optimistically turn back on interrupts, then check if there was
* more to do. */
+ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ vring_used_event(&vq->vring) = vq->last_used_idx;
virtio_mb();
if (unlikely(more_used(vq))) {
END_USE(vq);
@@ -357,6 +376,33 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 bufs;
+
+ START_USE(vq);
+
+ /* We optimistically turn back on interrupts, then check if there was
+ * more to do. */
+ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
+ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ /* TODO: tune this threshold */
+ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
+ vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
+ virtio_mb();
+ if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
+ END_USE(vq);
+ return false;
+ }
+
+ END_USE(vq);
+ return true;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -438,6 +484,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
+ vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
/* No callback? Tell other side not to bother us. */
if (!callback)
@@ -472,6 +519,8 @@ void vring_transport_features(struct virtio_device *vdev)
switch (i) {
case VIRTIO_RING_F_INDIRECT_DESC:
break;
+ case VIRTIO_RING_F_EVENT_IDX:
+ break;
default:
/* We don't understand this bit. */
clear_bit(i, vdev->features);
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 00d615d7aa21..979d6eed9a0f 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,7 +42,7 @@ config W1_MASTER_MXC
config W1_MASTER_DS1WM
tristate "Maxim DS1WM 1-wire busmaster"
- depends on W1
+ depends on W1 && GENERIC_HARDIRQS
help
Say Y here to enable the DS1WM 1-wire driver, such as that
in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 3ff822b48145..30df85d8fca8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -395,9 +395,9 @@ static void unmask_evtchn(int port)
static void xen_irq_init(unsigned irq)
{
struct irq_info *info;
+#ifdef CONFIG_SMP
struct irq_desc *desc = irq_to_desc(irq);
-#ifdef CONFIG_SMP
/* By default all event channels notify CPU#0. */
cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif
@@ -626,6 +626,9 @@ int xen_allocate_pirq_gsi(unsigned gsi)
*
* Note: We don't assign an event channel until the irq actually started
* up. Return an existing irq if we've already got one for the gsi.
+ *
+ * Shareable implies level triggered, not shareable implies edge
+ * triggered here.
*/
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
unsigned pirq, int shareable, char *name)
@@ -664,16 +667,13 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
pirq_query_unmask(irq);
/* We try to use the handler with the appropriate semantic for the
- * type of interrupt: if the interrupt doesn't need an eoi
- * (pirq_needs_eoi returns false), we treat it like an edge
- * triggered interrupt so we use handle_edge_irq.
- * As a matter of fact this only happens when the corresponding
- * physical interrupt is edge triggered or an msi.
+ * type of interrupt: if the interrupt is an edge triggered
+ * interrupt we use handle_edge_irq.
*
- * On the other hand if the interrupt needs an eoi (pirq_needs_eoi
- * returns true) we treat it like a level triggered interrupt so we
- * use handle_fasteoi_irq like the native code does for this kind of
+ * On the other hand if the interrupt is level triggered we use
+ * handle_fasteoi_irq like the native code does for this kind of
* interrupts.
+ *
* Depending on the Xen version, pirq_needs_eoi might return true
* not only for level triggered interrupts but for edge triggered
* interrupts too. In any case Xen always honors the eoi mechanism,
@@ -681,7 +681,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
* hasn't received an eoi yet. Therefore using the fasteoi handler
* is the right choice either way.
*/
- if (pirq_needs_eoi(irq))
+ if (shareable)
irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
handle_fasteoi_irq, name);
else
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 65ea21a97492..6e8c15a23201 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -147,9 +147,15 @@ void __init xen_swiotlb_init(int verbose)
{
unsigned long bytes;
int rc;
-
- xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
- xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+ unsigned long nr_tbl;
+
+ nr_tbl = swioltb_nr_tbl();
+ if (nr_tbl)
+ xen_io_tlb_nslabs = nr_tbl;
+ else {
+ xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
+ xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+ }
bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;